
Commit 2cb4385

Minor improvement of the standard Kafka setup
1 parent adab474 commit 2cb4385

5 files changed: +49 -95 lines
+9-1
@@ -1,4 +1,12 @@
 # Kafka - Aiven - setup
-This module contains a support method for setting up Kafka Producers and Streams + standard String serdes for topics that use JSON.
+This module contains support methods for setting up Kafka Producers and Consumers + a String default for topics that use JSON.
+
+Producer:
+* Either create a KafkaSender per topic and use send with key + message
+* Or create a global KafkaSender with topic = null and use send with key + message + topic
+
+Consumers:
+* Handlers (often Transactional) implement KafkaMessageHandler / KafkaStringMessageHandler
+* Applications define a Controllable that creates a KafkaConsumerManager from the handlers and starts/stops it
 
 Some applications consume AVRO topics - they must themselves add a dependency on confluent.io and set up Schema Registry and Avro deserialization
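For orientation, a minimal sketch of the consumer wiring these bullets describe, assuming the KafkaMessageHandler shape implied by KafkaConsumerLoop further down (topic/groupId/autoOffsetReset plus Supplier-based deserializers and handleRecord); the topic name, group id and error logging are illustrative only:

import java.util.Optional;
import java.util.function.Supplier;

import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class MyJsonHandler implements KafkaMessageHandler<String, String> {

    @Override
    public String topic() { return "teamx.my-json-topic"; } // illustrative topic

    @Override
    public String groupId() { return "myapp-my-json-topic"; } // illustrative group id

    @Override
    public Optional<OffsetResetStrategy> autoOffsetReset() { return Optional.empty(); } // client default

    @Override
    public Supplier<Deserializer<String>> keyDeserializer() { return StringDeserializer::new; }

    @Override
    public Supplier<Deserializer<String>> valueDeserializer() { return StringDeserializer::new; }

    @Override
    public void handleRecord(String key, String value) {
        // Typically Transactional in the application: parse the JSON and update state idempotently
    }

    public static void main(String[] args) {
        // What the application's Controllable would do on start:
        var manager = new KafkaConsumerManager<>(new MyJsonHandler());
        manager.start((topic, e) -> System.err.println("Consumer for " + topic + " failed: " + e));
    }
}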

integrasjon/kafka-properties/pom.xml

-5
@@ -29,11 +29,6 @@
             <artifactId>kafka-clients</artifactId>
             <scope>provided</scope>
         </dependency>
-        <dependency>
-            <groupId>org.apache.kafka</groupId>
-            <artifactId>kafka-streams</artifactId>
-            <scope>provided</scope>
-        </dependency>
     </dependencies>
 
 </project>

integrasjon/kafka-properties/src/main/java/no/nav/vedtak/felles/integrasjon/kafka/KafkaConsumerManager.java

+18-17
@@ -2,28 +2,29 @@
 
 import java.time.Duration;
 import java.time.LocalDateTime;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.BiConsumer;
 import java.util.stream.Collectors;
 
 import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.errors.WakeupException;
 
 public class KafkaConsumerManager<K,V> {
 
     private static final Duration CLOSE_TIMEOUT = Duration.ofSeconds(10);
 
-    private final List<KafkaMessageHandler<K,V>> handlers;
-    private final List<KafkaConsumerLoop<K,V>> consumers = new ArrayList<>();
+    private final List<KafkaConsumerLoop<K,V>> consumers;
 
-    public KafkaConsumerManager(List<KafkaMessageHandler<K, V>> handlers) {
-        this.handlers = handlers;
+
+    public KafkaConsumerManager(KafkaMessageHandler<K, V> handler) {
+        this(List.of(handler));
+    }
+
+    public KafkaConsumerManager(List<? extends KafkaMessageHandler<K, V>> handlers) {
+        this.consumers = handlers.stream().map(KafkaConsumerLoop::new).toList();
     }
 
     public void start(BiConsumer<String, Throwable> errorlogger) {
-        consumers.addAll(handlers.stream().map(KafkaConsumerLoop::new).toList());
         consumers.forEach(c -> {
             var ct = new Thread(c, "KC-" + c.handler().groupId());
             ct.setUncaughtExceptionHandler((t, e) -> { errorlogger.accept(c.handler().topic(), e); stop(); });
@@ -53,7 +54,7 @@ public boolean allStopped() {
     }
 
     public String topicNames() {
-        return handlers.stream().map(KafkaMessageHandler::topic).collect(Collectors.joining(","));
+        return consumers.stream().map(KafkaConsumerLoop::handler).map(KafkaMessageHandler::topic).collect(Collectors.joining(","));
    }
 
     private record KafkaConsumerCloser<K,V>(List<KafkaConsumerLoop<K,V>> consumers) implements Runnable {
@@ -66,7 +67,7 @@ public void run() {
     public static class KafkaConsumerLoop<K,V> implements Runnable {
 
         private static final Duration POLL_TIMEOUT = Duration.ofMillis(100);
-        private static final Duration CLOSE_TIMEOUT = Duration.ofSeconds(10);
+
         private enum ConsumerState { UNINITIALIZED, RUNNING, STOPPING, STOPPED }
         private static final int RUNNING = ConsumerState.RUNNING.hashCode();
 
@@ -77,23 +78,25 @@ private enum ConsumerState { UNINITIALIZED, RUNNING, STOPPING, STOPPED }
         public KafkaConsumerLoop(KafkaMessageHandler<K,V> handler) {
             this.handler = handler;
         }
+
+        // Implemented as at-least-once - requires reasonably idempotent handleRecord and assumes they are Transactional (commit per call)
+        // If you want to complicate things you can go for exactly-once - handle OffsetCommit yourself (set the property ENABLE_AUTO_COMMIT_CONFIG to false)
+        // You must then be conscious of the interplay between DB commit and offset commit - make one Transactional handleRecords for everything polled.
+        // handleRecords must take the ConsumerRecords (everything polled) and 2 callbacks that a) add to consumed and b) call commitAsync(consumed)
+        // In addition you must catch WakeupException and other exceptions and reconcile that handling (OffsetCommit) with the DB TX commit
         @Override
         public void run() {
             try(var key = handler.keyDeserializer().get(); var value = handler.valueDeserializer().get()) {
-                var props = KafkaProperties.forConsumerGenericValue(handler.groupId(), key, value, handler.autoOffsetReset());
+                var props = KafkaProperties.forConsumerGenericValue(handler.groupId(), key, value, handler.autoOffsetReset().orElse(null));
                 consumer = new KafkaConsumer<>(props, key, value);
                 consumer.subscribe(List.of(handler.topic()));
                 running.set(RUNNING);
                 while (running.get() == RUNNING) {
                     var records = consumer.poll(POLL_TIMEOUT);
-                    // If you want to complicate things you can handle both OffsetCommit and DB commit in one Transactional handleRecords.
-                    // handleRecords must take everything polled (records) and 2 callbacks that a) add to consumed and b) commitAsync(consumed)
                     for (var record : records) {
                         handler.handleRecord(record.key(), record.value());
                     }
                 }
-            } catch (WakeupException e) {
-                // ignore for shutdown
             } finally {
                 if (consumer != null) {
                     consumer.close(CLOSE_TIMEOUT);
@@ -108,9 +111,7 @@ public void shutdown() {
             } else {
                 running.set(ConsumerState.STOPPED.hashCode());
             }
-            if (consumer != null) {
-                consumer.wakeup();
-            }
+            // Could consider consumer.wakeup() + handling WakeupException above - but omitted in favor of a state flag and polling with a short timeout
         }
 
         public KafkaMessageHandler<K, V> handler() {
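The at-least-once comments in the loop above outline a manual-offset-commit alternative. A self-contained sketch of that idea against the plain Kafka consumer API, under the stated assumption that ENABLE_AUTO_COMMIT_CONFIG is set to false; broker address, topic and group id are placeholders, and the DB transaction is only hinted at in comments:

import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ManualCommitSketch {

    public static void main(String[] args) {
        var props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "myapp-group"); // placeholder
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); // take over offset commits

        try (var consumer = new KafkaConsumer<>(props, new StringDeserializer(), new StringDeserializer())) {
            consumer.subscribe(List.of("teamx.my-json-topic")); // placeholder
            while (true) {
                var records = consumer.poll(Duration.ofMillis(100));
                Map<TopicPartition, OffsetAndMetadata> consumed = new HashMap<>();
                for (var record : records) {
                    // a) handle the record and add it to "consumed" - in the scheme sketched above,
                    //    this would happen inside the same transaction as the DB commit
                    consumed.put(new TopicPartition(record.topic(), record.partition()),
                            new OffsetAndMetadata(record.offset() + 1));
                }
                if (!consumed.isEmpty()) {
                    consumer.commitAsync(consumed, null); // b) commit only what was actually consumed
                }
            }
        }
    }
}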
integrasjon/kafka-properties/src/main/java/no/nav/vedtak/felles/integrasjon/kafka/KafkaProperties.java

+10-68
@@ -1,6 +1,5 @@
 package no.nav.vedtak.felles.integrasjon.kafka;
 
-import java.util.Map;
 import java.util.Optional;
 import java.util.Properties;
 import java.util.UUID;
@@ -13,17 +12,7 @@
 import org.apache.kafka.common.config.SslConfigs;
 import org.apache.kafka.common.security.auth.SecurityProtocol;
 import org.apache.kafka.common.serialization.Deserializer;
-import org.apache.kafka.common.serialization.Serde;
-import org.apache.kafka.common.serialization.Serdes;
-import org.apache.kafka.common.serialization.StringDeserializer;
 import org.apache.kafka.common.serialization.StringSerializer;
-import org.apache.kafka.streams.StreamsConfig;
-import org.apache.kafka.streams.errors.LogAndFailExceptionHandler;
-import org.apache.kafka.streams.state.RocksDBConfigSetter;
-import org.apache.kafka.streams.state.internals.BlockBasedTableConfigWithAccessibleCache;
-import org.rocksdb.BloomFilter;
-import org.rocksdb.LRUCache;
-import org.rocksdb.Options;
 
 import no.nav.foreldrepenger.konfig.Environment;
 
@@ -55,23 +44,18 @@ public static Properties forProducer() {
         return props;
     }
 
-    // For everyone consuming JSON messages
-    public static Properties forConsumerStringValue(String groupId) {
-        return forConsumerGenericValue(groupId, new StringDeserializer(), new StringDeserializer(), Optional.empty());
-    }
-
-    public static <K,V> Properties forConsumerGenericValue(String groupId, Deserializer<K> valueKey, Deserializer<V> valueSerde, Optional<OffsetResetStrategy> offsetReset) {
+    public static <K,V> Properties forConsumerGenericValue(String groupId, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, OffsetResetStrategy offsetReset) {
         final Properties props = new Properties();
 
         props.put(CommonClientConfigs.GROUP_ID_CONFIG, groupId);
         props.put(CommonClientConfigs.CLIENT_ID_CONFIG, generateClientId());
         props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, getAivenConfig(AivenProperty.KAFKA_BROKERS));
-        offsetReset.ifPresent(or -> props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, or.toString()));
+        Optional.ofNullable(offsetReset).ifPresent(or -> props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, or.toString()));
 
         putSecurity(props);
 
-        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, valueKey.getClass());
-        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueSerde.getClass());
+        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass());
+        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass());
 
         // Polling
         props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "100"); // Avoid large Tx if everything is processed within the same Tx. Default 500
@@ -80,33 +64,13 @@ public static <K,V> Properties forConsumerGenericValue(String groupId, Deseriali
         return props;
     }
 
-    // For everyone consuming JSON messages
-    public static Properties forStreamsStringValue(String applicationId) {
-        return forStreamsGenericValue(applicationId, Serdes.String());
-    }
-
-    public static <T> Properties forStreamsGenericValue(String applicationId, Serde<T> valueSerde) {
-        final Properties props = new Properties();
-
-        props.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
-        props.put(StreamsConfig.CLIENT_ID_CONFIG, generateClientId());
-        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, getAivenConfig(AivenProperty.KAFKA_BROKERS));
-
-        putSecurity(props);
-
-        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
-        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, valueSerde.getClass());
-        props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG, LogAndFailExceptionHandler.class);
-
-        props.put(StreamsConfig.DEFAULT_DSL_STORE_CONFIG, StreamsConfig.IN_MEMORY);
-        props.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, StreamsRocksReadOnly.class);
-
-        // Polling
-        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "200");
-        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "60000");
+    /*
+     * The Streams config has been removed. If it is ever reintroduced, remember that read+write access to the topic is needed to avoid log spamming.
+     * - APPLICATION_ID_CONFIG = the same value as used for GROUP_ID_CONFIG (but you cannot have both streams and consumer)
+     * - KEY+VALUE SERDE - typically Serdes.String() + deserialization_exception = LogAndFailExceptionHandler
+     * - Should look at the rocksdb settings (see the history)
+     */
 
-        return props;
-    }
 
     // Only needed by those who consume Avro. Otherwise not
     public static String getAvroSchemaRegistryURL() {
@@ -148,26 +112,4 @@ private static void putSecurity(Properties props) {
         }
     }
 
-    public static class StreamsRocksReadOnly implements RocksDBConfigSetter {
-
-        @Override
-        public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
-
-            BlockBasedTableConfigWithAccessibleCache tableConfig = new BlockBasedTableConfigWithAccessibleCache();
-            tableConfig.setBlockCache(new LRUCache(1024 * 1024L));
-            tableConfig.setBlockSize(4096L);
-            tableConfig.setFilterPolicy(new BloomFilter());
-            tableConfig.setCacheIndexAndFilterBlocks(true);
-            options.setTableFormatConfig(tableConfig);
-
-            options.setWriteBufferSize(512 * 1024L);
-            options.setMaxWriteBufferNumber(2);
-        }
-
-        @Override
-        public void close(final String storeName, final Options options) {
-            // NOOP
-        }
-    }
-
 }
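With forConsumerStringValue removed, a JSON/String consumer would presumably call forConsumerGenericValue directly, per the new signature above; a sketch where the group id is illustrative and null means the client's default auto.offset.reset applies:

import java.util.Properties;

import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.serialization.StringDeserializer;

class ConsumerPropsExample {

    // Roughly what the removed forConsumerStringValue(groupId) did, minus the Optional
    static Properties forStringConsumer() {
        return KafkaProperties.forConsumerGenericValue("myapp-group",
                new StringDeserializer(), new StringDeserializer(), null);
    }

    // Or pin the reset strategy explicitly, e.g. to read the topic from the beginning
    static Properties fromStart() {
        return KafkaProperties.forConsumerGenericValue("myapp-group",
                new StringDeserializer(), new StringDeserializer(), OffsetResetStrategy.EARLIEST);
    }
}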

integrasjon/kafka-properties/src/main/java/no/nav/vedtak/felles/integrasjon/kafka/KafkaSender.java

+12-4
@@ -1,5 +1,6 @@
 package no.nav.vedtak.felles.integrasjon.kafka;
 
+import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.Producer;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.clients.producer.RecordMetadata;
@@ -11,24 +12,31 @@ public class KafkaSender {
     private final Producer<String, String> producer;
     private final String topic;
 
-    public KafkaSender(Producer<String, String> producer, String topic) {
-        this.producer = producer;
+    public KafkaSender(String topic) {
+        this.producer = new KafkaProducer<>(KafkaProperties.forProducer());
         this.topic = topic;
     }
 
     public RecordMetadata send(String key, String message) {
+        if (topic == null) {
+            throw kafkaPubliseringException("null", new IllegalArgumentException());
+        }
+        return send(key, message, this.topic);
+    }
+
+    public RecordMetadata send(String key, String message, String topic) {
         try {
             var record = new ProducerRecord<>(topic, key, message);
             return producer.send(record).get();
         } catch (Exception e) {
             if (e instanceof InterruptedException) {
                 Thread.currentThread().interrupt();
             }
-            throw kafkaPubliseringException(e);
+            throw kafkaPubliseringException(topic, e);
         }
     }
 
-    private IntegrasjonException kafkaPubliseringException(Exception e) {
+    private IntegrasjonException kafkaPubliseringException(String topic, Exception e) {
         return new IntegrasjonException("F-KAFKA-925475", "Unexpected error when sending message to topic " + topic, e);
     }
 
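Finally, a usage sketch of the two producer patterns from the README, based on the constructor and send overloads above; topic names are illustrative:

public class SenderExample {

    public static void main(String[] args) {
        // Per-topic sender: the topic is fixed at construction
        var perTopic = new KafkaSender("teamx.my-json-topic");
        perTopic.send("my-key", "{\"hello\":\"world\"}");

        // Global sender: topic = null, so every send must name its topic explicitly;
        // calling the two-argument send on this instance would throw F-KAFKA-925475
        var global = new KafkaSender(null);
        global.send("my-key", "{\"hello\":\"world\"}", "teamx.other-topic");
    }
}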