EventBusKafka.cs
using Autofac;
using Confluent.Kafka;
using EventBus.Abstractions;
using EventBus.Events;
using EventBus.Extensions;
using Microsoft.Extensions.Logging;
using Newtonsoft.Json;
using System;
using System.Text;
using System.Threading;
namespace EventBus.Kafka
{
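    /// <summary>
    /// Kafka implementation of <see cref="IEventBus"/>: publishes integration events as
    /// JSON-serialized messages keyed by event type name, and dispatches consumed messages
    /// to the handlers registered with the subscriptions manager.
    /// </summary>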
    public class EventBusKafka : SubscribeProcessEvent, IEventBus, IDisposable
    {
        const string APP_NAME = "Kafka";
        const string AUTOFAC_SCOPE_NAME = "e_event_bus";

        private readonly IKafkaPersistentConnection _persistentConnection;

        public EventBusKafka(IKafkaPersistentConnection persistentConnection,
            ILogger<EventBusKafka> logger,
            ILifetimeScope autofac,
            IEventBusSubscriptionsManager subsManager)
            : base(logger, subsManager, autofac, appName: APP_NAME, autofacScopeName: AUTOFAC_SCOPE_NAME)
        {
            _persistentConnection = persistentConnection ?? throw new ArgumentNullException(nameof(persistentConnection));
            _subsManager.OnEventRemoved += SubsManager_OnEventRemoved;
        }

        private void SubsManager_OnEventRemoved(object sender, string eventName)
        {
            //TODO:....
        }
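
        /// <summary>
        /// Serializes the event to JSON and produces it to the connection's topic, using the
        /// event type name as the message key. The delivery result is logged in a continuation
        /// on the produce task.
        /// </summary>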
        public void Publish(IntegrationEvent @event)
        {
            var eventName = @event.GetType().Name;

            _logger.LogTrace("Creating Kafka producer to publish event: {EventId} ({EventName})", @event.Id, eventName);

            var message = JsonConvert.SerializeObject(@event);
            var body = Encoding.UTF8.GetBytes(message);

            using (var producer = _persistentConnection.CreateProducer())
            {
                var task = producer.ProduceAsync(_persistentConnection.Topic,
                    new Message<string, byte[]> { Key = eventName, Value = body });

                task.ContinueWith(response =>
                {
                    if (response.IsCompletedSuccessfully)
                    {
                        _logger.LogInformation("Event {EventId} written to offset {Offset}", @event.Id, response.Result.Offset);
                    }
                    else
                    {
                        // Reading response.Result on a faulted task would rethrow, so log the
                        // exception carried by the task instead.
                        _logger.LogError(response.Exception, "Event {EventId} failed to publish", @event.Id);
                    }
                });

                producer.Flush(TimeSpan.FromMilliseconds(100));
            }
        }
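
        /// <summary>
        /// Registers <typeparamref name="TH"/> as the handler for <typeparamref name="T"/> and
        /// starts the consume loop on the connection's topic.
        /// </summary>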
        public void Subscribe<T, TH>()
            where T : IntegrationEvent
            where TH : IIntegrationEventHandler<T>
        {
            var eventName = _subsManager.GetEventKey<T>();

            _logger.LogInformation("Subscribing to event {EventName} with {EventHandler}", eventName, typeof(TH).GetGenericTypeName());

            _subsManager.AddSubscription<T, TH>();
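
            // Note: the consume loop below runs on the calling thread (no background thread
            // is created), so this call blocks until the consumer cancellation token is cancelled.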
            Consume(_persistentConnection.ConsumerCancellationToken);
        }

        public void SubscribeDynamic<TH>(string eventName)
            where TH : IDynamicIntegrationEventHandler
        {
            _logger.LogInformation("Subscribing to dynamic event {EventName} with {EventHandler}", eventName, typeof(TH).GetGenericTypeName());

            _subsManager.AddDynamicSubscription<TH>(eventName);
            Consume(_persistentConnection.ConsumerCancellationToken);
        }

        public void Unsubscribe<T, TH>()
            where T : IntegrationEvent
            where TH : IIntegrationEventHandler<T>
        {
            var eventName = _subsManager.GetEventKey<T>();

            _logger.LogInformation("Unsubscribing from event {EventName}", eventName);

            _subsManager.RemoveSubscription<T, TH>();
        }

        public void UnsubscribeDynamic<TH>(string eventName)
            where TH : IDynamicIntegrationEventHandler
        {
            _subsManager.RemoveDynamicSubscription<TH>(eventName);
        }
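
        // Only the subscription registry is cleared here; the injected persistent
        // connection is not disposed by this class.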
        public void Dispose()
        {
            _subsManager.Clear();
        }

        /// <summary>
        /// In this example:
        /// - offsets are committed manually.
        /// - no extra thread is created for the poll (Consume) loop.
        /// </summary>
        private void Consume(CancellationToken cancellationToken)
        {
            const int commitPeriod = 5;

            // Note: if a key or value deserializer is not set (as is the case below), the
            // deserializer corresponding to the appropriate type from Confluent.Kafka.Deserializers
            // will be used automatically (where available). The default deserializer for string
            // is UTF8. The default deserializer for Ignore returns null for all input data
            // (including non-null data).
            using (var consumer = _persistentConnection.CreateConsumer())
            {
                consumer.Subscribe(_persistentConnection.Topic);

                try
                {
                    while (true)
                    {
                        try
                        {
                            var consumeResult = consumer.Consume(cancellationToken);

                            if (consumeResult.IsPartitionEOF)
                            {
                                _logger.LogInformation("Kafka reached end of topic {Topic}, partition {Partition}, offset {Offset}.", consumeResult.Topic, consumeResult.Partition, consumeResult.Offset);
                                continue;
                            }

                            var eventName = consumeResult.Message.Key;
                            var message = Encoding.UTF8.GetString(consumeResult.Message.Value);

                            base.ProcessEvent(eventName, message).Wait();

                            _logger.LogInformation("Kafka received message at {TopicPartitionOffset}, key: {EventName}, message: {Message}", consumeResult.TopicPartitionOffset, eventName, message);

                            if (consumeResult.Offset % commitPeriod == 0)
                            {
                                // The Commit method sends a "commit offsets" request to the Kafka
                                // cluster and synchronously waits for the response. This is very
                                // slow compared to the rate at which the consumer is capable of
                                // consuming messages. A high-performance application will typically
                                // commit offsets relatively infrequently and be designed to handle
                                // duplicate messages in the event of failure.
                                try
                                {
                                    consumer.Commit(consumeResult);
                                }
                                catch (KafkaException e)
                                {
                                    _logger.LogError(e, "Kafka commit error: {Reason}", e.Error.Reason);
                                }
                            }
                        }
                        catch (ConsumeException e)
                        {
                            _logger.LogError(e, "Kafka consume error: {Reason}", e.Error.Reason);
                        }
                    }
                }
                catch (OperationCanceledException)
                {
                    _logger.LogInformation("Cancellation requested; closing Kafka consumer.");
                    consumer.Close();
                }
            }
        }
    }
}
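
A minimal usage sketch follows, assuming Autofac wiring. The InMemoryEventBusSubscriptionsManager, DefaultKafkaPersistentConnection, and OrderCreated* types are hypothetical stand-ins, and the ILogger<> registration is omitted; only the IEventBus calls mirror the class above.

using System.Threading.Tasks;
using Autofac;
using EventBus.Abstractions;
using EventBus.Kafka;

var builder = new ContainerBuilder();

// Hypothetical concrete implementations; ILogger<> registration (e.g. via a
// logging module) is omitted for brevity.
builder.RegisterType<InMemoryEventBusSubscriptionsManager>()
       .As<IEventBusSubscriptionsManager>().SingleInstance();
builder.RegisterType<DefaultKafkaPersistentConnection>()
       .As<IKafkaPersistentConnection>().SingleInstance();
builder.RegisterType<EventBusKafka>().As<IEventBus>().SingleInstance();
builder.RegisterType<OrderCreatedIntegrationEventHandler>(); // hypothetical handler

var container = builder.Build();
var eventBus = container.Resolve<IEventBus>();

// The event type name ("OrderCreatedIntegrationEvent") becomes the Kafka message key.
eventBus.Publish(new OrderCreatedIntegrationEvent(orderId: 42));

// Subscribe blocks its calling thread in the consume loop, so run it on a worker thread.
Task.Run(() => eventBus.Subscribe<OrderCreatedIntegrationEvent, OrderCreatedIntegrationEventHandler>());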