
Commit

ADD: kraft controller specific exporter config, README update, dev-toolkit start.sh fix (#297)
ram-pi authored Feb 14, 2025
1 parent 0f6d137 commit 288381b
Showing 5 changed files with 165 additions and 10 deletions.
10 changes: 5 additions & 5 deletions README.md
@@ -179,17 +179,17 @@ To run the replicator scenario, run `start.sh --profile replicator`.
 It's possible to combine profiles as well, e.g. `start.sh --profile schema-registry --profile ksqldb` (see the example after this list).
 
 Currently supported profiles:
-- _replicator_: it will add a Kafka connect cluster with Confluent Replicator between _kafka1-kafka2-kafka3-kafka4_ and a new cluster with 1 broker _broker-dest_
-- _schema-registry_: it will add Confluent Schema Registry.
-- _schema-registry-primary-secondary_: it will add 2 Confluent Schema Registry, primary and secondary.
-- _clusterlinking_: add Cluster Linking between _kafka1-kafka2-kafka3-kafka4_ and a new cluster with 1 broker _broker-dest_
 - _connect_: it will add Kafka Connect with a datagen source connector and a file sink connector.
-- _ksqldb_: it will add ksqldb server. It requires _schema-registry_ profile.
 - _consumer_: it will add a demo application implemented with Spring with full client metrics
 - _consumer-minimal_: it will add a demo application implemented with Spring with a limited number of client metrics
 - _jr_: it will add [JR](https://jrnd.io/) to generate random traffic for kafka.
+- _clusterlinking_: add Cluster Linking between _kafka1-kafka2-kafka3-kafka4_ and a new cluster with 1 broker _broker-dest_
+- _ksqldb_: it will add ksqldb server. It requires _schema-registry_ profile.
 - _kstream_: it will add a demo stateful kafka streams application with full client metrics (_TRACE_ level selected)
 - _kui_: it will add an instance of _kafka-ui_ for topic data visualization (available on port 18080).
+- _replicator_: it will add a Kafka connect cluster with Confluent Replicator between _kafka1-kafka2-kafka3-kafka4_ and a new cluster with 1 broker _broker-dest_
+- _schema-registry_: it will add Confluent Schema Registry.
+- _schema-registry-primary-secondary_: it will add 2 Confluent Schema Registry, primary and secondary.
 - _tieredstorage_: it will configure Confluent Platform to use Confluent Tiered Storage and a compatible S3 storage.
 
 ## DEV-toolkit FAQ
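For context, profiles are combined by repeating the `--profile` flag on the `start.sh` command line. An illustrative invocation (the flags come from the README list above; the particular combination is just an example):

```bash
# Start the core stack plus Schema Registry, ksqlDB (which requires the
# schema-registry profile) and the new tieredstorage profile.
./start.sh --profile schema-registry --profile ksqldb --profile tieredstorage
```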
2 changes: 2 additions & 0 deletions dev-toolkit/docker-compose.kui.yaml
@@ -1,5 +1,7 @@
 services:
   kafka-ui:
+    profiles:
+      - kui
     container_name: kui
     image: provectuslabs/kafka-ui:latest
     ports:
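With the `profiles` key in place, Docker Compose creates the `kui` container only when that profile is explicitly requested. A minimal sketch of standard Compose behavior:

```bash
# Without --profile kui this file contributes no containers;
# with it, kafka-ui comes up on port 18080 as documented in the README.
docker compose -f docker-compose.yaml -f docker-compose.kui.yaml --profile kui up -d
```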
17 changes: 13 additions & 4 deletions dev-toolkit/start.sh
@@ -155,9 +155,9 @@ EOF
 
 echo -e "\nStarting profiles..."
 
-# Start the development environment
-$DOCKER_COMPOSE_CMD ${docker_args[@]} \
--f docker-compose.yaml \
+# Define string with all docker-compose files
+DOCKER_COMPOSE_FILES="-f docker-compose.yaml \
+-f docker-compose.yaml \
 -f docker-compose.replicator.yaml \
 -f docker-compose.schema-registry.yaml \
 -f docker-compose.ksqldb.yaml \
@@ -169,7 +169,16 @@ $DOCKER_COMPOSE_CMD ${docker_args[@]} \
 -f docker-compose.connect.yaml \
 -f docker-compose.kstream.yaml \
 -f docker-compose.kui.yaml \
--f docker-compose.tieredstorage.yaml \
+"
+
+# if docker_args contains tieredstorage, then add the tieredstorage file
+if [[ " ${docker_args[@]} " =~ " tieredstorage " ]]; then
+  DOCKER_COMPOSE_FILES="${DOCKER_COMPOSE_FILES} -f docker-compose.tieredstorage.yaml"
+fi
+
+# Start the development environment
+$DOCKER_COMPOSE_CMD ${docker_args[@]} \
+$DOCKER_COMPOSE_FILES \
 up -d
 
 # if docker_args contains connect, then start the connect
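The tieredstorage check added above relies on a common Bash idiom: the array is flattened into a single space-padded string so a whole-word substring test can be applied. A minimal, self-contained sketch of the same pattern (the sample `docker_args` values are hypothetical):

```bash
#!/usr/bin/env bash
# Sketch of the whole-word membership test used in start.sh.
docker_args=(--profile schema-registry --profile tieredstorage)
DOCKER_COMPOSE_FILES="-f docker-compose.yaml"

# Padding both haystack and needle with spaces prevents partial matches:
# " tieredstorage " will not match an argument like "tieredstorage-v2".
if [[ " ${docker_args[@]} " =~ " tieredstorage " ]]; then
  DOCKER_COMPOSE_FILES="${DOCKER_COMPOSE_FILES} -f docker-compose.tieredstorage.yaml"
fi

echo "${DOCKER_COMPOSE_FILES}"
```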
2 changes: 1 addition & 1 deletion jmxexporter-prometheus-grafana/cp-ansible/README.md
@@ -11,7 +11,7 @@ Add JMX Exporter 1.1.0:
 KRaft Cluster:
 ```yaml
-kafka_controller_jmxexporter_config_source_path: ../../jmx-monitoring-stacks/shared-assets/jmx-exporter/kafka_broker.yml
+kafka_controller_jmxexporter_config_source_path: ../../jmx-monitoring-stacks/shared-assets/jmx-exporter/kafka_controller.yml
 kafka_broker_jmxexporter_config_source_path: ../../jmx-monitoring-stacks/shared-assets/jmx-exporter/kafka_broker.yml
 schema_registry_jmxexporter_config_source_path: ../../jmx-monitoring-stacks/shared-assets/jmx-exporter/confluent_schemaregistry.yml
 kafka_connect_jmxexporter_config_source_path: ../../jmx-monitoring-stacks/shared-assets/jmx-exporter/kafka_connect.yml
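One way to sanity-check the change after re-running cp-ansible is to scrape the controller's exporter endpoint and look for KRaft-specific series; the host and port below are placeholders for whatever your inventory actually configures:

```bash
# Hypothetical host/port; substitute your controller host and its configured
# jmxexporter listen port. A KRaft controller should now expose raft metrics.
curl -s http://kafka-controller-1:8079/metrics | grep '^kafka_server_raft_metrics_'
```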
144 changes: 144 additions & 0 deletions shared-assets/jmx-exporter/kafka_controller.yml
@@ -0,0 +1,144 @@
---
startDelaySeconds: 120
lowercaseOutputName: true
lowercaseOutputLabelNames: true
blacklistObjectNames:
  - "kafka.consumer:type=*,id=*"
  - "kafka.consumer:type=*,client-id=*"
  - "kafka.consumer:type=*,client-id=*,node-id=*"
  - "kafka.producer:type=*,id=*"
  - "kafka.producer:type=*,client-id=*"
  - "kafka.producer:type=*,client-id=*,node-id=*"
  - "kafka.*:type=kafka-metrics-count,*"
  # This will ignore the admin client metrics from Kafka brokers and will blacklist
  # certain metrics that do not make sense for ingestion:
  # "kafka.admin.client:type=*, node-id=*, client-id=*"
  # "kafka.admin.client:type=*, client-id=*"
  # "kafka.admin.client:type=*, id=*"
  - "kafka.admin.client:*"
  - "kafka.server:type=*,cipher=*,protocol=*,listener=*,networkProcessor=*"
  #- "kafka.server:type=*"
  - "kafka.server:type=app-info,id=*"
  - "kafka.rest:*"
  - "rest.utils:*"
  - "io.confluent.common.security.jetty:*"
  - "io.confluent.rest:*"
  - "confluent.metadata.service:type=app-info,id=*"
  - "confluent.metadata.service:type=app-info,client-id=*"
  - "confluent.metadata:type=kafkaauthstore,*"
rules:
  # This is by far the biggest contributor to the sheer number of metrics produced.
  # Always keep it near the top so that the bulk of incoming metrics match the first rule and exit early.
  # "kafka.cluster:type=*, name=*, topic=*, partition=*"
  # "kafka.log:type=*, name=*, topic=*, partition=*"
  - pattern: kafka.(\w+)<type=(.+), name=(.+), topic=(.+), partition=(.+)><>Value
    name: kafka_$1_$2_$3
    type: GAUGE
    cache: true
    labels:
      topic: "$4"
      partition: "$5"
  # The next two rules are similar: the Value version is a GAUGE; the Count version is not.
  - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value
    name: kafka_server_$1_$2
    type: GAUGE
    cache: true
    labels:
      clientId: "$3"
      broker: "$4:$5"
  - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Count
    name: kafka_server_$1_$2
    cache: true
    labels:
      clientId: "$3"
      broker: "$4:$5"
  - pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile
    name: kafka_$1_$2_$3
    type: GAUGE
    cache: true
    labels:
      "$4": "$5"
      "$6": "$7"
      quantile: "0.$8"
# "kafka.rest:type=*, topic=*, partition=*, client-id=*"
# "kafka.rest:type=*, cipher=*, protocol=*, client-id=*"
- pattern: kafka.(\w+)<type=(.+), (.+)=(.+), (.+)=(.+), (.+)=(.+)><>Value
name: kafka_$1_$2
cache: true
labels:
"$3": "$4"
"$5": "$6"
"$7": "$8"
# Count and Value
# "kafka.server:type=*, name=*, topic=*"
# "kafka.server:type=*, name=*, clientId=*"
# "kafka.server:type=*, name=*, delayedOperation=*"
# "kafka.server:type=*, name=*, fetcherType=*"
# "kafka.network:type=*, name=*, networkProcessor=*"
# "kafka.network:type=*, name=*, processor=*"
# "kafka.network:type=*, name=*, request=*"
# "kafka.network:type=*, name=*, listener=*"
# "kafka.log:type=*, name=*, logDirectory=*"
# "kafka.log:type=*, name=*, op=*"
# "kafka.rest:type=*, node-id=*, client-id=*"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>(Count|Value)
name: kafka_$1_$2_$3
cache: true
labels:
"$4": "$5"
# "kafka.consumer:type=*, topic=*, client-id=*"
# "kafka.producer:type=*, topic=*, client-id=*"
# "kafka.rest:type=*, topic=*, client-id=*"
# "kafka.server:type=*, broker-id=*, fetcher-id=*"
# "kafka.server:type=*, listener=*, networkProcessor=*"
- pattern: kafka.(\w+)<type=(.+), (.+)=(.+), (.+)=(.+)><>(Count|Value)
name: kafka_$1_$2
cache: true
labels:
"$3": "$4"
"$5": "$6"
# - pattern: "kafka.(.+)<type=(.+), (.+)=(.+), (.+)=(.+)><>(.+):"
# name: kafka_$1_$2
# cache: true
# labels:
# "$3": "$4"
# "$5": "$6"
# attribute_name: "$7"
# "kafka.network:type=*, name=*"
# "kafka.server:type=*, name=*"
# "kafka.controller:type=*, name=*"
# "kafka.databalancer:type=*, name=*"
# "kafka.log:type=*, name=*"
# "kafka.utils:type=*, name=*"
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>(Count|Value)
name: kafka_$1_$2_$3
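  # Worked example (illustrative, not part of the commit): the standard controller
  # MBean kafka.controller:type=KafkaController,name=ActiveControllerCount, attribute
  # Value, matches the rule above with $1=controller, $2=KafkaController,
  # $3=ActiveControllerCount and, after lowercaseOutputName, is exported as
  #   kafka_controller_kafkacontroller_activecontrollercount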
# "kafka.producer:type=*, client-id=*"
# "kafka.producer:type=*, id=*"
# "kafka.rest:type=*, client-id=*"
# "kafka.rest:type=*, http-status-code=*"
# "kafka.server:type=*, BrokerId=*"
# "kafka.server:type=*, listener=*"
# "kafka.server:type=*, id=*"
- pattern: kafka.(\w+)<type=(.+), (.+)=(.+)><>Value
name: kafka_$1_$2
cache: true
labels:
"$3": "$4"

  # Broker Metrics
  - pattern: "kafka.server<type=BrokerTopicMetrics, name=(MessagesInPerSec|BytesInPerSec|BytesOutPerSec|TotalProduceRequestsPerSec|TotalFetchRequestsPerSec), topic=(.+)><>(Count|OneMinuteRate|FiveMinuteRate|FifteenMinuteRate)"
    name: kafka_server_brokertopicmetrics_$1_$3
    type: GAUGE
    cache: true
    labels:
      topic: "$2"

  - pattern: "kafka.server<type=BrokerTopicMetrics, name=(MessagesInPerSec|BytesInPerSec|BytesOutPerSec)><>(Count|OneMinuteRate|FiveMinuteRate|FifteenMinuteRate)"
    name: kafka_server_brokertopicmetrics_$1_$2_alltopics
    type: GAUGE

  # "kafka.server:type=raft-metrics"
  - pattern: kafka.server<type=raft-metrics><>(.+):(.*)
    name: kafka_server_raft_metrics_$1
    type: GAUGE
    cache: true
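  # Example (illustrative): a raft-metrics attribute such as high-watermark is
  # captured as $1 and exported as kafka_server_raft_metrics_high_watermark;
  # characters that are invalid in Prometheus names (e.g. "-") become "_".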
