Fix lint issues in integrations-extras (#2514)
* Lint Cyral

* Lint Lighthouse

* Lint NVML

* Lint Puma

* Lint riak_repl

* Lint Scalr

* Lint Stardog

* Lint Storm
Kyle-Neale authored Oct 15, 2024
1 parent cf15f75 commit edaf036
Showing 9 changed files with 19 additions and 21 deletions.
2 changes: 1 addition & 1 deletion cyral/datadog_checks/cyral/cyral.py
@@ -51,7 +51,7 @@ def __init__(self, name, init_config, instances=None):
             'namespace': self.NAMESPACE,
             'metrics': [self.metrics_mapper],
             'send_histograms_buckets': send_buckets,
-            'send_distribution_counts_as_monotonic': instance.get('send_distribution_counts_as_monotonic', True)
+            'send_distribution_counts_as_monotonic': instance.get('send_distribution_counts_as_monotonic', True),
             # default to True to submit _count histogram/summary as monotonic
             # counts to Datadog
         }
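Note: the Cyral change only appends a trailing comma to the last entry of a multi-line dict literal. Formatters and linters commonly enforce this (black's magic trailing comma, or flake8-commas' COM812 — which rule applies here is an assumption), because it keeps future diffs to one changed line when an entry is added. A minimal sketch:

    # Hypothetical example; with the trailing comma, adding a new key later
    # touches one line instead of two in the diff.
    options = {
        'timeout': 10,
        'verify_ssl': True,  # trailing comma after the last entry
    }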
2 changes: 1 addition & 1 deletion lighthouse/datadog_checks/lighthouse/lighthouse.py
@@ -25,7 +25,7 @@ def check(self, instance):
             raise CheckException("missing lighthouse instance url or name, please fix yaml")
 
         common_tags = instance.get("tags", [])
-        if type(common_tags) != list:
+        if not isinstance(common_tags, list):
             self.log.warning("The tags list in the lighthouse check is not configured properly")
             common_tags = []
 
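Note: this and several hunks below replace direct type comparisons with isinstance(), the pattern flake8's E721 ("do not compare types") steers toward. The guard's intent is unchanged: warn and reset when the configured tags value is not a list. A minimal sketch of the corrected pattern, with a hypothetical misconfigured value:

    common_tags = "env:prod"  # hypothetical misconfiguration: a string, not a list
    if not isinstance(common_tags, list):  # preferred over: type(common_tags) != list
        print("The tags list is not configured properly")
        common_tags = []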
10 changes: 5 additions & 5 deletions nvml/datadog_checks/nvml/api_pb2.py

(Generated protobuf file; diff not rendered.)

2 changes: 1 addition & 1 deletion puma/datadog_checks/puma/puma.py
@@ -24,7 +24,7 @@ def check(self, instance):
         response, content_type, version = self._perform_service_check(instance, control_url)
         metrics = self._extract_metrics(response)
 
-        for (key, name, reporter) in METRICS:
+        for key, name, reporter in METRICS:
             reporter(self, 'puma.{}'.format(name), metrics[key], tags)
 
     def _extract_metrics(self, response):
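Note: the Puma change drops the parentheses around the loop's unpacking target. The two spellings unpack identically and compile to the same bytecode, so this is purely stylistic; the diff does not show which lint rule flagged it. A sketch:

    pairs = [('requests', 42), ('backlog', 0)]
    for (key, value) in pairs:  # old style: redundant parentheses
        pass
    for key, value in pairs:    # new style: same unpacking behavior
        print(key, value)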
5 changes: 2 additions & 3 deletions riak_repl/datadog_checks/riak_repl/riak_repl.py
@@ -119,9 +119,8 @@ def check(self, instance):
                     "riak_repl.realtime_queue_stats.consumers." + key, val, tags=tags + ['cluster:%s' % c]
                 )
 
-            if (
-                self.exists(stats['sinks'], ['sink_stats', 'rt_sink_connected_to'])
-                and type(stats['sinks']['sink_stats']['rt_sink_connected_to']) is dict
+            if self.exists(stats['sinks'], ['sink_stats', 'rt_sink_connected_to']) and isinstance(
+                stats['sinks']['sink_stats']['rt_sink_connected_to'], dict
             ):
                 for key, val in iteritems(stats['sinks']['sink_stats']['rt_sink_connected_to']):
                     if key in self.REALTIME_SINK_CONN:
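Note: besides folding the condition into a single isinstance() call, this hunk swaps `type(x) is dict` for `isinstance(x, dict)`. The two are not strictly equivalent: isinstance() also accepts dict subclasses, which is normally what you want when inspecting parsed stats. A sketch of the difference:

    from collections import OrderedDict

    stats = OrderedDict(rt_sink_connected_to={'source': 'cluster-a'})  # illustrative data
    print(type(stats) is dict)      # False: exact-type check rejects the subclass
    print(isinstance(stats, dict))  # True:  isinstance accepts dict subclasses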
2 changes: 1 addition & 1 deletion scalr/datadog_checks/scalr/check.py
@@ -90,7 +90,7 @@ def _get_account_id(self) -> str:
 
         res: dict = self._get_json(SCALR_FIND_ACCOUNT_ENDPOINT.format(self.url, domain_name))
         data = res.get("data", [])
-        if type(data) is not list or len(data) != 1:
+        if not isinstance(data, list) or len(data) != 1:
             raise errors.CheckException("SCALR account not found.")
 
         acc_id = data[0]['id']
2 changes: 1 addition & 1 deletion stardog/datadog_checks/stardog/stardog.py
@@ -116,7 +116,7 @@ def check(self, _):
         json_doc = response.json()
         try:
             tags = self.instance["tags"]
-            if type(tags) != list:
+            if not isinstance(tags, list):
                 self.log.warning("The tags list in the Stardog check is not configured properly")
                 tags = []
         except KeyError:
2 changes: 1 addition & 1 deletion stardog/tests/test_stardog.py
@@ -67,7 +67,7 @@ def test_check_all_metrics(aggregator):
         new_key = "stardog.%s" % metric_key
         metric_val = float(metric_value[next(iter(metric_value))])
         aggregator.assert_metric(new_key, metric_type=0, count=1, value=metric_val, tags=local_tags)
-    aggregator.assert_all_metrics_covered
+    aggregator.assert_all_metrics_covered()
 
 
 class HttpServerThread(threading.Thread):
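Note: the Stardog test change is a genuine bug fix, not just style: without the parentheses, `aggregator.assert_all_metrics_covered` only looks up the bound method and discards it, so the assertion never ran. Linters report this as a useless expression (flake8-bugbear's B018 is one such check; which one fired here is an assumption). A sketch of the failure mode with a hypothetical stand-in class:

    class Aggregator:  # hypothetical stand-in for the test fixture
        def assert_all_metrics_covered(self):
            raise AssertionError("uncovered metrics")

    agg = Aggregator()
    agg.assert_all_metrics_covered    # no-op: the test would silently pass
    agg.assert_all_metrics_covered()  # actually runs the check (raises here)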
13 changes: 6 additions & 7 deletions storm/tests/common.py
@@ -1,7 +1,6 @@
 # (C) Datadog, Inc. 2010-2016
 # All rights reserved
 # Licensed under Simplified BSD License (see LICENSE)
-# flake8: noqa E501
 
 from datadog_checks.dev import get_docker_hostname
 
@@ -243,7 +242,7 @@
     "spouts": [
         {
             "errorWorkerLogLink": "http://1.2.3.4:9006/log?file=my_topology-1-1489183263%2F6707%2Fworker.log",
-            "lastError": "com.rabbitmq.client.ShutdownSignalException: clean connection shutdown; protocol method: #method<connection.close>(reply-code=200, reply-text=OK, class-id=0, method-id=0)\n\tat com.rabbitmq.client.impl.",
+            "lastError": "com.rabbitmq.client.ShutdownSignalException: clean connection shutdown; protocol method: #method<connection.close>(reply-code=200, reply-text=OK, class-id=0, method-id=0)\n\tat com.rabbitmq.client.impl.",  # noqa: E501
             "acked": 104673,
             "errorLapsedSecs": 38737,
             "errorPort": 6707,
@@ -374,9 +373,9 @@
     "topology.worker.logwriter.childopts": "-Xmx64m",
     "storm.daemon.metrics.reporter.plugins": ["org.apache.storm.daemon.metrics.reporters.JmxPreparableReporter"],
     "pacemaker.auth.method": "NONE",
-    "resource.aware.scheduler.priority.strategy": "org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy",
+    "resource.aware.scheduler.priority.strategy": "org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy",  # noqa: E501
     "topology.executor.send.buffer.size": 1024,
-    "topology.scheduler.strategy": "org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy",
+    "topology.scheduler.strategy": "org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy",  # noqa: E501
     "logviewer.port": 9006,
     "nimbus.code.sync.freq.secs": 120,
     "drpc.https.keystore.password": "",
@@ -418,7 +417,7 @@
     "topology.multilang.serializer": "org.apache.storm.multilang.JsonSerializer",
     "storm.messaging.netty.server_worker_threads": 1,
     "nimbus.blobstore.class": "org.apache.storm.blobstore.LocalFsBlobStore",
-    "resource.aware.scheduler.eviction.strategy": "org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy",
+    "resource.aware.scheduler.eviction.strategy": "org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy",  # noqa: E501
     "topology.max.error.report.per.interval": 5,
     "storm.thrift.transport": "org.apache.storm.security.auth.SimpleTransportPlugin",
     "zmq.hwm": 0,
@@ -446,7 +445,7 @@
     "storm.group.mapping.service.cache.duration.secs": 120,
     "topology.testing.always.try.serialize": False,
     "nimbus.monitor.freq.secs": 10,
-    "worker.childops": "-Xmx2048m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump",
+    "worker.childops": "-Xmx2048m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump",  # noqa: E501
     "storm.health.check.timeout.ms": 10000,
     "supervisor.supervisors": [],
     "topology.tasks": None,
@@ -455,7 +454,7 @@
     "topology.workers": 6,
     "pacemaker.base.threads": 10,
     "storm.local.dir": "/var/lib/storm/data",
-    "worker.childopts": "-Xmx%HEAP-MEM%m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump",
+    "worker.childopts": "-Xmx%HEAP-MEM%m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump",  # noqa: E501
     "storm.auth.simple-white-list.users": [],
     "topology.disruptor.batch.timeout.millis": 1,
     "topology.message.timeout.secs": 300,
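Note: the Storm change narrows suppression scope. The first hunk removes the file-level `# flake8: noqa E501` comment — a bare file-level `# flake8: noqa` silences every check in the file, and variants with codes appended are handled inconsistently across flake8 versions — while the later hunks add per-line `# noqa: E501` comments that suppress only the long-line warning, and only on the long fixture strings where it is deliberate. A sketch of the two styles (illustrative only):

    # File-level form (removed): would silence every check in the whole module.
    # flake8: noqa

    # Per-line form (added): silences only E501, only on this line.
    url = "https://example.com/a/deliberately/long/generated/fixture/path"  # noqa: E501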
