diff --git a/cyral/datadog_checks/cyral/cyral.py b/cyral/datadog_checks/cyral/cyral.py
index 7f8c2a39ac..b6f151f842 100644
--- a/cyral/datadog_checks/cyral/cyral.py
+++ b/cyral/datadog_checks/cyral/cyral.py
@@ -51,7 +51,7 @@ def __init__(self, name, init_config, instances=None):
                 'namespace': self.NAMESPACE,
                 'metrics': [self.metrics_mapper],
                 'send_histograms_buckets': send_buckets,
-                'send_distribution_counts_as_monotonic': instance.get('send_distribution_counts_as_monotonic', True)
+                'send_distribution_counts_as_monotonic': instance.get('send_distribution_counts_as_monotonic', True),
                 # default to True to submit _count histogram/summary as monotonic
                 # counts to Datadog
             }
diff --git a/lighthouse/datadog_checks/lighthouse/lighthouse.py b/lighthouse/datadog_checks/lighthouse/lighthouse.py
index f1cf25e1fc..86c610f968 100644
--- a/lighthouse/datadog_checks/lighthouse/lighthouse.py
+++ b/lighthouse/datadog_checks/lighthouse/lighthouse.py
@@ -25,7 +25,7 @@ def check(self, instance):
             raise CheckException("missing lighthouse instance url or name, please fix yaml")
 
         common_tags = instance.get("tags", [])
-        if type(common_tags) != list:
+        if not isinstance(common_tags, list):
             self.log.warning("The tags list in the lighthouse check is not configured properly")
             common_tags = []
 
diff --git a/nvml/datadog_checks/nvml/api_pb2.py b/nvml/datadog_checks/nvml/api_pb2.py
index 59608e0bbc..2680fbf99d 100644
--- a/nvml/datadog_checks/nvml/api_pb2.py
+++ b/nvml/datadog_checks/nvml/api_pb2.py
@@ -284,7 +284,7 @@
     (_message.Message,),
     {
         'DESCRIPTOR': _LISTPODRESOURCESREQUEST,
-        '__module__': 'api_pb2'
+        '__module__': 'api_pb2',
         # @@protoc_insertion_point(class_scope:v1alpha1.ListPodResourcesRequest)
     },
 )
@@ -295,7 +295,7 @@
     (_message.Message,),
     {
         'DESCRIPTOR': _LISTPODRESOURCESRESPONSE,
-        '__module__': 'api_pb2'
+        '__module__': 'api_pb2',
         # @@protoc_insertion_point(class_scope:v1alpha1.ListPodResourcesResponse)
     },
 )
@@ -306,7 +306,7 @@
     (_message.Message,),
     {
         'DESCRIPTOR': _PODRESOURCES,
-        '__module__': 'api_pb2'
+        '__module__': 'api_pb2',
         # @@protoc_insertion_point(class_scope:v1alpha1.PodResources)
     },
 )
@@ -317,7 +317,7 @@
     (_message.Message,),
     {
         'DESCRIPTOR': _CONTAINERRESOURCES,
-        '__module__': 'api_pb2'
+        '__module__': 'api_pb2',
         # @@protoc_insertion_point(class_scope:v1alpha1.ContainerResources)
     },
 )
@@ -328,7 +328,7 @@
     (_message.Message,),
     {
         'DESCRIPTOR': _CONTAINERDEVICES,
-        '__module__': 'api_pb2'
+        '__module__': 'api_pb2',
         # @@protoc_insertion_point(class_scope:v1alpha1.ContainerDevices)
     },
 )
diff --git a/puma/datadog_checks/puma/puma.py b/puma/datadog_checks/puma/puma.py
index d039a4a9c8..a6e2138878 100644
--- a/puma/datadog_checks/puma/puma.py
+++ b/puma/datadog_checks/puma/puma.py
@@ -24,7 +24,7 @@ def check(self, instance):
         response, content_type, version = self._perform_service_check(instance, control_url)
 
         metrics = self._extract_metrics(response)
-        for (key, name, reporter) in METRICS:
+        for key, name, reporter in METRICS:
             reporter(self, 'puma.{}'.format(name), metrics[key], tags)
 
     def _extract_metrics(self, response):
diff --git a/riak_repl/datadog_checks/riak_repl/riak_repl.py b/riak_repl/datadog_checks/riak_repl/riak_repl.py
index 4abe7a5397..272ab67f44 100644
--- a/riak_repl/datadog_checks/riak_repl/riak_repl.py
+++ b/riak_repl/datadog_checks/riak_repl/riak_repl.py
@@ -119,9 +119,8 @@ def check(self, instance):
                         self.gauge(
                             "riak_repl.realtime_queue_stats.consumers." + key, val, tags=tags + ['cluster:%s' % c]
                         )
-        if (
-            self.exists(stats['sinks'], ['sink_stats', 'rt_sink_connected_to'])
-            and type(stats['sinks']['sink_stats']['rt_sink_connected_to']) is dict
+        if self.exists(stats['sinks'], ['sink_stats', 'rt_sink_connected_to']) and isinstance(
+            stats['sinks']['sink_stats']['rt_sink_connected_to'], dict
         ):
             for key, val in iteritems(stats['sinks']['sink_stats']['rt_sink_connected_to']):
                 if key in self.REALTIME_SINK_CONN:
diff --git a/scalr/datadog_checks/scalr/check.py b/scalr/datadog_checks/scalr/check.py
index 2bd7320c7f..d660efbb47 100644
--- a/scalr/datadog_checks/scalr/check.py
+++ b/scalr/datadog_checks/scalr/check.py
@@ -90,7 +90,7 @@ def _get_account_id(self) -> str:
         res: dict = self._get_json(SCALR_FIND_ACCOUNT_ENDPOINT.format(self.url, domain_name))
 
         data = res.get("data", [])
-        if type(data) is not list or len(data) != 1:
+        if not isinstance(data, list) or len(data) != 1:
             raise errors.CheckException("SCALR account not found.")
 
         acc_id = data[0]['id']
diff --git a/stardog/datadog_checks/stardog/stardog.py b/stardog/datadog_checks/stardog/stardog.py
index 866f9e2ffd..f85f677239 100644
--- a/stardog/datadog_checks/stardog/stardog.py
+++ b/stardog/datadog_checks/stardog/stardog.py
@@ -116,7 +116,7 @@ def check(self, _):
         json_doc = response.json()
         try:
             tags = self.instance["tags"]
-            if type(tags) != list:
+            if not isinstance(tags, list):
                 self.log.warning("The tags list in the Stardog check is not configured properly")
                 tags = []
         except KeyError:
diff --git a/stardog/tests/test_stardog.py b/stardog/tests/test_stardog.py
index 898a996b52..be933af64a 100644
--- a/stardog/tests/test_stardog.py
+++ b/stardog/tests/test_stardog.py
@@ -67,7 +67,7 @@ def test_check_all_metrics(aggregator):
         new_key = "stardog.%s" % metric_key
         metric_val = float(metric_value[next(iter(metric_value))])
         aggregator.assert_metric(new_key, metric_type=0, count=1, value=metric_val, tags=local_tags)
-    aggregator.assert_all_metrics_covered
+    aggregator.assert_all_metrics_covered()
 
 
 class HttpServerThread(threading.Thread):
diff --git a/storm/tests/common.py b/storm/tests/common.py
index 27f5680d08..e10a9602a4 100644
--- a/storm/tests/common.py
+++ b/storm/tests/common.py
@@ -1,7 +1,6 @@
 # (C) Datadog, Inc. 2010-2016
 # All rights reserved
 # Licensed under Simplified BSD License (see LICENSE)
-# flake8: noqa E501
 
 from datadog_checks.dev import get_docker_hostname
 
@@ -243,7 +242,7 @@
     "spouts": [
         {
             "errorWorkerLogLink": "http://1.2.3.4:9006/log?file=my_topology-1-1489183263%2F6707%2Fworker.log",
-            "lastError": "com.rabbitmq.client.ShutdownSignalException: clean connection shutdown; protocol method: #method(reply-code=200, reply-text=OK, class-id=0, method-id=0)\n\tat com.rabbitmq.client.impl.",
+            "lastError": "com.rabbitmq.client.ShutdownSignalException: clean connection shutdown; protocol method: #method(reply-code=200, reply-text=OK, class-id=0, method-id=0)\n\tat com.rabbitmq.client.impl.",  # noqa: E501
             "acked": 104673,
             "errorLapsedSecs": 38737,
             "errorPort": 6707,
@@ -374,9 +373,9 @@
         "topology.worker.logwriter.childopts": "-Xmx64m",
         "storm.daemon.metrics.reporter.plugins": ["org.apache.storm.daemon.metrics.reporters.JmxPreparableReporter"],
         "pacemaker.auth.method": "NONE",
-        "resource.aware.scheduler.priority.strategy": "org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy",
+        "resource.aware.scheduler.priority.strategy": "org.apache.storm.scheduler.resource.strategies.priority.DefaultSchedulingPriorityStrategy",  # noqa: E501
         "topology.executor.send.buffer.size": 1024,
-        "topology.scheduler.strategy": "org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy",
+        "topology.scheduler.strategy": "org.apache.storm.scheduler.resource.strategies.scheduling.DefaultResourceAwareStrategy",  # noqa: E501
         "logviewer.port": 9006,
         "nimbus.code.sync.freq.secs": 120,
         "drpc.https.keystore.password": "",
@@ -418,7 +417,7 @@
         "topology.multilang.serializer": "org.apache.storm.multilang.JsonSerializer",
         "storm.messaging.netty.server_worker_threads": 1,
         "nimbus.blobstore.class": "org.apache.storm.blobstore.LocalFsBlobStore",
-        "resource.aware.scheduler.eviction.strategy": "org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy",
+        "resource.aware.scheduler.eviction.strategy": "org.apache.storm.scheduler.resource.strategies.eviction.DefaultEvictionStrategy",  # noqa: E501
         "topology.max.error.report.per.interval": 5,
         "storm.thrift.transport": "org.apache.storm.security.auth.SimpleTransportPlugin",
         "zmq.hwm": 0,
@@ -446,7 +445,7 @@
         "storm.group.mapping.service.cache.duration.secs": 120,
         "topology.testing.always.try.serialize": False,
         "nimbus.monitor.freq.secs": 10,
-        "worker.childops": "-Xmx2048m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump",
+        "worker.childops": "-Xmx2048m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump",  # noqa: E501
         "storm.health.check.timeout.ms": 10000,
         "supervisor.supervisors": [],
         "topology.tasks": None,
@@ -455,7 +454,7 @@
         "topology.workers": 6,
         "pacemaker.base.threads": 10,
         "storm.local.dir": "/var/lib/storm/data",
-        "worker.childopts": "-Xmx%HEAP-MEM%m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump",
+        "worker.childopts": "-Xmx%HEAP-MEM%m -XX:+PrintGCDetails -Xloggc:artifacts/gc.log -XX:+PrintGCDateStamps -XX:+PrintGCTimeStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=artifacts/heapdump",  # noqa: E501
         "storm.auth.simple-white-list.users": [],
         "topology.disruptor.batch.timeout.millis": 1,
         "topology.message.timeout.secs": 300,