feat(monitoring): refactor (#555)
* chore(kube-prometheus-stack): update to latest
* fix: KubeJobFailed should be SEV-3
* chore: refactor softnet alerts
* chore(monitoring): migrate to using jsonnet
* chore: refactor alerts
* chore: major monitoring refactor
* fix: resolve broken alerts
* fix: apiserver selector
* chore: more cleanups
* chore: switch severity levels from SEV-N to PN
* fix: improve port binding alerts
* fix: admin state alert for Neutron
* chore: map more alerts to severity levels
* drop uuid
* Revert "drop uuid"
This reverts commit ad0f05d0e7564759e8259c2cc53c2e2f5c73e1b8.
* fix: drop recording rules
* chore: switch Alertmanager config to jsonnet
* fix: idempotence for monitoring
* chore: fix linters
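
For reviewers: a quick way to render the generated rule groups locally, as a sketch (assumes `jsonnet` and jsonnet-bundler's `jb` are installed; run from the repository root):

```console
$ cd roles/kube_prometheus_stack/files/jsonnet
$ jb install
$ jsonnet -J vendor rules.jsonnet
```

The output is a single JSON object keyed by mixin name, as assembled by `rules.jsonnet` below.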
diff --git a/roles/defaults/vars/main.yml b/roles/defaults/vars/main.yml
index e4ca646..9d5f503 100644
--- a/roles/defaults/vars/main.yml
+++ b/roles/defaults/vars/main.yml
@@ -13,7 +13,7 @@
# under the License.
_atmosphere_images:
- alertmanager: quay.io/prometheus/alertmanager:v0.24.0
+ alertmanager: quay.io/prometheus/alertmanager:v0.26.0
barbican_api: quay.io/vexxhost/barbican@sha256:fde302ee731cca6019feaf87400f5a377c3e38f459bc88d4c7677f2967e0939b # image-source: quay.io/vexxhost/barbican:zed
barbican_db_sync: quay.io/vexxhost/barbican@sha256:fde302ee731cca6019feaf87400f5a377c3e38f459bc88d4c7677f2967e0939b # image-source: quay.io/vexxhost/barbican:zed
bootstrap: quay.io/vexxhost/heat@sha256:755225f9a63c0968f1ceeda3a2f06c66dd8d247ff00308f549e66496aa8f59d0 # image-source: quay.io/vexxhost/heat:zed
@@ -58,8 +58,8 @@
glance_metadefs_load: quay.io/vexxhost/glance@sha256:32aaf33e83f7285e2ad04cb8e692068e072d9d852b6b2625742995724ec77508 # image-source: quay.io/vexxhost/glance:zed
glance_registry: quay.io/vexxhost/glance@sha256:32aaf33e83f7285e2ad04cb8e692068e072d9d852b6b2625742995724ec77508 # image-source: quay.io/vexxhost/glance:zed
glance_storage_init: quay.io/vexxhost/glance@sha256:32aaf33e83f7285e2ad04cb8e692068e072d9d852b6b2625742995724ec77508 # image-source: quay.io/vexxhost/glance:zed
- grafana_sidecar: quay.io/kiwigrid/k8s-sidecar:1.19.2
- grafana: docker.io/grafana/grafana:9.2.3
+ grafana_sidecar: quay.io/kiwigrid/k8s-sidecar:1.24.6
+ grafana: docker.io/grafana/grafana:10.1.0
haproxy: docker.io/library/haproxy:2.5
heat_api: quay.io/vexxhost/heat@sha256:755225f9a63c0968f1ceeda3a2f06c66dd8d247ff00308f549e66496aa8f59d0 # image-source: quay.io/vexxhost/heat:zed
heat_cfn: quay.io/vexxhost/heat@sha256:755225f9a63c0968f1ceeda3a2f06c66dd8d247ff00308f549e66496aa8f59d0 # image-source: quay.io/vexxhost/heat:zed
@@ -91,7 +91,7 @@
kube_etcd: registry.k8s.io/etcd:3.5.6-0
kube_proxy: registry.k8s.io/kube-proxy:v1.22.17
kube_scheduler: registry.k8s.io/kube-scheduler:v1.22.17
- kube_state_metrics: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.6.0
+ kube_state_metrics: registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.9.2
kubectl: docker.io/bitnami/kubectl@sha256:bd420268ae3424b3ab3174e26b895fd8dc464589a8cd62654b9aa739d00ff280 # image-source: docker.io/bitnami/kubectl:latest
libvirt: quay.io/vexxhost/libvirtd@sha256:d400204e0332dc815827e5902038a1c672446c58633ba97ede9e20f8ae9a2349 # image-source: quay.io/vexxhost/libvirtd:yoga-focal
local_path_provisioner_helper: docker.io/library/busybox:1.36.0
@@ -161,17 +161,17 @@
percona_xtradb_cluster: docker.io/percona/percona-xtradb-cluster:5.7.39-31.61
placement_db_sync: quay.io/vexxhost/placement@sha256:ef2f95bdc3c88504b3a2297bf314728788126008f9bafba337e53d48a7dd19c3 # image-source: quay.io/vexxhost/placement:zed
placement: quay.io/vexxhost/placement@sha256:ef2f95bdc3c88504b3a2297bf314728788126008f9bafba337e53d48a7dd19c3 # image-source: quay.io/vexxhost/placement:zed
- prometheus_config_reloader: quay.io/prometheus-operator/prometheus-config-reloader:v0.60.1
+ prometheus_config_reloader: quay.io/prometheus-operator/prometheus-config-reloader:v0.67.1
prometheus_ethtool_exporter: quay.io/vexxhost/ethtool-exporter:5f05120a743a71adcbceb9f8ee1d43ecc7c4183a
prometheus_ipmi_exporter: us-docker.pkg.dev/vexxhost-infra/openstack/ipmi-exporter:1.4.0
prometheus_memcached_exporter: quay.io/prometheus/memcached-exporter:v0.10.0
prometheus_mysqld_exporter: quay.io/prometheus/mysqld-exporter:v0.14.0
- prometheus_node_exporter: quay.io/prometheus/node-exporter:v1.3.1
+ prometheus_node_exporter: quay.io/prometheus/node-exporter:v1.6.1
prometheus_openstack_exporter: ghcr.io/openstack-exporter/openstack-exporter:1.6.0
- prometheus_operator_kube_webhook_certgen: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v1.3.0
- prometheus_operator: quay.io/prometheus-operator/prometheus-operator:v0.60.1
+ prometheus_operator_kube_webhook_certgen: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20221220-controller-v1.5.1-58-g787ea74b6
+ prometheus_operator: quay.io/prometheus-operator/prometheus-operator:v0.67.1
prometheus_pushgateway: docker.io/prom/pushgateway:v1.4.2
- prometheus: quay.io/prometheus/prometheus:v2.39.1
+ prometheus: quay.io/prometheus/prometheus:v2.46.0
rabbit_init: docker.io/library/rabbitmq:3.10.2-management
rabbitmq_cluster_operator: docker.io/rabbitmqoperator/cluster-operator:1.13.1
rabbitmq_credential_updater: docker.io/rabbitmqoperator/default-user-credential-updater:1.0.2
diff --git a/roles/kube_prometheus_stack/README.md b/roles/kube_prometheus_stack/README.md
index ee45fa8..62beb57 100644
--- a/roles/kube_prometheus_stack/README.md
+++ b/roles/kube_prometheus_stack/README.md
@@ -2,16 +2,69 @@
There is a Grafana deployment with a few dashboards that are created by default
and a Prometheus deployment that is used to collect metrics from the cluster
-which sends alerts to AlertManager. In addition, Loki is deployed to collect
+which sends alerts to AlertManager. In addition, Loki is deployed to collect
logs from the cluster using Vector.
+## Philosophy
+
+Atmosphere's monitoring philosophy is strongly aligned with the principles
+outlined in the Google Site Reliability Engineering (SRE) book. Our approach
+focuses on alerting on conditions that are symptomatic of issues which directly
+impact the service or system health, rather than simply monitoring the state of
+individual components.
+
+### Severity Levels
+
+Our alerting system classifies incidents into different severity levels based on
+their impact on the system and users.
+
+- **P1** (Critical): This level is used for incidents causing a complete
+ service disruption or significant loss of functionality across the entire
+ Atmosphere platform. Immediate response, attention, and action are necessary
+ regardless of business hours.
+
+- **P2** (High): This level is for incidents that affect a large group of users
+ or critical system components. These incidents require swift attention and
+ action, regardless of business hours, but do not cause a total disruption.
+
+- **P3** (Moderate): This level is for incidents that affect a smaller group of
+ users or a single system. These incidents require attention and may necessitate
+ action during business hours.
+
+- **P4** (Low): This level is used for minor issues that have a limited impact
+ on a small subset of users or system functionality. These incidents require
+ attention and action, if necessary, during standard business hours.
+
+- **P5** (Informational): This is the lowest level of severity, used for
+ providing information about normal system activities or minor issues that
+ don't significantly impact users or system functionality. These incidents
+ typically do not require immediate attention or action and are addressed
+ during standard business hours.
+
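
For reference, a minimal sketch of how these levels line up with the legacy `severity` labels emitted by upstream mixins, mirroring the `defaultSeverityMapping` and `customSeverityMapping` this change introduces in `mixins.libsonnet`:

```jsonnet
// Legacy severity label -> default priority level.
local defaultSeverityMapping = {
  critical: 'P1',
  warning: 'P3',
  info: 'P5',
};

// Per-alert overrides, keyed as 'AlertName:severity'.
local customSeverityMapping = {
  'KubeJobFailed:warning': 'P4',
};
```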
+### Alerting Philosophy
+
+Our alerting philosophy aims to alert the right people at the right time. Most
+alerts that affect a single system trigger a lower priority level (P4 or P5).
+However, if an issue affects the entire control plane of a specific service,
+it may escalate to P3 or P2, and if the whole service is unavailable, it
+becomes a P1 (see the sketch at the end of this section).
+
+We believe in minimizing alert noise to ensure that alerts are meaningful and
+actionable. Our goal is for every alert to provide enough information to
+initiate an immediate and effective response; for high-priority alerts, that
+holds regardless of business hours.
+
+We continue to refine our monitoring and alerting strategies to ensure that we
+are effectively identifying and responding to incidents. The ultimate goal is
+to provide a reliable and high-quality service to all our users.
+
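
A condensed sketch of this escalation pattern, modeled on the tiered softnet alerts this change adds in `legacy.libsonnet` (the actual rules use distinct `SingleNode`/`MultipleNodes`/`MajorityNodes` alert names per tier):

```jsonnet
// One symptom, escalating in priority with the fraction of nodes affected.
local tiered(fraction, severity) = {
  alert: 'NodeSoftNetDropped',
  expr: 'count(node:softnet:dropped:1m > 0) > (count(node:softnet:dropped:1m) * %s)' % fraction,
  'for': '1m',
  labels: { severity: severity },
};

[
  tiered('0', 'P3'),     // any single node
  tiered('0.5', 'P2'),   // more than half of the nodes
  tiered('0.75', 'P1'),  // the majority of the nodes
]
```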
## Viewing data
By default, an `Ingress` is created for Grafana using the `kube_prometheus_stack_grafana_host`
-variable. The default login is `admin` and the password is the value of
+variable. The default login is `admin` and the password is the value of
`kube_prometheus_stack_grafana_admin_password`.
-You can view the existing dashboards by going to _Manage_ > _Dashboards_. You
+You can view the existing dashboards by going to _Manage_ > _Dashboards_. You
can also check any alerts that are currently firing by going to _Alerting_ >
_Alerts_.
@@ -30,7 +83,7 @@
2. Copy the API key that is generated for you and set up correct assignment
rules inside OpsGenie.
3. Create a new heartbeat inside OpsGenie, you can do this by going to
- _Settings_ > _Heartbeats_ > _Create Heartbeat_. Set the interval to 1 minute.
+ _Settings_ > _Heartbeats_ > _Create Heartbeat_. Set the interval to 1 minute.
Afterwards, you can configure the following options for the Atmosphere config:
@@ -38,24 +91,9 @@
kube_prometheus_stack_helm_values:
alertmanager:
config:
- route:
- group_by:
- - alertname
- - severity
- receiver: opsgenie
- routes:
- - receiver: "null"
- matchers:
- - alertname = "InfoInhibitor"
- - receiver: heartbeat
- group_wait: 0s
- group_interval: 30s
- repeat_interval: 15s
- matchers:
- - alertname = "Watchdog"
receivers:
- name: "null"
- - name: opsgenie
+ - name: notifier
opsgenie_configs:
- api_key: API_KEY
message: >-
@@ -64,14 +102,14 @@
{%- endraw %}
priority: >-
{% raw -%}
- {{ if eq .GroupLabels.severity "critical" -}}
+ {{- if eq .GroupLabels.severity "critical" -}}
P1
{{- else if eq .GroupLabels.severity "warning" -}}
P3
{{- else if eq .GroupLabels.severity "info" -}}
P5
{{- else -}}
- P3
+ {{ .GroupLabels.severity }}
{{- end -}}
{%- endraw %}
description: |-
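
With this change, the priority template passes through group labels that already carry a P-level: a group labeled `severity: P2` now renders as priority `P2` instead of falling back to the old hardcoded `P3`, while the legacy `critical`/`warning`/`info` labels still map to P1/P3/P5.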
@@ -121,7 +159,7 @@
traffic which can be a sign of a misconfigured network or a malicious actor.
This can result in high CPU usage on the node and can cause the node to become
-unresponsive. Also, it can be the cause of a very high amount of software
+unresponsive. It can also cause a very high number of software
interrupts on the node.
In order to find the root cause of this issue, you can use the following
@@ -132,7 +170,7 @@
```
With the command above, you're able to see which IP addresses are sending the
-multicast traffic. Once you have the IP address, you can use the following
+multicast traffic. Once you have the IP address, you can use the following
command to find the server behind it:
```console
diff --git a/roles/kube_prometheus_stack/files/jsonnet/ceph.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/ceph.libsonnet
deleted file mode 100644
index b803d02..0000000
--- a/roles/kube_prometheus_stack/files/jsonnet/ceph.libsonnet
+++ /dev/null
@@ -1,32 +0,0 @@
-local ceph = import 'vendor/github.com/ceph/ceph/monitoring/ceph-mixin/mixin.libsonnet';
-
-local DISABLED_NODE_ALERTS = [
- // * Dropped `CephNodeDiskspaceWarning` because we already have a
- // few alerts like `NodeFilesystemSpaceFillingUp`, etc.
- 'CephNodeDiskspaceWarning',
-
- // * Dropped `CephNodeNetworkPacketDrops` due to noisy alerts with
- // no actionable items to fix it.
- 'CephNodeNetworkPacketDrops',
-];
-
-local disableAlerts = {
- prometheusAlerts+:: {
- groups: std.map(
- function(group)
- if group.name == 'nodes' then
- group {
- rules: std.filter(
- function(rule)
- std.setMember(rule.alert, DISABLED_NODE_ALERTS) == false,
- group.rules
- ),
- }
- else
- group,
- super.groups
- ),
- },
-};
-
-(ceph + disableAlerts)
diff --git a/roles/kube_prometheus_stack/files/jsonnet/jsonnetfile.json b/roles/kube_prometheus_stack/files/jsonnet/jsonnetfile.json
index 4e393e3..5da830d 100644
--- a/roles/kube_prometheus_stack/files/jsonnet/jsonnetfile.json
+++ b/roles/kube_prometheus_stack/files/jsonnet/jsonnetfile.json
@@ -22,6 +22,15 @@
{
"source": {
"git": {
+ "remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin.git",
+ "subdir": ""
+ }
+ },
+ "version": "master"
+ },
+ {
+ "source": {
+ "git": {
"remote": "https://github.com/povilasv/coredns-mixin.git",
"subdir": ""
}
@@ -31,11 +40,29 @@
{
"source": {
"git": {
+ "remote": "https://github.com/prometheus/alertmanager.git",
+ "subdir": "doc/alertmanager-mixin"
+ }
+ },
+ "version": "main"
+ },
+ {
+ "source": {
+ "git": {
"remote": "https://github.com/prometheus/mysqld_exporter.git",
"subdir": "mysqld-mixin"
}
},
"version": "main"
+ },
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/prometheus/node_exporter.git",
+ "subdir": "docs/node-mixin"
+ }
+ },
+ "version": "master"
}
],
"legacyImports": true
diff --git a/roles/kube_prometheus_stack/files/jsonnet/jsonnetfile.lock.json b/roles/kube_prometheus_stack/files/jsonnet/jsonnetfile.lock.json
index 56ba27a..db906e8 100644
--- a/roles/kube_prometheus_stack/files/jsonnet/jsonnetfile.lock.json
+++ b/roles/kube_prometheus_stack/files/jsonnet/jsonnetfile.lock.json
@@ -24,6 +24,16 @@
{
"source": {
"git": {
+ "remote": "https://github.com/grafana/grafonnet-lib.git",
+ "subdir": "grafonnet-7.0"
+ }
+ },
+ "version": "a1d61cce1da59c71409b99b5c7568511fec661ea",
+ "sum": "gCtR9s/4D5fxU9aKXg0Bru+/njZhA0YjLjPiASc61FM="
+ },
+ {
+ "source": {
+ "git": {
"remote": "https://github.com/grafana/jsonnet-libs.git",
"subdir": "grafana-builder"
}
@@ -44,6 +54,16 @@
{
"source": {
"git": {
+ "remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin.git",
+ "subdir": ""
+ }
+ },
+ "version": "63337d921db856bbcd2e91814a0ac90c250410d6",
+ "sum": "x8/bMVUaNMZEh6mcwhLmTlBJnaleRqhhV+w/+h0H0Pc="
+ },
+ {
+ "source": {
+ "git": {
"remote": "https://github.com/povilasv/coredns-mixin.git",
"subdir": ""
}
@@ -54,12 +74,32 @@
{
"source": {
"git": {
+ "remote": "https://github.com/prometheus/alertmanager.git",
+ "subdir": "doc/alertmanager-mixin"
+ }
+ },
+ "version": "6cbe2eb21f7cd770d8f247c545941f3a8c97f6a0",
+ "sum": "1d7ZKYArJKacAWXLUz0bRC1uOkozee/PPw97/W5zGhc="
+ },
+ {
+ "source": {
+ "git": {
"remote": "https://github.com/prometheus/mysqld_exporter.git",
"subdir": "mysqld-mixin"
}
},
"version": "503f1fa222f0afc74a1dcf4a0ef5a7c2dfa4d105",
"sum": "G69++5ExKgQ9niW0Owmw0orc8voP0Qll2WZJ1fHAqzE="
+ },
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/prometheus/node_exporter.git",
+ "subdir": "docs/node-mixin"
+ }
+ },
+ "version": "381f32b1c5943afb35940b88c45c3fa4bf5fc1de",
+ "sum": "By6n6U10hYDogUsyhsaKZehbhzxBZZobJloiKyKadgM="
}
],
"legacyImports": false
diff --git a/roles/kube_prometheus_stack/files/jsonnet/legacy.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/legacy.libsonnet
index a6126f6..649c95e 100644
--- a/roles/kube_prometheus_stack/files/jsonnet/legacy.libsonnet
+++ b/roles/kube_prometheus_stack/files/jsonnet/legacy.libsonnet
@@ -1,524 +1,366 @@
{
'ethtool-exporter': {
- groups: [
- {
- name: 'rules',
- rules: [
- {
- alert: 'EthernetReceiveDiscards',
- expr: 'rate(node_net_ethtool{type="rx_discards"}[1m]) > 0',
- labels: {
- severity: 'warning',
+ prometheusAlerts+:: {
+ groups: [
+ {
+ name: 'rules',
+ rules: [
+ {
+ alert: 'EthernetReceiveDiscards',
+ expr: 'rate(node_net_ethtool{type="rx_discards"}[1m]) > 0',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- ],
- },
- ],
+ ],
+ },
+ ],
+ },
},
'ipmi-exporter': {
- groups: [
- {
- name: 'rules',
- rules: [
- {
- alert: 'IpmiCollectorDown',
- expr: 'ipmi_up == 0',
- },
- ],
- },
- {
- name: 'collectors-state-warning',
- rules: [
- {
- alert: 'IpmiCurrent',
- expr: 'ipmi_current_state == 1',
- labels: {
- severity: 'warning',
+ prometheusAlerts+:: {
+ groups: [
+ {
+ name: 'rules',
+ rules: [
+ {
+ alert: 'IpmiCollectorDown',
+ expr: 'ipmi_up == 0',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- {
- alert: 'IpmiFanSpeed',
- expr: 'ipmi_fan_speed_state == 1',
- labels: {
- severity: 'warning',
+ ],
+ },
+ {
+ name: 'collectors-state-warning',
+ rules: [
+ {
+ alert: 'IpmiCurrent',
+ expr: 'ipmi_current_state == 1',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- {
- alert: 'IpmiPower',
- expr: 'ipmi_power_state == 1',
- labels: {
- severity: 'warning',
+ {
+ alert: 'IpmiFanSpeed',
+ expr: 'ipmi_fan_speed_state == 1',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- {
- alert: 'IpmiSensor',
- expr: 'ipmi_sensor_state == 1',
- labels: {
- severity: 'warning',
+ {
+ alert: 'IpmiPower',
+ expr: 'ipmi_power_state == 1',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- {
- alert: 'IpmiTemperature',
- expr: 'ipmi_temperature_state == 1',
- labels: {
- severity: 'warning',
+ {
+ alert: 'IpmiSensor',
+ expr: 'ipmi_sensor_state == 1',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- {
- alert: 'IpmiVoltage',
- expr: 'ipmi_voltage_state == 1',
- labels: {
- severity: 'warning',
+ {
+ alert: 'IpmiTemperature',
+ expr: 'ipmi_temperature_state == 1',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- ],
- },
- {
- name: 'collectors-state-critical',
- rules: [
- {
- alert: 'IpmiCurrent',
- expr: 'ipmi_current_state == 2',
- labels: {
- severity: 'critical',
+ {
+ alert: 'IpmiVoltage',
+ expr: 'ipmi_voltage_state == 1',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- {
- alert: 'IpmiFanSpeed',
- expr: 'ipmi_fan_speed_state == 2',
- labels: {
- severity: 'critical',
+ ],
+ },
+ {
+ name: 'collectors-state-critical',
+ rules: [
+ {
+ alert: 'IpmiCurrent',
+ expr: 'ipmi_current_state == 2',
+ labels: {
+ severity: 'critical',
+ },
},
- },
- {
- alert: 'IpmiPower',
- expr: 'ipmi_power_state == 2',
- labels: {
- severity: 'critical',
+ {
+ alert: 'IpmiFanSpeed',
+ expr: 'ipmi_fan_speed_state == 2',
+ labels: {
+ severity: 'critical',
+ },
},
- },
- {
- alert: 'IpmiSensor',
- expr: 'ipmi_sensor_state{name!="TPM Presence"} == 2',
- labels: {
- severity: 'critical',
+ {
+ alert: 'IpmiPower',
+ expr: 'ipmi_power_state == 2',
+ labels: {
+ severity: 'critical',
+ },
},
- },
- {
- alert: 'IpmiTemperature',
- expr: 'ipmi_temperature_state == 2',
- labels: {
- severity: 'critical',
+ {
+ alert: 'IpmiSensor',
+ expr: 'ipmi_sensor_state{name!="TPM Presence"} == 2',
+ labels: {
+ severity: 'critical',
+ },
},
- },
- {
- alert: 'IpmiVoltage',
- expr: 'ipmi_voltage_state == 2',
- labels: {
- severity: 'critical',
+ {
+ alert: 'IpmiTemperature',
+ expr: 'ipmi_temperature_state == 2',
+ labels: {
+ severity: 'critical',
+ },
},
- },
- ],
- },
- ],
+ {
+ alert: 'IpmiVoltage',
+ expr: 'ipmi_voltage_state == 2',
+ labels: {
+ severity: 'critical',
+ },
+ },
+ ],
+ },
+ ],
+ },
},
'node-exporter-local': {
- groups: [
- {
- name: 'node',
- rules: [
- {
- alert: 'NodeHighLoadAverage',
- expr: 'node_load5 / count(node_cpu_seconds_total{mode="system"}) without (cpu, mode) > 1.5',
- 'for': '30m',
- labels: {
- severity: 'warning',
+ prometheusRules+:: {
+ groups: [
+ {
+ name: 'softnet.rules',
+ rules:
+ local recordingRule(metric, expr) = {
+ record: 'node:softnet:' + metric + ':1m',
+ expr: expr,
+ };
+ [
+ recordingRule('backlog', 'sum(node_softnet_backlog_len) by (instance)'),
+ recordingRule('squeezed', 'sum(rate(node_softnet_times_squeezed_total[1m])) by (instance)'),
+ recordingRule('dropped', 'sum(rate(node_softnet_dropped_total[1m])) by (instance)'),
+ ],
+ },
+ ],
+ },
+ prometheusAlerts+:: {
+ groups: [
+ {
+ name: 'node',
+ rules: [
+ {
+ alert: 'NodeHighLoadAverage',
+ expr: 'node_load5 / count(node_cpu_seconds_total{mode="system"}) without (cpu, mode) > 1.5',
+ 'for': '30m',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- {
- alert: 'NodeHighMemoryUsage',
- expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) * 100 < 2.5',
- 'for': '2m',
- labels: {
- severity: 'critical',
+ {
+ alert: 'NodeHighMemoryUsage',
+ expr: '(node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) * 100 < 2.5',
+ 'for': '2m',
+ labels: {
+ severity: 'critical',
+ },
},
- },
- {
- alert: 'NodeHighCpuUsage',
- expr: "sum by(instance)(irate(node_cpu_seconds_total{mode='idle'}[5m])) < 1",
- 'for': '2m',
- labels: {
- severity: 'warning',
+ {
+ alert: 'NodeHighCpuUsage',
+ expr: "sum by(instance)(irate(node_cpu_seconds_total{mode='idle'}[5m])) < 1",
+ 'for': '2m',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- {
- alert: 'NodeLowEntropy',
- expr: 'node_entropy_available_bits / node_entropy_pool_size_bits < 0.20',
- 'for': '5m',
- labels: {
- severity: 'warning',
+ {
+ alert: 'NodeLowEntropy',
+ expr: 'node_entropy_available_bits / node_entropy_pool_size_bits < 0.20',
+ 'for': '5m',
+ labels: {
+ severity: 'P5',
+ },
},
- },
- {
- alert: 'NodeNonLTSKernel',
- expr: 'node_uname_info{release!~"^5.(4|15).*"}',
- labels: {
- severity: 'warning',
+ {
+ alert: 'NodeNonLTSKernel',
+ expr: 'node_uname_info{release!~"^5.(4|15).*"}',
+ labels: {
+ severity: 'P5',
+ },
},
- },
- ],
- },
- {
- name: 'network',
- rules: [
- {
- alert: 'NodeNetworkMulticast',
- expr: 'rate(node_network_receive_multicast_total[1m]) > 1000',
- 'for': '5m',
- labels: {
- severity: 'critical',
+ ],
+ },
+ {
+ name: 'network',
+ rules: [
+ {
+ alert: 'NodeNetworkMulticast',
+ expr: 'rate(node_network_receive_multicast_total[1m]) > 1000',
+ 'for': '5m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ summary: 'High multicast traffic on node {{ $labels.instance }}: {{ $value }} packets/sec',
+ description: 'This can result in high software interrupt load on the node which can bring network performance down.',
+ runbook_url: 'https://github.com/vexxhost/atmosphere/tree/main/roles/kube_prometheus_stack#NodeNetworkMulticast',
+ },
},
- annotations: {
- summary: 'High multicast traffic on node {{ $labels.instance }}: {{ $value }} packets/sec',
- description: 'This can result in high software interrupt load on the node which can bring network performance down.',
- runbook_url: 'https://github.com/vexxhost/atmosphere/tree/main/roles/kube_prometheus_stack#NodeNetworkMulticast'
- },
- },
- ],
- },
- {
- name: 'softnet',
- rules: [
- {
- alert: 'NodeSoftNetBacklogLength',
- expr: 'sum(node_softnet_backlog_len) by (instance) > 5000',
- 'for': '1m',
- labels: {
- severity: 'critical',
- },
- },
- {
- alert: 'NodeSoftNetDrops',
- expr: 'sum(rate(node_softnet_dropped_total[1m])) by (instance) != 0',
- 'for': '1m',
- labels: {
- severity: 'critical',
- },
- },
- {
- alert: 'NodeSoftNetTimesSqueezed',
- expr: 'sum(rate(node_softnet_times_squeezed_total[1m])) by (instance) > 10',
- 'for': '10m',
- labels: {
- severity: 'warning',
- },
- },
- ],
- },
- ],
- },
- 'openstack-exporter': {
- groups: [
- {
- name: 'cinder',
- rules: [
- {
- alert: 'CinderAgentDown',
- annotations: {
- description: 'The service {{ $labels.exported_service }} running on {{ $labels.hostname }} is being reported as down.',
- summary: '[{{ $labels.hostname }}] {{ $labels.exported_service }} down',
- },
- expr: 'openstack_cinder_agent_state != 1',
- labels: {
- severity: 'warning',
- },
- },
- {
- alert: 'CinderAgentDown',
- annotations: {
- description: 'The service {{ $labels.exported_service }} running on {{ $labels.hostname }} is being reported as down for 5 minutes. This can affect volume operations so it must be resolved as quickly as possible.',
- summary: '[{{ $labels.hostname }}] {{ $labels.exported_service }} down',
- },
- expr: 'openstack_cinder_agent_state != 1',
- 'for': '5m',
- labels: {
- severity: 'critical',
- },
- },
- {
- alert: 'CinderAgentDisabled',
- annotations: {
- description: 'The service {{ $labels.exported_service }} running on {{ $labels.hostname }} has been disabled for 60 minutes. This can affect volume operations so it must be resolved as quickly as possible.',
- summary: '[{{ $labels.hostname }}] {{ $labels.exported_service }} disabled',
- },
- expr: 'openstack_cinder_agent_state{adminState!="enabled"}',
- 'for': '1h',
- labels: {
- severity: 'warning',
- },
- },
- {
- alert: 'CinderVolumeInError',
- annotations: {
- description: 'The volume {{ $labels.id }} has been in ERROR state for over 24 hours. It must be cleaned up or removed in order to provide a consistent customer experience.',
- summary: '[{{ $labels.id }}] Volume in ERROR statef endraw %}',
- },
- expr: 'openstack_cinder_volume_status{status=~"error.*"}',
- 'for': '24h',
- labels: {
- severity: 'warning',
- },
- },
- ],
- },
- {
- name: 'neutron',
- rules: [
- {
- alert: 'NeutronAgentDown',
- annotations: {
- description: 'The service {{ $labels.exported_service }} running on {{ $labels.hostname }} is being reported as down.',
- summary: '[{{ $labels.hostname }}] {{ $labels.exported_service }} down',
- },
- expr: 'openstack_neutron_agent_state != 1',
- labels: {
- severity: 'warning',
- },
- },
- {
- alert: 'NeutronAgentDown',
- annotations: {
- description: 'The service {{ $labels.exported_service }} running on {{ $labels.hostname }} is being reported as down for 5 minutes. This can affect network operations so it must be resolved as quickly as possible.',
- summary: '[{{ $labels.hostname }}] {{ $labels.exported_service }} down',
- },
- expr: 'openstack_neutron_agent_state != 1',
- 'for': '5m',
- labels: {
- severity: 'critical',
- },
- },
- {
- alert: 'NeutronAgentDisabled',
- annotations: {
- description: 'The service {{ $labels.exported_service }} running on {{ $labels.hostname }} has been disabled for 60 minutes. This can affect network operations so it must be resolved as quickly as possible.',
- summary: '[{{ $labels.hostname }}] {{ $labels.exported_service }} disabled',
- },
- expr: 'openstack_neutron_agent_state{adminState!="up"}',
- 'for': '1h',
- labels: {
- severity: 'warning',
- },
- },
- {
- alert: 'NeutronBindingFailedPorts',
- annotations: {
- description: 'The NIC {{ $labels.mac_address }} of {{ $labels.device_owner }} has binding failed port now.',
- summary: '[{{ $labels.device_owner }}] {{ $labels.mac_address }} binding failed',
- },
- expr: 'openstack_neutron_port{binding_vif_type="binding_failed"} != 0',
- labels: {
- severity: 'warning',
- },
- },
- {
- alert: 'NeutronNetworkOutOfIPs',
- annotations: {
- description: 'The subnet {{ $labels.subnet_name }} within {{ $labels.network_name }} is currently at {{ $value }}% utilization. If the IP addresses run out, it will impact the provisioning of new ports.',
- summary: '[{{ $labels.network_name }}] {{ $labels.subnet_name }} running out of IPs',
- },
- expr: 'sum by (network_id) (openstack_neutron_network_ip_availabilities_used{project_id!=""}) / sum by (network_id) (openstack_neutron_network_ip_availabilities_total{project_id!=""}) * 100 > 80',
- labels: {
- severity: 'warning',
- },
- },
- ],
- },
- {
- name: 'nova',
- rules: [
- {
- alert: 'NovaAgentDown',
- annotations: {
- description: 'The service {{ $labels.exported_service }} running on {{ $labels.hostname }} is being reported as down.',
- summary: '[{{ $labels.hostname }}] {{ $labels.exported_service }} down',
- },
- expr: 'openstack_nova_agent_state != 1',
- labels: {
- severity: 'warning',
- },
- },
- {
- alert: 'NovaAgentDown',
- annotations: {
- description: 'The service {{ $labels.exported_service }} running on {{ $labels.hostname }} is being reported as down. This can affect compute operations so it must be resolved as quickly as possible.',
- summary: '[{{ $labels.hostname }}] {{ $labels.exported_service }} down',
- },
- expr: 'openstack_nova_agent_state != 1',
- 'for': '5m',
- labels: {
- severity: 'critical',
- },
- },
- {
- alert: 'NovaAgentDisabled',
- annotations: {
- description: 'The service {{ $labels.exported_service }} running on {{ $labels.hostname }} has been disabled for 60 minutes. This can affect compute operations so it must be resolved as quickly as possible.',
- summary: '[{{ $labels.hostname }}] {{ $labels.exported_service }} disabled',
- },
- expr: 'openstack_nova_agent_state{adminState!="enabled"}',
- 'for': '1h',
- labels: {
- severity: 'warning',
- },
- },
- {
- alert: 'NovaInstanceInError',
- annotations: {
- description: 'The instance {{ $labels.id }} has been in ERROR state for over 24 hours. It must be cleaned up or removed in order to provide a consistent customer experience.',
- summary: '[{{ $labels.id }}] Instance in ERROR state',
- },
- expr: 'openstack_nova_server_status{status="ERROR"}',
- 'for': '24h',
- labels: {
- severity: 'warning',
- },
- },
- {
- alert: 'NovaFailureRisk',
- annotations: {
- description: 'The cloud capacity will be at {{ $value }} in the event of the failure of a single hypervisor which puts the cloud at risk of not being able to recover should any hypervisor failures occur. Please ensure that adequate amount of infrastructure is assigned to this deployment to prevent this.',
- summary: '[nova] Failure risk',
- },
- expr: '(sum(openstack_nova_memory_available_bytes-openstack_nova_memory_used_bytes) - max(openstack_nova_memory_used_bytes)) / sum(openstack_nova_memory_available_bytes-openstack_nova_memory_used_bytes) * 100 < 0.25',
- 'for': '6h',
- labels: {
- severity: 'warning',
- },
- },
- {
- alert: 'NovaCapacity',
- annotations: {
- description: 'The cloud capacity is currently at `{{ $value }}` which means there is a risk of running out of capacity due to the timeline required to add new nodes. Please ensure that adequate amount of infrastructure is assigned to this deployment to prevent this.',
- summary: '[nova] Capacity risk',
- },
- expr: 'sum ( openstack_nova_memory_used_bytes + on(hostname) group_left(adminState) (0 * openstack_nova_agent_state{exported_service="nova-compute",adminState="enabled"}) ) / sum ( openstack_nova_memory_available_bytes + on(hostname) group_left(adminState) (0 * openstack_nova_agent_state{exported_service="nova-compute",adminState="enabled"}) ) * 100 > 75',
- 'for': '6h',
- labels: {
- severity: 'warning',
- },
- },
- ],
- },
- ],
+ ],
+ },
+ {
+ name: 'softnet',
+ rules:
+ local capitalize(s) = std.asciiUpper(std.substr(s, 0, 1)) + std.substr(s, 1, std.length(s) - 1);
+ local alertRule(metric, threshold, nodesAffected) = {
+ alert: {
+ '0': 'SingleNodeSoftNet' + capitalize(metric),
+ '0.5': 'MultipleNodesSoftNet' + capitalize(metric),
+ '0.75': 'MajorityNodesSoftNet' + capitalize(metric),
+ }[nodesAffected],
+ expr: 'count(node:softnet:%s:1m > %s) > (count(node:softnet:%s:1m) * %s)' % [metric, threshold, metric, nodesAffected],
+ 'for': '1m',
+ labels: {
+ severity: {
+ '0': 'P3',
+ '0.5': 'P2',
+ '0.75': 'P1',
+ }[nodesAffected],
+ },
+ };
+ [
+ alertRule('backlog', '5000', '0'),
+ alertRule('backlog', '5000', '0.5'),
+ alertRule('backlog', '5000', '0.75'),
+
+ alertRule('squeezed', '0', '0'),
+
+ alertRule('dropped', '0', '0'),
+ alertRule('dropped', '0', '0.5'),
+ alertRule('dropped', '0', '0.75'),
+ ],
+ },
+ ],
+ },
},
rabbitmq: {
- groups: [
- {
- name: 'recording',
- rules: [
- {
- record: 'rabbitmq:usage:memory',
- labels: {
- job: 'rabbitmq',
+ prometheusRules+:: {
+ groups: [
+ {
+ name: 'recording',
+ rules:
+ [
+ {
+ record: 'rabbitmq:usage:memory',
+ labels: {
+ job: 'rabbitmq',
+ },
+ expr: 'sum without (job) ( rabbitmq_process_resident_memory_bytes ) / sum without ( container, pod, job, namespace, node, resource, uid, unit ) ( label_replace( cluster:namespace:pod_memory:active:kube_pod_container_resource_limits, "instance", "$1", "pod", "(.*)" ) )',
+ },
+ ],
+ },
+ ],
+ },
+ prometheusAlerts+:: {
+ groups: [
+ {
+ name: 'alarms',
+ rules: [
+ {
+ alert: 'RabbitmqAlarmFreeDiskSpace',
+ expr: 'rabbitmq_alarms_free_disk_space_watermark == 1',
+ labels: {
+ severity: 'critical',
+ },
},
- expr: 'sum without (job) ( rabbitmq_process_resident_memory_bytes ) / sum without ( container, pod, job, namespace, node, resource, uid, unit ) ( label_replace( cluster:namespace:pod_memory:active:kube_pod_container_resource_limits, "instance", "$1", "pod", "(.*)" ) )',
- },
- ],
- },
- {
- name: 'alarms',
- rules: [
- {
- alert: 'RabbitmqAlarmFreeDiskSpace',
- expr: 'rabbitmq_alarms_free_disk_space_watermark == 1',
- labels: {
- severity: 'critical',
+ {
+ alert: 'RabbitmqAlarmMemoryUsedWatermark',
+ expr: 'rabbitmq_alarms_memory_used_watermark == 1',
+ labels: {
+ severity: 'critical',
+ },
},
- },
- {
- alert: 'RabbitmqAlarmMemoryUsedWatermark',
- expr: 'rabbitmq_alarms_memory_used_watermark == 1',
- labels: {
- severity: 'critical',
+ {
+ alert: 'RabbitmqAlarmFileDescriptorLimit',
+ expr: 'rabbitmq_alarms_file_descriptor_limit == 1',
+ labels: {
+ severity: 'critical',
+ },
},
- },
- {
- alert: 'RabbitmqAlarmFileDescriptorLimit',
- expr: 'rabbitmq_alarms_file_descriptor_limit == 1',
- labels: {
- severity: 'critical',
+ ],
+ },
+ {
+ name: 'limits',
+ rules: [
+ {
+ alert: 'RabbitmqMemoryHigh',
+ expr: 'rabbitmq:usage:memory > 0.80',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- ],
- },
- {
- name: 'limits',
- rules: [
- {
- alert: 'RabbitmqMemoryHigh',
- expr: 'rabbitmq:usage:memory > 0.80',
- labels: {
- severity: 'warning',
+ {
+ alert: 'RabbitmqMemoryHigh',
+ expr: 'rabbitmq:usage:memory > 0.95',
+ labels: {
+ severity: 'critical',
+ },
},
- },
- {
- alert: 'RabbitmqMemoryHigh',
- expr: 'rabbitmq:usage:memory > 0.95',
- labels: {
- severity: 'critical',
+ {
+ alert: 'RabbitmqFileDescriptorsUsage',
+ expr: 'rabbitmq_process_open_fds / rabbitmq_process_max_fds > 0.80',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- {
- alert: 'RabbitmqFileDescriptorsUsage',
- expr: 'rabbitmq_process_open_fds / rabbitmq_process_max_fds > 0.80',
- labels: {
- severity: 'warning',
+ {
+ alert: 'RabbitmqFileDescriptorsUsage',
+ expr: 'rabbitmq_process_open_fds / rabbitmq_process_max_fds > 0.95',
+ labels: {
+ severity: 'critical',
+ },
},
- },
- {
- alert: 'RabbitmqFileDescriptorsUsage',
- expr: 'rabbitmq_process_open_fds / rabbitmq_process_max_fds > 0.95',
- labels: {
- severity: 'critical',
+ {
+ alert: 'RabbitmqTcpSocketsUsage',
+ expr: 'rabbitmq_process_open_tcp_sockets / rabbitmq_process_max_tcp_sockets > 0.80',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- {
- alert: 'RabbitmqTcpSocketsUsage',
- expr: 'rabbitmq_process_open_tcp_sockets / rabbitmq_process_max_tcp_sockets > 0.80',
- labels: {
- severity: 'warning',
+ {
+ alert: 'RabbitmqTcpSocketsUsage',
+ expr: 'rabbitmq_process_open_tcp_sockets / rabbitmq_process_max_tcp_sockets > 0.95',
+ labels: {
+ severity: 'critical',
+ },
},
- },
- {
- alert: 'RabbitmqTcpSocketsUsage',
- expr: 'rabbitmq_process_open_tcp_sockets / rabbitmq_process_max_tcp_sockets > 0.95',
- labels: {
- severity: 'critical',
+ ],
+ },
+ {
+ name: 'msgs',
+ rules: [
+ {
+ alert: 'RabbitmqUnackedMessages',
+ expr: 'sum(rabbitmq_queue_messages_unacked) BY (queue) > 1000',
+ 'for': '5m',
+ labels: {
+ severity: 'warning',
+ },
},
- },
- ],
- },
- {
- name: 'msgs',
- rules: [
- {
- alert: 'RabbitmqUnackedMessages',
- expr: 'sum(rabbitmq_queue_messages_unacked) BY (queue) > 1000',
- 'for': '5m',
- labels: {
- severity: 'warning',
+ {
+ alert: 'RabbitmqUnackedMessages',
+ expr: 'sum(rabbitmq_queue_messages_unacked) BY (queue) > 1000',
+ 'for': '1h',
+ labels: {
+ severity: 'critical',
+ },
},
- },
- {
- alert: 'RabbitmqUnackedMessages',
- expr: 'sum(rabbitmq_queue_messages_unacked) BY (queue) > 1000',
- 'for': '1h',
- labels: {
- severity: 'critical',
- },
- },
- ],
- },
- ],
+ ],
+ },
+ ],
+ },
},
}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/mixins.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/mixins.libsonnet
new file mode 100644
index 0000000..086b215
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/mixins.libsonnet
@@ -0,0 +1,133 @@
+// NOTE(mnaser): The following is a list of disabled alerts known to be noisy
+// or not useful.
+local disabledAlerts = [
+ // * Dropped `CephNodeDiskspaceWarning` because we already have a
+ // few alerts like `NodeFilesystemSpaceFillingUp`, etc.
+ 'CephNodeDiskspaceWarning',
+
+ // * Dropped `CephNodeNetworkPacketDrops` due to noisy alerts with
+ // no actionable items to fix it.
+ 'CephNodeNetworkPacketDrops',
+];
+
+// NOTE(mnaser): This is the default mapping for severities:
+// - P1: Full service disruption or significant loss of
+// functionality. Requires immediate action.
+// - P2: Major functionality broken, affecting large group of
+// users or critical components. Prompt attention needed.
+// - P3: Issues affecting smaller group of users or a single
+// system. Attention required during business hours.
+// - P4: Minor issues with limited impact. Attention and potential
+// action needed during standard business hours.
+// - P5: Normal activities or minor issues. Typically no immediate
+// attention or action required.
+local defaultSeverityMapping = {
+ critical: 'P1',
+ warning: 'P3',
+ info: 'P5',
+};
+
+// NOTE(mnaser): The mapping here follows the format 'AlertName:Severity'. The
+// 'Severity' corresponds to the severity level of the alert, and
+// it maps to one of the severity levels defined in
+// defaultSeverityMapping.
+local customSeverityMapping = {
+ 'CephMgrPrometheusModuleInactive:critical': 'P4',
+ 'CephMonDown:warning': 'P4',
+ 'CephMonDownQuorumAtRisk:critical': 'P3',
+ 'KubeJobFailed:warning': 'P4',
+};
+
+local getSeverity(rule) =
+ // Return immediately if the string starts with "P"
+ if std.startsWith(rule.labels.severity, 'P') then rule.labels.severity
+ else
+ local key = rule.alert + ':' + rule.labels.severity;
+ if key in customSeverityMapping then customSeverityMapping[key]
+ else defaultSeverityMapping[rule.labels.severity];
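+
+// For example, a 'KubeJobFailed' alert with severity 'warning' maps to
+// 'P4' via customSeverityMapping, while an unmapped 'critical' alert
+// falls back to 'P1' via defaultSeverityMapping.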
+
+local mixins = {
+ alertmanager: (import 'alertmanager-mixin/mixin.libsonnet') + {
+ _config+:: {
+ alertmanagerSelector: 'job="kube-prometheus-stack-alertmanager"',
+ },
+ },
+ ceph: (import 'ceph-mixin/mixin.libsonnet'),
+ coredns: (import 'coredns-mixin/mixin.libsonnet') + {
+ _config+:: {
+ corednsSelector: 'job="coredns"',
+ },
+ },
+ kube: (import 'kubernetes-mixin/mixin.libsonnet') + {
+ _config+:: {
+ kubeApiserverSelector: 'job="apiserver"',
+ },
+ },
+ memcached: (import 'memcached-mixin/mixin.libsonnet'),
+ mysqld: (import 'mysqld-mixin/mixin.libsonnet') + {
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'mysqld-extras',
+ rules: [
+ {
+ alert: 'MysqlTooManyConnections',
+ 'for': '1m',
+ expr: |||
+ max_over_time(mysql_global_status_threads_connected[1m]) / mysql_global_variables_max_connections * 100 > 80
+ |||,
+ labels: {
+ severity: 'warning',
+ },
+ },
+ {
+ alert: 'MysqlHighThreadsRunning',
+ 'for': '1m',
+ expr: |||
+ max_over_time(mysql_global_status_threads_running[1m]) / mysql_global_variables_max_connections * 100 > 60
+ |||,
+ labels: {
+ severity: 'warning',
+ },
+ },
+ {
+ alert: 'MysqlSlowQueries',
+ 'for': '2m',
+ expr: |||
+ increase(mysql_global_status_slow_queries[1m]) > 0
+ |||,
+ labels: {
+ severity: 'warning',
+ },
+ },
+ ],
+ },
+ ],
+ },
+ },
+ node: (import 'node-mixin/mixin.libsonnet'),
+ openstack: (import 'openstack.libsonnet'),
+} + (import 'legacy.libsonnet');
+
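+// Post-process every mixin: drop alerts listed in disabledAlerts and
+// rewrite each alert's severity label to a P-level via getSeverity.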
+{
+ [key]: mixins[key] {
+ prometheusAlerts: {
+ groups: [
+ {
+ name: group.name,
+ rules: [
+ rule {
+ labels+: {
+ severity: getSeverity(rule),
+ },
+ }
+ for rule in group.rules
+ if !std.member(disabledAlerts, rule.alert)
+ ],
+ }
+ for group in mixins[key].prometheusAlerts.groups
+ ],
+ },
+ }
+ for key in std.objectFields(mixins)
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/mysqld.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/mysqld.libsonnet
deleted file mode 100644
index 034f2aa..0000000
--- a/roles/kube_prometheus_stack/files/jsonnet/mysqld.libsonnet
+++ /dev/null
@@ -1,54 +0,0 @@
-local addAlerts = {
- prometheusAlerts+::
- {
- groups+: [
- {
- name: 'mysqld-extras',
-
- rules: [
- {
- alert: 'MysqlTooManyConnections',
- 'for': '1m',
- expr: |||
- max_over_time(mysql_global_status_threads_connected[1m]) / mysql_global_variables_max_connections * 100 > 80
- |||,
- labels: {
- severity: 'warning',
- },
- },
- {
- alert: 'MysqlHighThreadsRunning',
- 'for': '1m',
- expr: |||
- max_over_time(mysql_global_status_threads_running[1m]) / mysql_global_variables_max_connections * 100 > 60
- |||,
- labels: {
- severity: 'warning',
- },
- },
- {
- alert: 'MysqlSlowQueries',
- 'for': '2m',
- expr: |||
- increase(mysql_global_status_slow_queries[1m]) > 0
- |||,
- labels: {
- severity: 'warning',
- },
- },
- ],
- },
- ],
- },
-};
-
-{
- prometheusAlerts: {
- groups:
- (
- std.parseYaml(importstr 'vendor/github.com/prometheus/mysqld_exporter/mysqld-mixin/alerts/general.yaml').groups +
- std.parseYaml(importstr 'vendor/github.com/prometheus/mysqld_exporter/mysqld-mixin/alerts/galera.yaml').groups +
- std.parseYaml(importstr 'vendor/github.com/prometheus/mysqld_exporter/mysqld-mixin/rules/rules.yaml').groups
- ),
- },
-} + addAlerts
diff --git a/roles/kube_prometheus_stack/files/jsonnet/openstack.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/openstack.libsonnet
new file mode 100644
index 0000000..4e78c21
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/openstack.libsonnet
@@ -0,0 +1,211 @@
+{
+ prometheusAlerts+: {
+ groups+: [
+ {
+ name: 'cinder',
+ rules: [
+ {
+ alert: 'CinderAgentDisabled',
+ expr: 'openstack_cinder_agent_state{adminState!="enabled"} > 0',
+ 'for': '24h',
+ labels: {
+ severity: 'P5',
+ },
+ annotations: {
+ summary: 'Cinder agent disabled',
+ description: 'A Cinder agent has been administratively disabled for more than 24 hours.',
+ },
+ },
+ {
+ alert: 'CinderAgentDown',
+ expr: 'openstack_cinder_agent_state != 1',
+ 'for': '15m',
+ labels: {
+ severity: 'P3',
+ },
+ annotations: {
+ summary: 'Cinder agent down',
+ description: 'A Cinder agent has been down for more than 15 minutes.',
+ },
+ },
+ {
+ alert: 'CinderAgentGroupDown',
+ expr: 'min by (exported_service) (openstack_cinder_agent_state) == 0',
+ 'for': '5m',
+ labels: {
+ severity: 'P2',
+ },
+ annotations: {
+ summary: 'Cinder agent group down',
+ description: 'All instances of a specific Cinder agent have been down for more than 5 minutes.',
+ },
+ },
+ {
+ alert: 'CinderVolumeError',
+ expr: 'openstack_cinder_volume_status{status=~"error.*"} > 0',
+ 'for': '24h',
+ labels: {
+ severity: 'P4',
+ },
+ annotations: {
+ summary: 'Cinder volume error',
+ description: 'A Cinder volume is in an error state.',
+ },
+ },
+ ],
+ },
+ {
+ name: 'neutron',
+ rules:
+ [
+ {
+ alert: 'NeutronAgentDisabled',
+ expr: 'openstack_neutron_agent_state{adminState!="up"} > 0',
+ 'for': '24h',
+ labels: {
+ severity: 'P5',
+ },
+ annotations: {
+ summary: 'Neutron agent disabled',
+ description: 'A Neutron agent has been administratively disabled for more than 24 hours.',
+ },
+ },
+ {
+ alert: 'NeutronAgentDown',
+ expr: 'openstack_neutron_agent_state != 1',
+ 'for': '15m',
+ labels: {
+ severity: 'P3',
+ },
+ annotations: {
+ summary: 'Neutron agent down',
+ description: 'A Neutron agent has been down for more than 15 minutes.',
+ },
+ },
+ {
+ alert: 'NeutronAgentGroupDown',
+ expr: 'min by (exported_service) (openstack_neutron_agent_state) == 0',
+ 'for': '5m',
+ labels: {
+ severity: 'P2',
+ },
+ annotations: {
+ summary: 'Neutron agent group down',
+ description: 'All instances of a specific Neutron agent have been down for more than 5 minutes.',
+ },
+        },
+ {
+ alert: 'NeutronNetworkOutOfIPs',
+ annotations: {
+ description: 'The subnet {{ $labels.subnet_name }} within {{ $labels.network_name }} is currently at {{ $value }}% utilization. If the IP addresses run out, it will impact the provisioning of new ports.',
+ summary: '[{{ $labels.network_name }}] {{ $labels.subnet_name }} running out of IPs',
+ },
+ expr: 'sum by (network_id) (openstack_neutron_network_ip_availabilities_used{project_id!=""}) / sum by (network_id) (openstack_neutron_network_ip_availabilities_total{project_id!=""}) * 100 > 80',
+ 'for': '6h',
+ labels: {
+ severity: 'warning',
+ },
+ },
+ ],
+ },
+ {
+ name: 'neutron-port-bindings',
+ rules:
+ local alert(severity, expr, description) = {
+ alert: 'NeutronPortBindingFailed',
+ expr: expr,
+ 'for': '5m',
+ labels: {
+ severity: severity,
+ },
+ annotations: {
+ summary: 'Neutron Port Binding Failed',
+ description: description,
+ },
+ };
+ [
+          alert('P4', 'count(openstack_neutron_port{binding_vif_type="binding_failed"}) > 0', 'At least one Neutron port has failed to bind.'),
+          alert('P3', '(count(openstack_neutron_port{binding_vif_type="binding_failed"}) / count(openstack_neutron_port)) > 0.05', 'More than 5% of Neutron ports have failed to bind.'),
+          alert('P2', '(count(openstack_neutron_port{binding_vif_type="binding_failed"}) / count(openstack_neutron_port)) > 0.5', 'More than 50% of Neutron ports have failed to bind.'),
+ ],
+ },
+ {
+ name: 'nova',
+ rules: [
+ {
+ alert: 'NovaServiceDisabled',
+ expr: 'openstack_nova_agent_state{adminState!="enabled"} > 0',
+ 'for': '24h',
+ labels: {
+ severity: 'P5',
+ },
+ annotations: {
+ summary: 'Nova service disabled',
+ description: 'A Nova service has been administratively disabled for more than 24 hours.',
+ },
+ },
+ {
+ alert: 'NovaServiceDown',
+ expr: 'openstack_nova_agent_state != 1',
+ 'for': '15m',
+ labels: {
+ severity: 'P3',
+ },
+ annotations: {
+ summary: 'Nova service down',
+ description: 'A Nova service has been down for more than 15 minutes.',
+ },
+ },
+ {
+ alert: 'NovaServiceGroupDown',
+ expr: 'min by (exported_service) (openstack_nova_agent_state) == 0',
+ 'for': '5m',
+ labels: {
+ severity: 'P2',
+ },
+ annotations: {
+ summary: 'Nova service group down',
+ description: 'All instances of a specific Nova service have been down for more than 5 minutes.',
+ },
+ },
+ {
+ alert: 'NovaInstanceError',
+ expr: 'openstack_nova_server_status{status="ERROR"} > 0',
+ 'for': '24h',
+ labels: {
+ severity: 'P4',
+ },
+ annotations: {
+ summary: 'Nova server error',
+ description: 'A Nova server is in an error state.',
+ },
+ },
+ {
+ alert: 'NovaFailureRisk',
+ annotations: {
+ description: 'The cloud capacity will be at {{ $value }} in the event of the failure of a single hypervisor which puts the cloud at risk of not being able to recover should any hypervisor failures occur. Please ensure that adequate amount of infrastructure is assigned to this deployment to prevent this.',
+ summary: '[nova] Failure risk',
+ },
+ expr: '(sum(openstack_nova_memory_available_bytes-openstack_nova_memory_used_bytes) - max(openstack_nova_memory_used_bytes)) / sum(openstack_nova_memory_available_bytes-openstack_nova_memory_used_bytes) * 100 < 0.25',
+ 'for': '6h',
+ labels: {
+ severity: 'warning',
+ },
+ },
+ {
+ alert: 'NovaCapacity',
+ annotations: {
+ description: 'The cloud capacity is currently at `{{ $value }}` which means there is a risk of running out of capacity due to the timeline required to add new nodes. Please ensure that adequate amount of infrastructure is assigned to this deployment to prevent this.',
+ summary: '[nova] Capacity risk',
+ },
+ expr: 'sum ( openstack_nova_memory_used_bytes + on(hostname) group_left(adminState) (0 * openstack_nova_agent_state{exported_service="nova-compute",adminState="enabled"}) ) / sum ( openstack_nova_memory_available_bytes + on(hostname) group_left(adminState) (0 * openstack_nova_agent_state{exported_service="nova-compute",adminState="enabled"}) ) * 100 > 75',
+ 'for': '6h',
+ labels: {
+ severity: 'warning',
+ },
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/rules.jsonnet b/roles/kube_prometheus_stack/files/jsonnet/rules.jsonnet
index e562e6e..0eed9bd 100644
--- a/roles/kube_prometheus_stack/files/jsonnet/rules.jsonnet
+++ b/roles/kube_prometheus_stack/files/jsonnet/rules.jsonnet
@@ -1,18 +1,9 @@
-local legacy = import 'legacy.libsonnet';
-
-local ceph = import 'ceph.libsonnet';
-local mysqld = import 'mysqld.libsonnet';
-local memcached = import 'vendor/github.com/grafana/jsonnet-libs/memcached-mixin/mixin.libsonnet';
-
-local coredns = (import 'vendor/github.com/povilasv/coredns-mixin/mixin.libsonnet') + {
- _config+:: {
- corednsSelector: 'job="coredns"',
- },
-};
+local mixins = import 'mixins.libsonnet';
{
- ceph: ceph.prometheusAlerts,
- coredns: coredns.prometheusAlerts,
- memcached: memcached.prometheusAlerts,
- 'percona-xtradb-pxc': mysqld.prometheusAlerts,
-} + legacy
+  [mixin]:
+    {
+      groups:
+        mixins[mixin].prometheusAlerts.groups
+        + (
+          if std.objectHasAll(mixins[mixin], 'prometheusRules')
+          then mixins[mixin].prometheusRules.groups
+          else []
+        ),
+    }
+ for mixin in std.objectFields(mixins)
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/alertmanager-mixin b/roles/kube_prometheus_stack/files/jsonnet/vendor/alertmanager-mixin
new file mode 120000
index 0000000..96f32a4
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/alertmanager-mixin
@@ -0,0 +1 @@
+github.com/prometheus/alertmanager/doc/alertmanager-mixin
\ No newline at end of file
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/DOCS.md b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/DOCS.md
new file mode 100644
index 0000000..9e3c09f
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/DOCS.md
@@ -0,0 +1,885 @@
+# Docs
+
+* [dashboard](#dashboard)
+* [panel](#panel)
+ * [gauge.new](#panelGaugenew)
+ * [graph.new](#panelGraphnew)
+ * [row.new](#panelRownew)
+ * [stat.new](#panelStatnew)
+ * [table.new](#panelTablenew)
+ * [text.new](#panelTextnew)
+* [target](#target)
+ * [prometheus.new](#targetPrometheusnew)
+* [template](#template)
+ * [custom.new](#templateCustomnew)
+ * [datasource.new](#templateDatasourcenew)
+ * [query.new](#templateQuerynew)
+
+## dashboard
+
+
+
+### dashboard.new
+
+Instantiate a dashboard.
+
+* **description**: (type: string, default: `null`)
+
+* **editable**: (type: boolean, default: `true`)
+
+* **graphTooltip**: (type: integer, default: `0`)
+
+* **refresh**: (type: string, default: `null`)
+
+* **schemaVersion**: (type: integer, default: `25`)
+
+* **style**: (type: string, default: `"dark"`)
+
+* **tags**: (type: array, default: `[]`)
+
+* **timezone**: (type: string, default: `null`)
+
+* **title**: (type: string, default: `null`)
+
+* **uid**: (type: string, default: `null`)
+
+
+#### #setTime
+
+* **from**: (type: string, default: `"now-6h"`)
+
+* **to**: (type: string, default: `"now"`)
+
+#### #setTimepicker
+
+* **hidden**: (type: boolean, default: `false`)
+
+* **refreshIntervals**: (type: array, default: `["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"]`)
+
+
+#### #addAnnotation
+
+* **builtIn**: (type: integer, default: `0`)
+
+* **datasource**: (type: string, default: `"default"`)
+
+* **enable**: (type: boolean, default: `true`)
+
+* **hide**: (type: boolean, default: `false`)
+
+* **iconColor**: (type: string, default: `null`)
+
+* **name**: (type: string, default: `null`)
+
+* **rawQuery**: (type: string, default: `null`)
+
+* **showIn**: (type: integer, default: `0`)
+
+#### #addTemplate
+
+* **template**: (type: object)
+
+
+
+## panel
+
+
+
+### panel.gauge.new
+
+
+
+* **datasource**: (type: string, default: `"default"`)
+
+* **description**: (type: string, default: `null`)
+
+* **repeat**: (type: string, default: `null`)
+
+* **repeatDirection**: (type: string, default: `null`)
+
+* **title**: (type: string, default: `null`)
+
+* **transparent**: (type: boolean, default: `false`)
+
+
+#### #setFieldConfig
+
+* **max**: (type: integer, default: `null`)
+
+* **min**: (type: integer, default: `null`)
+
+* **thresholdMode**: (type: string, default: `"absolute"`)
+
+* **unit**: (type: string, default: `null`)
+
+#### #setGridPos
+
+* **h**: (type: integer, default: `8`)
+ Panel height.
+* **w**: (type: integer, default: `12`)
+ Panel width.
+* **x**: (type: integer, default: `null`)
+ Panel x position.
+* **y**: (type: integer, default: `null`)
+ Panel y position.
+#### #setOptions
+
+* **calcs**: (type: array, default: `["mean"]`)
+
+* **fields**: (type: string, default: `null`)
+
+* **orientation**: (type: string, default: `"auto"`)
+
+* **showThresholdLabels**: (type: boolean, default: `false`)
+
+* **showThresholdMarkers**: (type: boolean, default: `true`)
+
+* **values**: (type: boolean, default: `false`)
+
+
+#### #addDataLink
+
+* **targetBlank**: (type: boolean, default: `true`)
+
+* **title**: (type: string, default: `null`)
+
+* **url**: (type: string, default: `null`)
+
+#### #addPanelLink
+
+* **targetBlank**: (type: boolean, default: `true`)
+
+* **title**: (type: string, default: `null`)
+
+* **url**: (type: string, default: `null`)
+
+#### #addMapping
+
+* **from**: (type: string, default: `null`)
+
+* **id**: (type: integer, default: `null`)
+
+* **operator**: (type: string, default: `null`)
+
+* **text**: (type: string, default: `null`)
+
+* **to**: (type: string, default: `null`)
+
+* **type**: (type: integer, default: `null`)
+
+* **value**: (type: string, default: `null`)
+
+#### #addOverride
+
+* **matcher**: (type: object, default: `null`)
+
+* **properties**: (type: array, default: `null`)
+
+#### #addThresholdStep
+
+* **color**: (type: string, default: `null`)
+
+* **value**: (type: integer, default: `null`)
+
+#### #addTarget
+
+* **target**: (type: object)
+
+
+
+### panel.graph.new
+
+
+
+* **bars**: (type: boolean, default: `false`)
+ Display values as a bar chart.
+* **dashLength**: (type: integer, default: `10`)
+ Dashed line length.
+* **dashes**: (type: boolean, default: `false`)
+ Show line with dashes.
+* **datasource**: (type: string, default: `"default"`)
+
+* **decimals**: (type: integer, default: `null`)
+ Controls how many decimals are displayed for legend values and
+ graph hover tooltips.
+* **description**: (type: string, default: `null`)
+
+* **fill**: (type: integer, default: `1`)
+ Amount of color fill for a series. Expects a value between 0 and 1.
+* **fillGradient**: (type: integer, default: `0`)
+ Degree of gradient on the area fill. 0 is no gradient, 10 is a
+ steep gradient.
+* **hiddenSeries**: (type: boolean, default: `false`)
+ Hide the series.
+* **lines**: (type: boolean, default: `true`)
+ Display values as a line graph.
+* **linewidth**: (type: integer, default: `1`)
+ The width of the line for a series.
+* **nullPointMode**: (type: string, default: `"null"`)
+ How null values are displayed.
+ * 'null' - If there is a gap in the series, meaning a null value,
+ then the line in the graph will be broken and show the gap.
+ * 'null as zero' - If there is a gap in the series, meaning a null
+ value, then it will be displayed as a zero value in the graph
+ panel.
+ * 'connected' - If there is a gap in the series, meaning a null
+ value or values, then the line will skip the gap and connect to the
+ next non-null value.
+* **percentage**: (type: boolean, default: `false`)
+ Available when `stack` is true. Each series is drawn as a percentage
+ of the total of all series.
+* **pointradius**: (type: integer, default: `null`)
+ Controls how large the points are.
+* **points**: (type: boolean, default: `false`)
+ Display points for values.
+* **repeat**: (type: string, default: `null`)
+
+* **repeatDirection**: (type: string, default: `null`)
+
+* **spaceLength**: (type: integer, default: `10`)
+ Dashed line spacing when `dashes` is true.
+* **stack**: (type: boolean, default: `false`)
+ Each series is stacked on top of another.
+* **steppedLine**: (type: boolean, default: `false`)
+ Draws adjacent points as staircase.
+* **timeFrom**: (type: string, default: `null`)
+
+* **timeShift**: (type: string, default: `null`)
+
+* **title**: (type: string, default: `null`)
+
+* **transparent**: (type: boolean, default: `false`)
+
+
+#### #setGridPos
+
+* **h**: (type: integer, default: `8`)
+ Panel height.
+* **w**: (type: integer, default: `12`)
+ Panel width.
+* **x**: (type: integer, default: `null`)
+ Panel x position.
+* **y**: (type: integer, default: `null`)
+ Panel y position.
+#### #setLegend
+
+* **alignAsTable**: (type: boolean, default: `null`)
+ Whether to display legend in table.
+* **avg**: (type: boolean, default: `false`)
+ Average of all values returned from the metric query.
+* **current**: (type: boolean, default: `false`)
+ Last value returned from the metric query.
+* **max**: (type: boolean, default: `false`)
+ Maximum of all values returned from the metric query.
+* **min**: (type: boolean, default: `false`)
+ Minimum of all values returned from the metric query.
+* **rightSide**: (type: boolean, default: `false`)
+ Display legend to the right.
+* **show**: (type: boolean, default: `true`)
+ Show or hide the legend.
+* **sideWidth**: (type: integer, default: `null`)
+ Available when `rightSide` is true. The minimum width for the legend in
+ pixels.
+* **total**: (type: boolean, default: `false`)
+ Sum of all values returned from the metric query.
+* **values**: (type: boolean, default: `true`)
+
+#### #setThresholds
+
+* **thresholdMode**: (type: string, default: `"absolute"`)
+
+#### #setTooltip
+
+* **shared**: (type: boolean, default: `true`)
+ * true - The hover tooltip shows all series in the graph.
+ Grafana highlights the series that you are hovering over in
+ bold in the series list in the tooltip.
+ * false - The hover tooltip shows only a single series, the one
+ that you are hovering over on the graph.
+* **sort**: (type: integer, default: `2`)
+ * 0 (none) - The order of the series in the tooltip is
+ determined by the sort order in your query. For example, they
+ could be alphabetically sorted by series name.
+ * 1 (increasing) - The series in the hover tooltip are sorted
+ by value and in increasing order, with the lowest value at the
+ top of the list.
+ * 2 (decreasing) - The series in the hover tooltip are sorted
+ by value and in decreasing order, with the highest value at the
+ top of the list.
+
+#### #setXaxis
+
+* **buckets**: (type: string, default: `null`)
+
+* **mode**: (type: string, default: `"time"`)
+ The display mode completely changes the visualization of the
+ graph panel. It’s like three panels in one. The main mode is
+ the time series mode with time on the X-axis. The other two
+ modes are a basic bar chart mode with series on the X-axis
+ instead of time and a histogram mode.
+ * 'time' - The X-axis represents time and that the data is
+ grouped by time (for example, by hour, or by minute).
+ * 'series' - The data is grouped by series and not by time. The
+ Y-axis still represents the value.
+ * 'histogram' - Converts the graph into a histogram. A histogram
+ is a kind of bar chart that groups numbers into ranges, often
+ called buckets or bins. Taller bars show that more data falls
+ in that range.
+* **name**: (type: string, default: `null`)
+
+* **show**: (type: boolean, default: `true`)
+ Show or hide the axis.
+
+#### #setYaxis
+
+* **align**: (type: boolean, default: `false`)
+ Align left and right Y-axes by value.
+* **alignLevel**: (type: integer, default: `0`)
+  Available when `align` is true. Value to use for alignment of
+ left and right Y-axes, starting from Y=0.
+
+#### #addDataLink
+
+* **targetBlank**: (type: boolean, default: `true`)
+
+* **title**: (type: string, default: `null`)
+
+* **url**: (type: string, default: `null`)
+
+#### #addPanelLink
+
+* **targetBlank**: (type: boolean, default: `true`)
+
+* **title**: (type: string, default: `null`)
+
+* **url**: (type: string, default: `null`)
+
+#### #addOverride
+
+* **matcher**: (type: object, default: `null`)
+
+* **properties**: (type: array, default: `null`)
+
+#### #addSeriesOverride
+
+* **alias**: (type: string, default: `null`)
+ Alias or regex matching the series you'd like to target.
+* **bars**: (type: boolean, default: `null`)
+
+* **color**: (type: string, default: `null`)
+
+* **dashLength**: (type: integer, default: `null`)
+
+* **dashes**: (type: boolean, default: `null`)
+
+* **fill**: (type: integer, default: `null`)
+
+* **fillBelowTo**: (type: string, default: `null`)
+
+* **fillGradient**: (type: integer, default: `null`)
+
+* **hiddenSeries**: (type: boolean, default: `null`)
+
+* **hideTooltip**: (type: boolean, default: `null`)
+
+* **legend**: (type: boolean, default: `null`)
+
+* **lines**: (type: boolean, default: `null`)
+
+* **linewidth**: (type: integer, default: `null`)
+
+* **nullPointMode**: (type: string, default: `null`)
+
+* **pointradius**: (type: integer, default: `null`)
+
+* **points**: (type: boolean, default: `null`)
+
+* **spaceLength**: (type: integer, default: `null`)
+
+* **stack**: (type: integer, default: `null`)
+
+* **steppedLine**: (type: boolean, default: `null`)
+
+* **transform**: (type: string, default: `null`)
+
+* **yaxis**: (type: integer, default: `null`)
+
+* **zindex**: (type: integer, default: `null`)
+
+#### #addThresholdStep
+
+* **color**: (type: string, default: `null`)
+
+* **value**: (type: integer, default: `null`)
+
+#### #addTarget
+
+* **target**: (type: object)
+
+#### #addYaxis
+
+* **decimals**: (type: integer, default: `null`)
+  Defines how many decimals are displayed for the Y value.
+* **format**: (type: string, default: `"short"`)
+ The display unit for the Y value.
+* **label**: (type: string, default: `null`)
+ The Y axis label.
+* **logBase**: (type: integer, default: `1`)
+  The scale to use for the Y value: linear or logarithmic.
+ * 1 - linear
+ * 2 - log (base 2)
+ * 10 - log (base 10)
+ * 32 - log (base 32)
+ * 1024 - log (base 1024)
+* **max**: (type: integer, default: `null`)
+ The maximum Y value.
+* **min**: (type: integer, default: `null`)
+ The minimum Y value.
+* **show**: (type: boolean, default: `true`)
+ Show or hide the axis.
+
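+A minimal composition sketch, assuming the library is imported through its
+`grafana.libsonnet` entrypoint (the datasource, metric, and titles are
+illustrative):
+
+```jsonnet
+local grafana = import 'grafana.libsonnet';
+
+// Build a graph panel, then chain the setter/adder methods documented above.
+grafana.panel.graph.new(
+  title='CPU usage',
+  datasource='$datasource',
+)
+.setLegend(alignAsTable=true, avg=true, current=true, rightSide=true)
+.addYaxis(format='percentunit', min=0)
+.addTarget(grafana.target.prometheus.new(
+  expr='sum(rate(node_cpu_seconds_total{mode!="idle"}[5m]))',
+  legendFormat='{{ instance }}',
+))
+```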
+
+### panel.row.new
+
+
+
+* **collapse**: (type: boolean, default: `true`)
+
+* **collapsed**: (type: boolean, default: `true`)
+
+* **datasource**: (type: string, default: `null`)
+
+* **repeat**: (type: string, default: `null`)
+
+* **repeatIteration**: (type: string, default: `null`)
+
+* **showTitle**: (type: boolean, default: `true`)
+
+* **title**: (type: string, default: `null`)
+
+* **titleSize**: (type: string, default: `"h6"`)
+
+
+#### #setGridPos
+
+* **h**: (type: integer, default: `8`)
+ Panel height.
+* **w**: (type: integer, default: `12`)
+ Panel width.
+* **x**: (type: integer, default: `null`)
+ Panel x position.
+* **y**: (type: integer, default: `null`)
+ Panel y position.
+
+#### #addPanel
+
+* **panel**: (type: object)
+
+
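+A hedged sketch of a collapsed row wrapping a panel via `#addPanel`
+(the titles and grid position are illustrative):
+
+```jsonnet
+local grafana = import 'grafana.libsonnet';
+
+// Rows group panels; collapsed rows hide their children until expanded.
+grafana.panel.row.new(title='Node', collapsed=true)
+.addPanel(
+  grafana.panel.graph.new(title='Load average')
+  .setGridPos(h=8, w=12, x=0, y=1)
+)
+```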
+
+### panel.stat.new
+
+
+
+* **datasource**: (type: string, default: `"default"`)
+
+* **description**: (type: string, default: `null`)
+
+* **repeat**: (type: string, default: `null`)
+
+* **repeatDirection**: (type: string, default: `null`)
+
+* **title**: (type: string, default: `null`)
+
+* **transparent**: (type: boolean, default: `false`)
+
+
+#### #setFieldConfig
+
+* **max**: (type: integer, default: `null`)
+
+* **min**: (type: integer, default: `null`)
+
+* **thresholdMode**: (type: string, default: `"absolute"`)
+
+* **unit**: (type: string, default: `null`)
+
+#### #setGridPos
+
+* **h**: (type: integer, default: `8`)
+ Panel height.
+* **w**: (type: integer, default: `12`)
+ Panel width.
+* **x**: (type: integer, default: `null`)
+ Panel x position.
+* **y**: (type: integer, default: `null`)
+ Panel y position.
+
+#### #setOptions
+
+* **calcs**: (type: array, default: `["mean"]`)
+
+* **colorMode**: (type: string, default: `"value"`)
+
+* **fields**: (type: string, default: `null`)
+
+* **graphMode**: (type: string, default: `"none"`)
+
+* **justifyMode**: (type: string, default: `"auto"`)
+
+* **orientation**: (type: string, default: `"auto"`)
+
+* **textMode**: (type: string, default: `"auto"`)
+
+* **values**: (type: boolean, default: `false`)
+
+
+#### #addDataLink
+
+* **targetBlank**: (type: boolean, default: `true`)
+
+* **title**: (type: string, default: `null`)
+
+* **url**: (type: string, default: `null`)
+
+#### #addPanelLink
+
+* **targetBlank**: (type: boolean, default: `true`)
+
+* **title**: (type: string, default: `null`)
+
+* **url**: (type: string, default: `null`)
+
+#### #addMapping
+
+* **from**: (type: string, default: `null`)
+
+* **id**: (type: integer, default: `null`)
+
+* **operator**: (type: string, default: `null`)
+
+* **text**: (type: string, default: `null`)
+
+* **to**: (type: string, default: `null`)
+
+* **type**: (type: integer, default: `null`)
+
+* **value**: (type: string, default: `null`)
+
+#### #addOverride
+
+* **matcher**: (type: object, default: `null`)
+
+* **properties**: (type: array, default: `null`)
+
+#### #addThresholdStep
+
+* **color**: (type: string, default: `null`)
+
+* **value**: (type: integer, default: `null`)
+
+#### #addTarget
+
+* **target**: (type: object)
+
+
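+A minimal sketch of a stat panel using the methods above (the metric and
+threshold values are illustrative):
+
+```jsonnet
+local grafana = import 'grafana.libsonnet';
+
+// A single-value stat with a green base threshold and a red step at 100.
+grafana.panel.stat.new(title='Running pods', datasource='$datasource')
+.setOptions(calcs=['last'], colorMode='value', graphMode='area')
+.setFieldConfig(unit='short', min=0)
+.addThresholdStep(color='green')
+.addThresholdStep(color='red', value=100)
+.addTarget(grafana.target.prometheus.new(
+  expr='sum(kube_pod_status_phase{phase="Running"})',
+))
+```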
+
+### panel.table.new
+
+
+
+* **datasource**: (type: string, default: `"default"`)
+
+* **description**: (type: string, default: `null`)
+
+* **repeat**: (type: string, default: `null`)
+
+* **repeatDirection**: (type: string, default: `null`)
+
+* **title**: (type: string, default: `null`)
+
+* **transparent**: (type: boolean, default: `false`)
+
+
+#### #setFieldConfig
+
+* **displayName**: (type: string, default: `null`)
+
+* **max**: (type: integer, default: `null`)
+
+* **min**: (type: integer, default: `null`)
+
+* **thresholdMode**: (type: string, default: `"absolute"`)
+
+* **noValue**: (type: string, default: `null`)
+
+* **unit**: (type: string, default: `"short"`)
+
+* **width**: (type: integer, default: `null`)
+
+#### #setGridPos
+
+* **h**: (type: integer, default: `8`)
+ Panel height.
+* **w**: (type: integer, default: `12`)
+ Panel width.
+* **x**: (type: integer, default: `null`)
+ Panel x position.
+* **y**: (type: integer, default: `null`)
+ Panel y position.
+
+#### #setOptions
+
+* **showHeader**: (type: boolean, default: `true`)
+
+
+#### #addDataLink
+
+* **targetBlank**: (type: boolean, default: `true`)
+
+* **title**: (type: string, default: `null`)
+
+* **url**: (type: string, default: `null`)
+
+#### #addPanelLink
+
+* **targetBlank**: (type: boolean, default: `true`)
+
+* **title**: (type: string, default: `null`)
+
+* **url**: (type: string, default: `null`)
+
+#### #addMapping
+
+* **from**: (type: string, default: `null`)
+
+* **id**: (type: integer, default: `null`)
+
+* **operator**: (type: string, default: `null`)
+
+* **text**: (type: string, default: `null`)
+
+* **to**: (type: string, default: `null`)
+
+* **type**: (type: integer, default: `null`)
+
+* **value**: (type: string, default: `null`)
+
+#### #addOverride
+
+* **matcher**: (type: object, default: `null`)
+
+* **properties**: (type: array, default: `null`)
+
+#### #addThresholdStep
+
+* **color**: (type: string, default: `null`)
+
+* **value**: (type: integer, default: `null`)
+
+#### #addTarget
+
+* **target**: (type: object)
+
+
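+A minimal sketch of a table panel fed by an instant query (the expression
+is illustrative):
+
+```jsonnet
+local grafana = import 'grafana.libsonnet';
+
+// Instant table queries return one row per series instead of a time series.
+grafana.panel.table.new(title='Firing alerts', datasource='$datasource')
+.setFieldConfig(unit='short', noValue='0')
+.setOptions(showHeader=true)
+.addTarget(grafana.target.prometheus.new(
+  expr='ALERTS{alertstate="firing"}',
+  format='table',
+  instant=true,
+))
+```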
+
+### panel.text.new
+
+
+
+* **content**: (type: string, default: `null`)
+
+* **datasource**: (type: string, default: `"default"`)
+
+* **description**: (type: string, default: `null`)
+
+* **mode**: (type: string, default: `"markdown"`)
+
+* **repeat**: (type: string, default: `null`)
+
+* **repeatDirection**: (type: string, default: `null`)
+
+* **title**: (type: string, default: `null`)
+
+* **transparent**: (type: boolean, default: `false`)
+
+
+#### #setGridPos
+
+* **h**: (type: integer, default: `8`)
+ Panel height.
+* **w**: (type: integer, default: `12`)
+ Panel width.
+* **x**: (type: integer, default: `null`)
+ Panel x position.
+* **y**: (type: integer, default: `null`)
+ Panel y position.
+
+#### #addPanelLink
+
+* **targetBlank**: (type: boolean, default: `true`)
+
+* **title**: (type: string, default: `null`)
+
+* **url**: (type: string, default: `null`)
+
+#### #addTarget
+
+* **target**: (type: object)
+
+
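+A minimal sketch of a markdown text panel (the content is illustrative):
+
+```jsonnet
+local grafana = import 'grafana.libsonnet';
+
+grafana.panel.text.new(
+  title='About',
+  mode='markdown',
+  content='# Cluster overview\nGenerated from jsonnet.',
+)
+```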
+
+
+## target
+
+
+
+### target.prometheus.new
+
+
+
+* **datasource**: (type: string, default: `"default"`)
+
+* **expr**: (type: string, default: `null`)
+
+* **format**: (type: string, default: `"time_series"`)
+
+* **instant**: (type: boolean, default: `null`)
+
+* **interval**: (type: string, default: `null`)
+
+* **intervalFactor**: (type: integer, default: `null`)
+
+* **legendFormat**: (type: string, default: `null`)
+
+
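+A minimal sketch of a standalone target, typically passed to a panel's
+`#addTarget` (the expression is illustrative):
+
+```jsonnet
+local grafana = import 'grafana.libsonnet';
+
+grafana.target.prometheus.new(
+  expr='up == 0',
+  legendFormat='{{ job }}/{{ instance }}',
+  interval='1m',
+)
+```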
+
+
+
+
+## template
+
+
+
+### template.custom.new
+
+
+
+* **allValue**: (type: string, default: `null`)
+
+* **hide**: (type: integer, default: `0`)
+
+* **includeAll**: (type: boolean, default: `false`)
+
+* **label**: (type: string, default: `null`)
+
+* **multi**: (type: boolean, default: `false`)
+
+* **name**: (type: string, default: `null`)
+
+* **query**: (type: string, default: `null`)
+
+* **queryValue**: (type: string, default: `""`)
+
+* **skipUrlSync**: (type: boolean, default: `false`)
+
+
+#### #setCurrent
+
+* **selected**: (type: boolean, default: `false`)
+
+* **text**: (type: string, default: `null`)
+
+* **value**: (type: string, default: `null`)
+
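+A minimal sketch of a custom variable with a preselected value (the values
+are illustrative):
+
+```jsonnet
+local grafana = import 'grafana.libsonnet';
+
+// Comma-separated options, with '5m' marked as the current selection.
+grafana.template.custom.new(
+  name='interval',
+  label='Interval',
+  query='1m,5m,15m,1h',
+)
+.setCurrent(text='5m', value='5m')
+```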
+
+
+
+### template.datasource.new
+
+
+
+* **hide**: (type: integer, default: `0`)
+
+* **includeAll**: (type: boolean, default: `false`)
+
+* **label**: (type: string, default: `null`)
+
+* **multi**: (type: boolean, default: `false`)
+
+* **name**: (type: string, default: `null`)
+
+* **query**: (type: string, default: `null`)
+
+* **refresh**: (type: integer, default: `1`)
+
+* **regex**: (type: string, default: `null`)
+
+* **skipUrlSync**: (type: boolean, default: `false`)
+
+
+#### #setCurrent
+
+* **selected**: (type: boolean, default: `false`)
+
+* **text**: (type: string, default: `null`)
+
+* **value**: (type: string, default: `null`)
+
+
+
+
+### template.query.new
+
+
+
+* **allValue**: (type: string, default: `null`)
+
+* **datasource**: (type: string, default: `null`)
+
+* **definition**: (type: string, default: `null`)
+
+* **hide**: (type: integer, default: `0`)
+
+* **includeAll**: (type: boolean, default: `false`)
+
+* **label**: (type: string, default: `null`)
+
+* **multi**: (type: boolean, default: `false`)
+
+* **name**: (type: string, default: `null`)
+
+* **query**: (type: string, default: `null`)
+
+* **refresh**: (type: integer, default: `0`)
+
+* **regex**: (type: string, default: `null`)
+
+* **skipUrlSync**: (type: boolean, default: `false`)
+
+* **sort**: (type: integer, default: `0`)
+
+* **tagValuesQuery**: (type: string, default: `null`)
+
+* **tags**: (type: array, default: `null`)
+
+* **tagsQuery**: (type: string, default: `null`)
+
+* **useTags**: (type: boolean, default: `false`)
+
+
+#### #setCurrent
+
+* **selected**: (type: boolean, default: `null`)
+
+* **text**: (type: string, default: `null`)
+
+* **value**: (type: string, default: `null`)
+
+
+#### #addOption
+
+* **selected**: (type: boolean, default: `true`)
+
+* **text**: (type: string, default: `null`)
+
+* **value**: (type: string, default: `null`)
+
+
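+A sketch tying a query variable into a dashboard (the label names and
+datasource are illustrative):
+
+```jsonnet
+local grafana = import 'grafana.libsonnet';
+
+// A dashboard with a namespace variable driving a templated graph panel.
+grafana.dashboard.new(title='Kubernetes / Pods', uid='pods')
+.setTime(from='now-6h', to='now')
+.addTemplate(grafana.template.query.new(
+  name='namespace',
+  datasource='$datasource',
+  query='label_values(kube_pod_info, namespace)',
+  refresh=2,
+  sort=1,
+))
+.addPanel(
+  grafana.panel.graph.new(title='Pods per node')
+  .setGridPos(h=8, w=24, x=0, y=0)
+  .addTarget(grafana.target.prometheus.new(
+    expr='count(kube_pod_info{namespace="$namespace"}) by (node)',
+  ))
+)
+```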
+
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/dashboard.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/dashboard.libsonnet
new file mode 100644
index 0000000..faa25c6
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/dashboard.libsonnet
@@ -0,0 +1,87 @@
+// This file was generated by https://github.com/grafana/dashboard-spec
+
+{
+ new(
+ description=null,
+ editable=true,
+ graphTooltip=0,
+ refresh=null,
+ schemaVersion=25,
+ style='dark',
+ tags=[],
+ timezone=null,
+ title=null,
+ uid=null,
+ ):: {
+ [if description != null then 'description']: description,
+ [if editable != null then 'editable']: editable,
+ [if graphTooltip != null then 'graphTooltip']: graphTooltip,
+ [if refresh != null then 'refresh']: refresh,
+ [if schemaVersion != null then 'schemaVersion']: schemaVersion,
+ [if style != null then 'style']: style,
+ [if tags != null then 'tags']: tags,
+ [if timezone != null then 'timezone']: timezone,
+ [if title != null then 'title']: title,
+ [if uid != null then 'uid']: uid,
+
+ setTime(
+ from='now-6h',
+ to='now',
+ ):: self {}
+ + { time+: { [if from != null then 'from']: from } }
+ + { time+: { [if to != null then 'to']: to } }
+ ,
+
+ setTimepicker(
+ hidden=false,
+ refreshIntervals=['5s', '10s', '30s', '1m', '5m', '15m', '30m', '1h', '2h', '1d'],
+ ):: self {}
+ + { timepicker+: { [if hidden != null then 'hidden']: hidden } }
+ + { timepicker+: { [if refreshIntervals != null then 'refresh_intervals']: refreshIntervals } }
+ ,
+
+
+ addTemplate(
+ template
+ ):: self {}
+ + { templating+: { list+: [
+ template,
+ ] } },
+
+ addAnnotation(
+ builtIn=0,
+ datasource='default',
+ enable=true,
+ hide=false,
+ iconColor=null,
+ name=null,
+ rawQuery=null,
+ showIn=0,
+ ):: self {}
+ + { annotations+: { list+: [
+ {
+ [if builtIn != null then 'builtIn']: builtIn,
+ [if datasource != null then 'datasource']: datasource,
+ [if enable != null then 'enable']: enable,
+ [if hide != null then 'hide']: hide,
+ [if iconColor != null then 'iconColor']: iconColor,
+ [if name != null then 'name']: name,
+ [if rawQuery != null then 'rawQuery']: rawQuery,
+ [if showIn != null then 'showIn']: showIn,
+ },
+ ] } },
+
+
+ panels: [],
+ _nextPanelID:: 2,
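+    // Assign sequential panel ids: the added panel takes the next id, and
+    // any panels nested inside it (e.g. a row's children) take the ids
+    // immediately after it.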
+ addPanel(panel):: self {
+ local nextPanelID = super._nextPanelID,
+ panels+: [
+ panel { id: nextPanelID } +
+ if 'panels' in panel then { panels: std.mapWithIndex(function(i, p) p { id: nextPanelID + i + 1 }, panel.panels) } else {},
+ ],
+ _nextPanelID:: nextPanelID + 1 + (if 'panels' in panel then std.length(panel.panels) else 0),
+ },
+ addPanels(panels):: std.foldl(function(d, p) d.addPanel(p), panels, self),
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/grafana.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/grafana.libsonnet
new file mode 100644
index 0000000..28e65d1
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/grafana.libsonnet
@@ -0,0 +1,21 @@
+// This file was generated by https://github.com/grafana/dashboard-spec
+
+{
+ dashboard:: import 'dashboard.libsonnet',
+ panel:: {
+ gauge:: import 'panel/gauge.libsonnet',
+ graph:: import 'panel/graph.libsonnet',
+ row:: import 'panel/row.libsonnet',
+ stat:: import 'panel/stat.libsonnet',
+ table:: import 'panel/table.libsonnet',
+ text:: import 'panel/text.libsonnet',
+ },
+ target:: {
+ prometheus:: import 'target/prometheus.libsonnet',
+ },
+ template:: {
+ custom:: import 'template/custom.libsonnet',
+ datasource:: import 'template/datasource.libsonnet',
+ query:: import 'template/query.libsonnet',
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/gauge.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/gauge.libsonnet
new file mode 100644
index 0000000..715c444
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/gauge.libsonnet
@@ -0,0 +1,138 @@
+// This file was generated by https://github.com/grafana/dashboard-spec
+
+{
+ new(
+ datasource='default',
+ description=null,
+ repeat=null,
+ repeatDirection=null,
+ title=null,
+ transparent=false,
+ ):: {
+ [if datasource != null then 'datasource']: datasource,
+ [if description != null then 'description']: description,
+ [if repeat != null then 'repeat']: repeat,
+ [if repeatDirection != null then 'repeatDirection']: repeatDirection,
+ [if title != null then 'title']: title,
+ [if transparent != null then 'transparent']: transparent,
+ type: 'gauge',
+
+ setFieldConfig(
+ max=null,
+ min=null,
+ thresholdMode='absolute',
+ unit=null,
+ ):: self {}
+ + { fieldConfig+: { defaults+: { [if max != null then 'max']: max } } }
+ + { fieldConfig+: { defaults+: { [if min != null then 'min']: min } } }
+ + { fieldConfig+: { defaults+: { thresholds+: { [if thresholdMode != null then 'mode']: thresholdMode } } } }
+ + { fieldConfig+: { defaults+: { [if unit != null then 'unit']: unit } } }
+ ,
+
+ setGridPos(
+ h=8,
+ w=12,
+ x=null,
+ y=null,
+ ):: self {}
+ + { gridPos+: { [if h != null then 'h']: h } }
+ + { gridPos+: { [if w != null then 'w']: w } }
+ + { gridPos+: { [if x != null then 'x']: x } }
+ + { gridPos+: { [if y != null then 'y']: y } }
+ ,
+
+ setOptions(
+ calcs=['mean'],
+ fields=null,
+ orientation='auto',
+ showThresholdLabels=false,
+ showThresholdMarkers=true,
+ values=false,
+ ):: self {}
+ + { options+: { reduceOptions+: { [if calcs != null then 'calcs']: calcs } } }
+ + { options+: { reduceOptions+: { [if fields != null then 'fields']: fields } } }
+ + { options+: { [if orientation != null then 'orientation']: orientation } }
+ + { options+: { [if showThresholdLabels != null then 'showThresholdLabels']: showThresholdLabels } }
+ + { options+: { [if showThresholdMarkers != null then 'showThresholdMarkers']: showThresholdMarkers } }
+ + { options+: { reduceOptions+: { [if values != null then 'values']: values } } }
+ ,
+
+
+ addPanelLink(
+ targetBlank=true,
+ title=null,
+ url=null,
+ ):: self {}
+ + { links+: [
+ {
+ [if targetBlank != null then 'targetBlank']: targetBlank,
+ [if title != null then 'title']: title,
+ [if url != null then 'url']: url,
+ },
+ ] },
+
+ addDataLink(
+ targetBlank=true,
+ title=null,
+ url=null,
+ ):: self {}
+ + { fieldConfig+: { defaults+: { links+: [
+ {
+ [if targetBlank != null then 'targetBlank']: targetBlank,
+ [if title != null then 'title']: title,
+ [if url != null then 'url']: url,
+ },
+ ] } } },
+
+ addMapping(
+ from=null,
+ id=null,
+ operator=null,
+ text=null,
+ to=null,
+ type=null,
+ value=null,
+ ):: self {}
+ + { fieldConfig+: { defaults+: { mappings+: [
+ {
+ [if from != null then 'from']: from,
+ [if id != null then 'id']: id,
+ [if operator != null then 'operator']: operator,
+ [if text != null then 'text']: text,
+ [if to != null then 'to']: to,
+ [if type != null then 'type']: type,
+ [if value != null then 'value']: value,
+ },
+ ] } } },
+
+ addOverride(
+ matcher=null,
+ properties=null,
+ ):: self {}
+ + { fieldConfig+: { overrides+: [
+ {
+ [if matcher != null then 'matcher']: matcher,
+ [if properties != null then 'properties']: properties,
+ },
+ ] } },
+
+ addThresholdStep(
+ color=null,
+ value=null,
+ ):: self {}
+ + { fieldConfig+: { defaults+: { thresholds+: { steps+: [
+ {
+ [if color != null then 'color']: color,
+ [if value != null then 'value']: value,
+ },
+ ] } } } },
+
+ addTarget(
+ target
+ ):: self {}
+ + { targets+: [
+ target,
+ ] },
+
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/graph.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/graph.libsonnet
new file mode 100644
index 0000000..34985a1
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/graph.libsonnet
@@ -0,0 +1,257 @@
+// This file was generated by https://github.com/grafana/dashboard-spec
+
+{
+ new(
+ bars=false,
+ dashLength=10,
+ dashes=false,
+ datasource='default',
+ decimals=null,
+ description=null,
+ fill=1,
+ fillGradient=0,
+ hiddenSeries=false,
+ lines=true,
+ linewidth=1,
+ nullPointMode='null',
+ percentage=false,
+ pointradius=null,
+ points=false,
+ repeat=null,
+ repeatDirection=null,
+ spaceLength=10,
+ stack=false,
+ steppedLine=false,
+ timeFrom=null,
+ timeShift=null,
+ title=null,
+ transparent=false,
+ ):: {
+ [if bars != null then 'bars']: bars,
+ [if dashLength != null then 'dashLength']: dashLength,
+ [if dashes != null then 'dashes']: dashes,
+ [if datasource != null then 'datasource']: datasource,
+ [if decimals != null then 'decimals']: decimals,
+ [if description != null then 'description']: description,
+ [if fill != null then 'fill']: fill,
+ [if fillGradient != null then 'fillGradient']: fillGradient,
+ [if hiddenSeries != null then 'hiddenSeries']: hiddenSeries,
+ [if lines != null then 'lines']: lines,
+ [if linewidth != null then 'linewidth']: linewidth,
+ [if nullPointMode != null then 'nullPointMode']: nullPointMode,
+ [if percentage != null then 'percentage']: percentage,
+ [if pointradius != null then 'pointradius']: pointradius,
+ [if points != null then 'points']: points,
+ [if repeat != null then 'repeat']: repeat,
+ [if repeatDirection != null then 'repeatDirection']: repeatDirection,
+ [if spaceLength != null then 'spaceLength']: spaceLength,
+ [if stack != null then 'stack']: stack,
+ [if steppedLine != null then 'steppedLine']: steppedLine,
+ [if timeFrom != null then 'timeFrom']: timeFrom,
+ [if timeShift != null then 'timeShift']: timeShift,
+ [if title != null then 'title']: title,
+ [if transparent != null then 'transparent']: transparent,
+ renderer: 'flot',
+ type: 'graph',
+ tooltip+: { value_type: 'individual' },
+
+ setGridPos(
+ h=8,
+ w=12,
+ x=null,
+ y=null,
+ ):: self {}
+ + { gridPos+: { [if h != null then 'h']: h } }
+ + { gridPos+: { [if w != null then 'w']: w } }
+ + { gridPos+: { [if x != null then 'x']: x } }
+ + { gridPos+: { [if y != null then 'y']: y } }
+ ,
+
+ setLegend(
+ alignAsTable=null,
+ avg=false,
+ current=false,
+ max=false,
+ min=false,
+ rightSide=false,
+ show=true,
+ sideWidth=null,
+ total=false,
+ values=true,
+ ):: self {}
+ + { legend+: { [if alignAsTable != null then 'alignAsTable']: alignAsTable } }
+ + { legend+: { [if avg != null then 'avg']: avg } }
+ + { legend+: { [if current != null then 'current']: current } }
+ + { legend+: { [if max != null then 'max']: max } }
+ + { legend+: { [if min != null then 'min']: min } }
+ + { legend+: { [if rightSide != null then 'rightSide']: rightSide } }
+ + { legend+: { [if show != null then 'show']: show } }
+ + { legend+: { [if sideWidth != null then 'sideWidth']: sideWidth } }
+ + { legend+: { [if total != null then 'total']: total } }
+ + { legend+: { [if values != null then 'values']: values } }
+ ,
+
+ setThresholds(
+ thresholdMode='absolute',
+ ):: self {}
+ + { thresholds+: { [if thresholdMode != null then 'mode']: thresholdMode } }
+ ,
+
+ setTooltip(
+ shared=true,
+ sort=2,
+ ):: self {}
+ + { tooltip+: { [if shared != null then 'shared']: shared } }
+ + { tooltip+: { [if sort != null then 'sort']: sort } }
+ ,
+
+ setXaxis(
+ buckets=null,
+ mode='time',
+ name=null,
+ show=true,
+ ):: self {}
+ + { xaxis+: { [if buckets != null then 'buckets']: buckets } }
+ + { xaxis+: { [if mode != null then 'mode']: mode } }
+ + { xaxis+: { [if name != null then 'name']: name } }
+ + { xaxis+: { [if show != null then 'show']: show } }
+ ,
+
+ setYaxis(
+ align=false,
+ alignLevel=0,
+ ):: self {}
+ + { yaxis+: { [if align != null then 'align']: align } }
+ + { yaxis+: { [if alignLevel != null then 'alignLevel']: alignLevel } }
+ ,
+
+
+ addDataLink(
+ targetBlank=true,
+ title=null,
+ url=null,
+ ):: self {}
+ + { options+: { dataLinks+: [
+ {
+ [if targetBlank != null then 'targetBlank']: targetBlank,
+ [if title != null then 'title']: title,
+ [if url != null then 'url']: url,
+ },
+ ] } },
+
+ addPanelLink(
+ targetBlank=true,
+ title=null,
+ url=null,
+ ):: self {}
+ + { links+: [
+ {
+ [if targetBlank != null then 'targetBlank']: targetBlank,
+ [if title != null then 'title']: title,
+ [if url != null then 'url']: url,
+ },
+ ] },
+
+ addOverride(
+ matcher=null,
+ properties=null,
+ ):: self {}
+ + { fieldConfig+: { overrides+: [
+ {
+ [if matcher != null then 'matcher']: matcher,
+ [if properties != null then 'properties']: properties,
+ },
+ ] } },
+
+ addSeriesOverride(
+ alias=null,
+ bars=null,
+ color=null,
+ dashLength=null,
+ dashes=null,
+ fill=null,
+ fillBelowTo=null,
+ fillGradient=null,
+ hiddenSeries=null,
+ hideTooltip=null,
+ legend=null,
+ lines=null,
+ linewidth=null,
+ nullPointMode=null,
+ pointradius=null,
+ points=null,
+ spaceLength=null,
+ stack=null,
+ steppedLine=null,
+ transform=null,
+ yaxis=null,
+ zindex=null,
+ ):: self {}
+ + { seriesOverrides+: [
+ {
+ [if alias != null then 'alias']: alias,
+ [if bars != null then 'bars']: bars,
+ [if color != null then 'color']: color,
+ [if dashLength != null then 'dashLength']: dashLength,
+ [if dashes != null then 'dashes']: dashes,
+ [if fill != null then 'fill']: fill,
+ [if fillBelowTo != null then 'fillBelowTo']: fillBelowTo,
+ [if fillGradient != null then 'fillGradient']: fillGradient,
+ [if hiddenSeries != null then 'hiddenSeries']: hiddenSeries,
+ [if hideTooltip != null then 'hideTooltip']: hideTooltip,
+ [if legend != null then 'legend']: legend,
+ [if lines != null then 'lines']: lines,
+ [if linewidth != null then 'linewidth']: linewidth,
+ [if nullPointMode != null then 'nullPointMode']: nullPointMode,
+ [if pointradius != null then 'pointradius']: pointradius,
+ [if points != null then 'points']: points,
+ [if spaceLength != null then 'spaceLength']: spaceLength,
+ [if stack != null then 'stack']: stack,
+ [if steppedLine != null then 'steppedLine']: steppedLine,
+ [if transform != null then 'transform']: transform,
+ [if yaxis != null then 'yaxis']: yaxis,
+ [if zindex != null then 'zindex']: zindex,
+ },
+ ] },
+
+ addThresholdStep(
+ color=null,
+ value=null,
+ ):: self {}
+ + { thresholds+: { steps+: [
+ {
+ [if color != null then 'color']: color,
+ [if value != null then 'value']: value,
+ },
+ ] } },
+
+ addTarget(
+ target
+ ):: self {}
+ + { targets+: [
+ target,
+ ] },
+
+ addYaxis(
+ decimals=null,
+ format='short',
+ label=null,
+ logBase=1,
+ max=null,
+ min=null,
+ show=true,
+ ):: self {}
+ + { yaxes+: [
+ {
+ [if decimals != null then 'decimals']: decimals,
+ [if format != null then 'format']: format,
+ [if label != null then 'label']: label,
+ [if logBase != null then 'logBase']: logBase,
+ [if max != null then 'max']: max,
+ [if min != null then 'min']: min,
+ [if show != null then 'show']: show,
+ },
+ ] },
+
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/row.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/row.libsonnet
new file mode 100644
index 0000000..e8a21d3
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/row.libsonnet
@@ -0,0 +1,45 @@
+// This file was generated by https://github.com/grafana/dashboard-spec
+
+{
+ new(
+ collapse=true,
+ collapsed=true,
+ datasource=null,
+ repeat=null,
+ repeatIteration=null,
+ showTitle=true,
+ title=null,
+ titleSize='h6',
+ ):: {
+ [if collapse != null then 'collapse']: collapse,
+ [if collapsed != null then 'collapsed']: collapsed,
+ [if datasource != null then 'datasource']: datasource,
+ [if repeat != null then 'repeat']: repeat,
+ [if repeatIteration != null then 'repeatIteration']: repeatIteration,
+ [if showTitle != null then 'showTitle']: showTitle,
+ [if title != null then 'title']: title,
+ [if titleSize != null then 'titleSize']: titleSize,
+ type: 'row',
+
+ setGridPos(
+ h=8,
+ w=12,
+ x=null,
+ y=null,
+ ):: self {}
+ + { gridPos+: { [if h != null then 'h']: h } }
+ + { gridPos+: { [if w != null then 'w']: w } }
+ + { gridPos+: { [if x != null then 'x']: x } }
+ + { gridPos+: { [if y != null then 'y']: y } }
+ ,
+
+
+ addPanel(
+ panel
+ ):: self {}
+ + { panels+: [
+ panel,
+ ] },
+
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/stat.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/stat.libsonnet
new file mode 100644
index 0000000..a14c938
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/stat.libsonnet
@@ -0,0 +1,142 @@
+// This file was generated by https://github.com/grafana/dashboard-spec
+
+{
+ new(
+ datasource='default',
+ description=null,
+ repeat=null,
+ repeatDirection=null,
+ title=null,
+ transparent=false,
+ ):: {
+ [if datasource != null then 'datasource']: datasource,
+ [if description != null then 'description']: description,
+ [if repeat != null then 'repeat']: repeat,
+ [if repeatDirection != null then 'repeatDirection']: repeatDirection,
+ [if title != null then 'title']: title,
+ [if transparent != null then 'transparent']: transparent,
+ type: 'stat',
+
+ setFieldConfig(
+ max=null,
+ min=null,
+ thresholdMode='absolute',
+ unit=null,
+ ):: self {}
+ + { fieldConfig+: { defaults+: { [if max != null then 'max']: max } } }
+ + { fieldConfig+: { defaults+: { [if min != null then 'min']: min } } }
+ + { fieldConfig+: { defaults+: { thresholds+: { [if thresholdMode != null then 'mode']: thresholdMode } } } }
+ + { fieldConfig+: { defaults+: { [if unit != null then 'unit']: unit } } }
+ ,
+
+ setGridPos(
+ h=8,
+ w=12,
+ x=null,
+ y=null,
+ ):: self {}
+ + { gridPos+: { [if h != null then 'h']: h } }
+ + { gridPos+: { [if w != null then 'w']: w } }
+ + { gridPos+: { [if x != null then 'x']: x } }
+ + { gridPos+: { [if y != null then 'y']: y } }
+ ,
+
+ setOptions(
+ calcs=['mean'],
+ colorMode='value',
+ fields=null,
+ graphMode='none',
+ justifyMode='auto',
+ orientation='auto',
+ textMode='auto',
+ values=false,
+ ):: self {}
+ + { options+: { reduceOptions+: { [if calcs != null then 'calcs']: calcs } } }
+ + { options+: { [if colorMode != null then 'colorMode']: colorMode } }
+ + { options+: { reduceOptions+: { [if fields != null then 'fields']: fields } } }
+ + { options+: { [if graphMode != null then 'graphMode']: graphMode } }
+ + { options+: { [if justifyMode != null then 'justifyMode']: justifyMode } }
+ + { options+: { [if orientation != null then 'orientation']: orientation } }
+ + { options+: { [if textMode != null then 'textMode']: textMode } }
+ + { options+: { reduceOptions+: { [if values != null then 'values']: values } } }
+ ,
+
+
+ addPanelLink(
+ targetBlank=true,
+ title=null,
+ url=null,
+ ):: self {}
+ + { links+: [
+ {
+ [if targetBlank != null then 'targetBlank']: targetBlank,
+ [if title != null then 'title']: title,
+ [if url != null then 'url']: url,
+ },
+ ] },
+
+ addDataLink(
+ targetBlank=true,
+ title=null,
+ url=null,
+ ):: self {}
+ + { fieldConfig+: { defaults+: { links+: [
+ {
+ [if targetBlank != null then 'targetBlank']: targetBlank,
+ [if title != null then 'title']: title,
+ [if url != null then 'url']: url,
+ },
+ ] } } },
+
+ addMapping(
+ from=null,
+ id=null,
+ operator=null,
+ text=null,
+ to=null,
+ type=null,
+ value=null,
+ ):: self {}
+ + { fieldConfig+: { defaults+: { mappings+: [
+ {
+ [if from != null then 'from']: from,
+ [if id != null then 'id']: id,
+ [if operator != null then 'operator']: operator,
+ [if text != null then 'text']: text,
+ [if to != null then 'to']: to,
+ [if type != null then 'type']: type,
+ [if value != null then 'value']: value,
+ },
+ ] } } },
+
+ addOverride(
+ matcher=null,
+ properties=null,
+ ):: self {}
+ + { fieldConfig+: { overrides+: [
+ {
+ [if matcher != null then 'matcher']: matcher,
+ [if properties != null then 'properties']: properties,
+ },
+ ] } },
+
+ addThresholdStep(
+ color=null,
+ value=null,
+ ):: self {}
+ + { fieldConfig+: { defaults+: { thresholds+: { steps+: [
+ {
+ [if color != null then 'color']: color,
+ [if value != null then 'value']: value,
+ },
+ ] } } } },
+
+ addTarget(
+ target
+ ):: self {}
+ + { targets+: [
+ target,
+ ] },
+
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/table.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/table.libsonnet
new file mode 100644
index 0000000..981d1ea
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/table.libsonnet
@@ -0,0 +1,134 @@
+// This file was generated by https://github.com/grafana/dashboard-spec
+
+{
+ new(
+ datasource='default',
+ description=null,
+ repeat=null,
+ repeatDirection=null,
+ title=null,
+ transparent=false,
+ ):: {
+ [if datasource != null then 'datasource']: datasource,
+ [if description != null then 'description']: description,
+ [if repeat != null then 'repeat']: repeat,
+ [if repeatDirection != null then 'repeatDirection']: repeatDirection,
+ [if title != null then 'title']: title,
+ [if transparent != null then 'transparent']: transparent,
+ type: 'table',
+
+ setFieldConfig(
+ displayName=null,
+ max=null,
+ min=null,
+ thresholdMode='absolute',
+ noValue=null,
+ unit='short',
+ width=null,
+ ):: self {}
+ + { fieldConfig+: { defaults+: { [if displayName != null then 'displayName']: displayName } } }
+ + { fieldConfig+: { defaults+: { [if max != null then 'max']: max } } }
+ + { fieldConfig+: { defaults+: { [if min != null then 'min']: min } } }
+ + { fieldConfig+: { defaults+: { thresholds+: { [if thresholdMode != null then 'mode']: thresholdMode } } } }
+ + { fieldConfig+: { defaults+: { [if noValue != null then 'noValue']: noValue } } }
+ + { fieldConfig+: { defaults+: { [if unit != null then 'unit']: unit } } }
+ + { fieldConfig+: { defaults+: { custom+: { [if width != null then 'width']: width } } } }
+ ,
+
+ setGridPos(
+ h=8,
+ w=12,
+ x=null,
+ y=null,
+ ):: self {}
+ + { gridPos+: { [if h != null then 'h']: h } }
+ + { gridPos+: { [if w != null then 'w']: w } }
+ + { gridPos+: { [if x != null then 'x']: x } }
+ + { gridPos+: { [if y != null then 'y']: y } }
+ ,
+
+ setOptions(
+ showHeader=true,
+ ):: self {}
+ + { options+: { [if showHeader != null then 'showHeader']: showHeader } }
+ ,
+
+
+ addDataLink(
+ targetBlank=true,
+ title=null,
+ url=null,
+ ):: self {}
+ + { fieldConfig+: { defaults+: { links+: [
+ {
+ [if targetBlank != null then 'targetBlank']: targetBlank,
+ [if title != null then 'title']: title,
+ [if url != null then 'url']: url,
+ },
+ ] } } },
+
+ addPanelLink(
+ targetBlank=true,
+ title=null,
+ url=null,
+ ):: self {}
+ + { links+: [
+ {
+ [if targetBlank != null then 'targetBlank']: targetBlank,
+ [if title != null then 'title']: title,
+ [if url != null then 'url']: url,
+ },
+ ] },
+
+ addMapping(
+ from=null,
+ id=null,
+ operator=null,
+ text=null,
+ to=null,
+ type=null,
+ value=null,
+ ):: self {}
+ + { fieldConfig+: { defaults+: { mappings+: [
+ {
+ [if from != null then 'from']: from,
+ [if id != null then 'id']: id,
+ [if operator != null then 'operator']: operator,
+ [if text != null then 'text']: text,
+ [if to != null then 'to']: to,
+ [if type != null then 'type']: type,
+ [if value != null then 'value']: value,
+ },
+ ] } } },
+
+ addOverride(
+ matcher=null,
+ properties=null,
+ ):: self {}
+ + { fieldConfig+: { overrides+: [
+ {
+ [if matcher != null then 'matcher']: matcher,
+ [if properties != null then 'properties']: properties,
+ },
+ ] } },
+
+ addThresholdStep(
+ color=null,
+ value=null,
+ ):: self {}
+ + { fieldConfig+: { defaults+: { thresholds+: { steps+: [
+ {
+ [if color != null then 'color']: color,
+ [if value != null then 'value']: value,
+ },
+ ] } } } },
+
+ addTarget(
+ target
+ ):: self {}
+ + { targets+: [
+ target,
+ ] },
+
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/text.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/text.libsonnet
new file mode 100644
index 0000000..1c4c682
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/panel/text.libsonnet
@@ -0,0 +1,58 @@
+// This file was generated by https://github.com/grafana/dashboard-spec
+
+{
+ new(
+ content=null,
+ datasource='default',
+ description=null,
+ mode='markdown',
+ repeat=null,
+ repeatDirection=null,
+ title=null,
+ transparent=false,
+ ):: {
+ [if content != null then 'content']: content,
+ [if datasource != null then 'datasource']: datasource,
+ [if description != null then 'description']: description,
+ [if mode != null then 'mode']: mode,
+ [if repeat != null then 'repeat']: repeat,
+ [if repeatDirection != null then 'repeatDirection']: repeatDirection,
+ [if title != null then 'title']: title,
+ [if transparent != null then 'transparent']: transparent,
+ type: 'text',
+
+ setGridPos(
+ h=8,
+ w=12,
+ x=null,
+ y=null,
+ ):: self {}
+ + { gridPos+: { [if h != null then 'h']: h } }
+ + { gridPos+: { [if w != null then 'w']: w } }
+ + { gridPos+: { [if x != null then 'x']: x } }
+ + { gridPos+: { [if y != null then 'y']: y } }
+ ,
+
+
+ addPanelLink(
+ targetBlank=true,
+ title=null,
+ url=null,
+ ):: self {}
+ + { links+: [
+ {
+ [if targetBlank != null then 'targetBlank']: targetBlank,
+ [if title != null then 'title']: title,
+ [if url != null then 'url']: url,
+ },
+ ] },
+
+ addTarget(
+ target
+ ):: self {}
+ + { targets+: [
+ target,
+ ] },
+
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/target/prometheus.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/target/prometheus.libsonnet
new file mode 100644
index 0000000..e6e5fa0
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/target/prometheus.libsonnet
@@ -0,0 +1,21 @@
+// This file was generated by https://github.com/grafana/dashboard-spec
+
+{
+ new(
+ datasource='default',
+ expr=null,
+ format='time_series',
+ instant=null,
+ interval=null,
+ intervalFactor=null,
+ legendFormat=null,
+ ):: {
+ [if datasource != null then 'datasource']: datasource,
+ [if expr != null then 'expr']: expr,
+ [if format != null then 'format']: format,
+ [if instant != null then 'instant']: instant,
+ [if interval != null then 'interval']: interval,
+ [if intervalFactor != null then 'intervalFactor']: intervalFactor,
+ [if legendFormat != null then 'legendFormat']: legendFormat,
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/template/custom.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/template/custom.libsonnet
new file mode 100644
index 0000000..3755b62
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/template/custom.libsonnet
@@ -0,0 +1,36 @@
+// This file was generated by https://github.com/grafana/dashboard-spec
+
+{
+ new(
+ allValue=null,
+ hide=0,
+ includeAll=false,
+ label=null,
+ multi=false,
+ name=null,
+ query=null,
+ queryValue='',
+ skipUrlSync=false,
+ ):: {
+ [if allValue != null then 'allValue']: allValue,
+ [if hide != null then 'hide']: hide,
+ [if includeAll != null then 'includeAll']: includeAll,
+ [if label != null then 'label']: label,
+ [if multi != null then 'multi']: multi,
+ [if name != null then 'name']: name,
+ [if query != null then 'query']: query,
+ [if queryValue != null then 'queryValue']: queryValue,
+ [if skipUrlSync != null then 'skipUrlSync']: skipUrlSync,
+ type: 'custom',
+
+ setCurrent(
+ selected=false,
+ text=null,
+ value=null,
+ ):: self {}
+ + { current+: { [if selected != null then 'selected']: selected } }
+ + { current+: { [if text != null then 'text']: text } }
+ + { current+: { [if value != null then 'value']: value } },
+
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/template/datasource.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/template/datasource.libsonnet
new file mode 100644
index 0000000..0bdaf83
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/template/datasource.libsonnet
@@ -0,0 +1,36 @@
+// This file was generated by https://github.com/grafana/dashboard-spec
+
+{
+ new(
+ hide=0,
+ includeAll=false,
+ label=null,
+ multi=false,
+ name=null,
+ query=null,
+ refresh=1,
+ regex=null,
+ skipUrlSync=false,
+ ):: {
+ [if hide != null then 'hide']: hide,
+ [if includeAll != null then 'includeAll']: includeAll,
+ [if label != null then 'label']: label,
+ [if multi != null then 'multi']: multi,
+ [if name != null then 'name']: name,
+ [if query != null then 'query']: query,
+ [if refresh != null then 'refresh']: refresh,
+ [if regex != null then 'regex']: regex,
+ [if skipUrlSync != null then 'skipUrlSync']: skipUrlSync,
+ type: 'datasource',
+
+ setCurrent(
+ selected=false,
+ text=null,
+ value=null,
+ ):: self {}
+ + { current+: { [if selected != null then 'selected']: selected } }
+ + { current+: { [if text != null then 'text']: text } }
+ + { current+: { [if value != null then 'value']: value } },
+
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/template/query.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/template/query.libsonnet
new file mode 100644
index 0000000..951cef7
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/grafana/grafonnet-lib/grafonnet-7.0/template/query.libsonnet
@@ -0,0 +1,52 @@
+// This file was generated by https://github.com/grafana/dashboard-spec
+
+{
+ new(
+ allValue=null,
+ datasource=null,
+ definition=null,
+ hide=0,
+ includeAll=false,
+ label=null,
+ multi=false,
+ name=null,
+ query=null,
+ refresh=0,
+ regex=null,
+ skipUrlSync=false,
+ sort=0,
+ tagValuesQuery=null,
+ tags=null,
+ tagsQuery=null,
+ useTags=false,
+ ):: {
+ [if allValue != null then 'allValue']: allValue,
+ [if datasource != null then 'datasource']: datasource,
+ [if definition != null then 'definition']: definition,
+ [if hide != null then 'hide']: hide,
+ [if includeAll != null then 'includeAll']: includeAll,
+ [if label != null then 'label']: label,
+ [if multi != null then 'multi']: multi,
+ [if name != null then 'name']: name,
+ [if query != null then 'query']: query,
+ [if refresh != null then 'refresh']: refresh,
+ [if regex != null then 'regex']: regex,
+ [if skipUrlSync != null then 'skipUrlSync']: skipUrlSync,
+ [if sort != null then 'sort']: sort,
+ [if tagValuesQuery != null then 'tagValuesQuery']: tagValuesQuery,
+ [if tags != null then 'tags']: tags,
+ [if tagsQuery != null then 'tagsQuery']: tagsQuery,
+ [if useTags != null then 'useTags']: useTags,
+ type: 'query',
+
+ setCurrent(
+ selected=null,
+ text=null,
+ value=null,
+ ):: self {}
+ + { current+: { [if selected != null then 'selected']: selected } }
+ + { current+: { [if text != null then 'text']: text } }
+ + { current+: { [if value != null then 'value']: value } },
+
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.github/workflows/check-with-upstream.yaml b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.github/workflows/check-with-upstream.yaml
new file mode 100644
index 0000000..00fd90f
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.github/workflows/check-with-upstream.yaml
@@ -0,0 +1,14 @@
+name: check-with-upstream
+# Run every Monday.
+on:
+ schedule:
+ - cron: '0 0 * * 1'
+jobs:
+ check-selectors-ksm:
+ runs-on: ubuntu-latest
+ name: Check if KSM selectors are present on applicable metrics.
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ persist-credentials: false
+ - run: make --always-make check-selectors-ksm
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.github/workflows/ci.yaml b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.github/workflows/ci.yaml
new file mode 100644
index 0000000..bd17701
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.github/workflows/ci.yaml
@@ -0,0 +1,53 @@
+name: ci
+on:
+ - push
+ - pull_request
+jobs:
+ generate:
+ runs-on: ubuntu-latest
+ name: Generate yaml
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ persist-credentials: false
+ - run: make --always-make generate && git diff --exit-code
+ jsonnet-lint:
+ runs-on: ubuntu-latest
+ name: Jsonnet linter
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ persist-credentials: false
+ - run: make --always-make jsonnet-lint
+ dashboards-lint:
+ runs-on: ubuntu-latest
+ name: Grafana dashboard linter
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ persist-credentials: false
+ - run: make --always-make dashboards-lint
+ alerts-lint:
+ runs-on: ubuntu-latest
+ name: Alerts linter
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ persist-credentials: false
+ - run: make --always-make alerts-lint
+ fmt:
+ runs-on: ubuntu-latest
+ name: Jsonnet formatter
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ persist-credentials: false
+ - run: make --always-make fmt && git diff --exit-code
+ unit-tests:
+ runs-on: ubuntu-latest
+ name: Unit tests
+ steps:
+ - uses: actions/checkout@v2
+ with:
+ persist-credentials: false
+ - run: make --always-make test
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.gitignore b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.gitignore
new file mode 100644
index 0000000..a4353d9
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.gitignore
@@ -0,0 +1,6 @@
+prometheus_alerts.yaml
+prometheus_rules.yaml
+dashboards_out
+vendor
+jsonnetfile.lock.json
+tmp
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.lint b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.lint
new file mode 100644
index 0000000..4b01192
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/.lint
@@ -0,0 +1,3 @@
+exclusions:
+ template-job-rule:
+ panel-job-instance-rule:
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/LICENSE b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/Makefile b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/Makefile
new file mode 100644
index 0000000..f88faca
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/Makefile
@@ -0,0 +1,84 @@
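+# kubernetes-mixin build entry points: `make generate` renders the alert/rule
+# YAML and the Grafana dashboards, `make lint` and `make test` validate them,
+# and `make all` runs fmt, generate, lint and test.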
+BIN_DIR ?= $(shell pwd)/tmp/bin
+
+JSONNET_VENDOR=vendor
+GRAFANA_DASHBOARD_LINTER_BIN=$(BIN_DIR)/dashboard-linter
+JB_BIN=$(BIN_DIR)/jb
+JSONNET_BIN=$(BIN_DIR)/jsonnet
+JSONNETLINT_BIN=$(BIN_DIR)/jsonnet-lint
+JSONNETFMT_BIN=$(BIN_DIR)/jsonnetfmt
+PROMTOOL_BIN=$(BIN_DIR)/promtool
+TOOLING=$(JB_BIN) $(JSONNETLINT_BIN) $(JSONNET_BIN) $(JSONNETFMT_BIN) $(PROMTOOL_BIN) $(GRAFANA_DASHBOARD_LINTER_BIN)
+JSONNETFMT_ARGS=-n 2 --max-blank-lines 2 --string-style s --comment-style s
+SRC_DIR ?=dashboards
+OUT_DIR ?=dashboards_out
+
+.PHONY: all
+all: fmt generate lint test
+
+.PHONY: generate
+generate: prometheus_alerts.yaml prometheus_rules.yaml $(OUT_DIR)
+
+$(JSONNET_VENDOR): $(JB_BIN) jsonnetfile.json
+ $(JB_BIN) install
+
+.PHONY: fmt
+fmt: $(JSONNETFMT_BIN)
+ find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
+ xargs -n 1 -- $(JSONNETFMT_BIN) $(JSONNETFMT_ARGS) -i
+
+prometheus_alerts.yaml: $(JSONNET_BIN) mixin.libsonnet lib/alerts.jsonnet alerts/*.libsonnet
+ @$(JSONNET_BIN) -J vendor -S lib/alerts.jsonnet > $@
+
+prometheus_rules.yaml: $(JSONNET_BIN) mixin.libsonnet lib/rules.jsonnet rules/*.libsonnet
+ @$(JSONNET_BIN) -J vendor -S lib/rules.jsonnet > $@
+
+$(OUT_DIR): $(JSONNET_BIN) $(JSONNET_VENDOR) mixin.libsonnet lib/dashboards.jsonnet $(SRC_DIR)/*.libsonnet
+ @mkdir -p $(OUT_DIR)
+ @$(JSONNET_BIN) -J vendor -m $(OUT_DIR) lib/dashboards.jsonnet
+
+.PHONY: lint
+lint: jsonnet-lint alerts-lint dashboards-lint
+
+.PHONY: jsonnet-lint
+jsonnet-lint: $(JSONNETLINT_BIN) $(JSONNET_VENDOR)
+ @find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
+ xargs -n 1 -- $(JSONNETLINT_BIN) -J vendor
+
+
+.PHONY: alerts-lint
+alerts-lint: $(PROMTOOL_BIN) prometheus_alerts.yaml prometheus_rules.yaml
+ @$(PROMTOOL_BIN) check rules prometheus_rules.yaml
+ @$(PROMTOOL_BIN) check rules prometheus_alerts.yaml
+
+$(OUT_DIR)/.lint: $(OUT_DIR)
+ @cp .lint $@
+
+.PHONY: dashboards-lint
+dashboards-lint: $(GRAFANA_DASHBOARD_LINTER_BIN) $(OUT_DIR)/.lint
+ # Replace $$interval:$$resolution var with $$__rate_interval to make dashboard-linter happy.
+ @sed -i -e 's/$$interval:$$resolution/$$__rate_interval/g' $(OUT_DIR)/*.json
+ @find $(OUT_DIR) -name '*.json' -print0 | xargs -n 1 -0 $(GRAFANA_DASHBOARD_LINTER_BIN) lint --strict
+
+
+.PHONY: clean
+clean:
+ # Remove all files and directories ignored by git.
+ git clean -Xfd .
+
+.PHONY: test
+test: $(PROMTOOL_BIN) prometheus_alerts.yaml prometheus_rules.yaml
+ @$(PROMTOOL_BIN) test rules tests.yaml
+
+$(BIN_DIR):
+ mkdir -p $(BIN_DIR)
+
+$(TOOLING): $(BIN_DIR)
+	@echo Installing tools from scripts/tools.go
+ @cd scripts && go list -mod=mod -tags tools -f '{{ range .Imports }}{{ printf "%s\n" .}}{{end}}' ./ | xargs -tI % go build -mod=mod -o $(BIN_DIR) %
+
+########################################
+# "check-with-upstream" workflow checks.
+########################################
+
+check-selectors-ksm:
+ @./scripts/check-selectors-ksm.sh
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/OWNERS b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/OWNERS
new file mode 100644
index 0000000..fb831d5
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/OWNERS
@@ -0,0 +1,19 @@
+# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md
+
+approvers:
+- brancz
+- csmarchbanks
+- metalmatze
+- tomwilkie
+- s-urbaniak
+- povilasv
+- paulfantom
+
+reviewers:
+- brancz
+- csmarchbanks
+- metalmatze
+- tomwilkie
+- s-urbaniak
+- povilasv
+- paulfantom
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/README.md b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/README.md
new file mode 100644
index 0000000..e4cee2d
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/README.md
@@ -0,0 +1,256 @@
+# Prometheus Monitoring Mixin for Kubernetes
+[![ci](https://github.com/kubernetes-monitoring/kubernetes-mixin/actions/workflows/ci.yaml/badge.svg)](https://github.com/kubernetes-monitoring/kubernetes-mixin/actions/workflows/ci.yaml)
+
+> NOTE: This project is in a *pre-release* stage. Flags, configuration, behaviour and design may change significantly in future releases.
+
+A set of Grafana dashboards and Prometheus alerts for Kubernetes.
+
+## Releases
+
+| Release branch | Kubernetes Compatibility | Prometheus Compatibility | Kube-state-metrics Compatibility |
+| -------------- | -------------------------- | ------------------------ | -------------------------------- |
+| release-0.1 | v1.13 and before | | |
+| release-0.2 | v1.14.1 and before | v2.11.0+ | |
+| release-0.3 | v1.17 and before | v2.11.0+ | |
+| release-0.4 | v1.18 | v2.11.0+ | |
+| release-0.5 | v1.19 | v2.11.0+ | |
+| release-0.6 | v1.19+ | v2.11.0+ | |
+| release-0.7 | v1.19+ | v2.11.0+ | v1.x |
+| release-0.8 | v1.20+ | v2.11.0+ | v2.0+ |
+| release-0.9 | v1.20+ | v2.11.0+ | v2.0+ |
+| release-0.10 | v1.20+ | v2.11.0+ | v2.0+ |
+| release-0.11 | v1.23+ | v2.11.0+ | v2.0+ |
+| release-0.12 | v1.23+ | v2.11.0+ | v2.0+ |
+| master | v1.23+ | v2.11.0+ | v2.0+ |
+
+In Kubernetes 1.14 a major [metrics overhaul](https://github.com/kubernetes/enhancements/issues/1206) was implemented.
+Therefore v0.1.x of this repository is the last release to support Kubernetes 1.13 and earlier versions on a best-effort basis.
+
+Some alerts now use Prometheus filters made available in Prometheus 2.11.0, which makes this version of Prometheus a dependency.
+
+Warning: This compatibility matrix was initially created based on experience; we do not guarantee compatibility, and the matrix may be updated based on new learnings.
+
+Warning: By default the expressions will generate *grafana 7.2+* compatible rules using the *$__rate_interval* variable for rate functions. If you need backward-compatible rules, set *grafana72: false* in your *_config*.
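+
+A minimal sketch of that override (the import path is illustrative and assumes you vendored this mixin as `kubernetes-mixin`):
+
+```jsonnet
+(import 'kubernetes-mixin/mixin.libsonnet') {
+  _config+:: {
+    grafana72: false,  // emit rules without $__rate_interval for Grafana < 7.2
+  },
+}
+```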
+
+## How to use
+
+This mixin is designed to be vendored into the repo with your infrastructure config.
+To do this, use [jsonnet-bundler](https://github.com/jsonnet-bundler/jsonnet-bundler):
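+
+```
+$ jb install github.com/kubernetes-monitoring/kubernetes-mixin
+```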
+
+You then have three options for deploying your dashboards:
+1. Generate the config files and deploy them yourself
+1. Use ksonnet to deploy this mixin along with Prometheus and Grafana
+1. Use prometheus-operator to deploy this mixin (TODO)
+
+## Generate config files
+
+You can manually generate the alerts, dashboards and rules files, but first you
+must install some tools:
+
+```
+$ go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest
+$ brew install jsonnet
+```
+
+Then, grab the mixin and its dependencies:
+
+```
+$ git clone https://github.com/kubernetes-monitoring/kubernetes-mixin
+$ cd kubernetes-mixin
+$ jb install
+```
+
+Finally, build the mixin:
+
+```
+$ make prometheus_alerts.yaml
+$ make prometheus_rules.yaml
+$ make dashboards_out
+```
+
+The `prometheus_alerts.yaml` and `prometheus_rules.yaml` files then need to be passed
+to your Prometheus server, and the files in `dashboards_out` need to be imported
+into your Grafana server. The exact details will depend on how you deploy your
+monitoring stack to Kubernetes.
+
+### Dashboards for Windows Nodes
+There are separate dashboards for Windows resources.
+1) Compute Resources / Cluster(Windows)
+2) Compute Resources / Namespace(Windows)
+3) Compute Resources / Pod(Windows)
+4) USE Method / Cluster(Windows)
+5) USE Method / Node(Windows)
+
+These dashboards are based on metrics populated by [windows-exporter](https://github.com/prometheus-community/windows_exporter) from each Windows node.
+
+## Running the tests
+
+```sh
+make test
+```
+
+## Using with prometheus-ksonnet
+
+Alternatively, you can use the mixin with
+[prometheus-ksonnet](https://github.com/kausalco/public/tree/master/prometheus-ksonnet),
+a [ksonnet](https://github.com/ksonnet/ksonnet) module to deploy a fully-fledged
+Prometheus-based monitoring system for Kubernetes:
+
+Make sure you have ksonnet v0.8.0:
+
+```
+$ brew install https://raw.githubusercontent.com/ksonnet/homebrew-tap/82ef24cb7b454d1857db40e38671426c18cd8820/ks.rb
+$ brew pin ks
+$ ks version
+ksonnet version: v0.8.0
+jsonnet version: v0.9.5
+client-go version: v1.6.8-beta.0+$Format:%h$
+```
+
+In your config repo, if you don't have a ksonnet application, make a new one (this will copy credentials from the current context):
+
+```
+$ ks init <application name>
+$ cd <application name>
+$ ks env add default
+```
+
+Grab the prometheus-ksonnet module and its dependencies, which include
+the kubernetes-mixin:
+
+```
+$ go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
+$ jb init
+$ jb install github.com/kausalco/public/prometheus-ksonnet
+```
+
+Assuming you want to run in the default namespace ('environment' in ksonnet parlance), add the following to the file `environments/default/main.jsonnet`:
+
+```jsonnet
+local prometheus = import "prometheus-ksonnet/prometheus-ksonnet.libsonnet";
+
+prometheus {
+ _config+:: {
+ namespace: "default",
+ },
+}
+```
+
+Apply your config:
+
+```
+$ ks apply default
+```
+
+## Using prometheus-operator
+
+TODO
+
+## Multi-cluster support
+
+Kubernetes-mixin can support dashboards across multiple clusters. You need either a multi-cluster [Thanos](https://github.com/improbable-eng/thanos) installation with `external_labels` configured or a [Cortex](https://github.com/cortexproject/cortex) system where a cluster label exists. To enable this feature you need to configure the following:
+
+```jsonnet
+ // Opt-in to multiCluster dashboards by overriding this and the clusterLabel.
+ showMultiCluster: true,
+ clusterLabel: '<your cluster label>',
+```
+
+## Customising the mixin
+
+Kubernetes-mixin allows you to override the selectors used for various jobs,
+to match those used in your Prometheus setup. You can also customize the dashboard
+names and add Grafana tags.
+
+In a new directory, add a file `mixin.libsonnet`:
+
+```jsonnet
+local kubernetes = import "kubernetes-mixin/mixin.libsonnet";
+
+kubernetes {
+ _config+:: {
+ kubeStateMetricsSelector: 'job="kube-state-metrics"',
+ cadvisorSelector: 'job="kubernetes-cadvisor"',
+ nodeExporterSelector: 'job="kubernetes-node-exporter"',
+ kubeletSelector: 'job="kubernetes-kubelet"',
+ grafanaK8s+:: {
+ dashboardNamePrefix: 'Mixin / ',
+      dashboardTags: ['kubernetes', 'infrastructure'],
+ },
+ },
+}
+```
+
+Then, install the kubernetes-mixin:
+
+```
+$ jb init
+$ jb install github.com/kubernetes-monitoring/kubernetes-mixin
+```
+
+Generate the alerts, rules and dashboards:
+
+```
+$ jsonnet -J vendor -S -e 'std.manifestYamlDoc((import "mixin.libsonnet").prometheusAlerts)' > alerts.yml
+$ jsonnet -J vendor -S -e 'std.manifestYamlDoc((import "mixin.libsonnet").prometheusRules)' >files/rules.yml
+$ jsonnet -J vendor -m files/dashboards -e '(import "mixin.libsonnet").grafanaDashboards'
+```
+
+### Customising alert annotations
+
+The steps described below extend the existing mixin library without modifying the original git repository, which makes it easier to consume updates to your extended alert definitions. These definitions can reside outside of this repository, in your own custom location, where you can declare your alert dependencies in your `jsonnetfile.json` and add customisations to the existing definitions.
+
+In your working directory, create a new file `kubernetes_mixin_override.libsonnet` with the following:
+
+```jsonnet
+local utils = import 'lib/utils.libsonnet';
+(import 'mixin.libsonnet') +
+(
+ {
+ prometheusAlerts+::
+ // The specialAlerts can be in any other config file
+ local slack = 'observability';
+ local specialAlerts = {
+ KubePodCrashLooping: { slack_channel: slack },
+ KubePodNotReady: { slack_channel: slack },
+ };
+
+ local addExtraAnnotations(rule) = rule {
+ [if 'alert' in rule then 'annotations']+: {
+ dashboard: 'https://foo.bar.co',
+ [if rule.alert in specialAlerts then 'slack_channel']: specialAlerts[rule.alert].slack_channel,
+ },
+ };
+ utils.mapRuleGroups(addExtraAnnotations),
+ }
+)
+```
+Create a new file `lib/kubernetes_customised_alerts.jsonnet` with the following:
+
+```jsonnet
+std.manifestYamlDoc((import '../kubernetes_mixin_override.libsonnet').prometheusAlerts)
+```
+Running `jsonnet -S lib/kubernetes_customised_alerts.jsonnet` will build the alerts with your customisations.
+
+The same result can be achieved by modifying the existing `config.libsonnet` with the content of `kubernetes_mixin_override.libsonnet`.
+
+## Background
+
+### Alert Severities
+While the community has not yet fully agreed on alert severities and how they are to be used, this repository assumes the following paradigms when setting the severities:
+
+* Critical: An issue that needs to page a person to take instant action.
+* Warning: An issue that needs to be worked on, but in the regular work queue or during office hours, rather than paging the on-call.
+* Info: Meant to support a troubleshooting process by informing about a non-normal situation for one or more systems, but not worth a page or ticket on its own.
+
+
+### Architecture and Technical Decisions
+
+* For more motivation, see
+"[The RED Method: How to instrument your services](https://kccncna17.sched.com/event/CU8K/the-red-method-how-to-instrument-your-services-b-tom-wilkie-kausal?iframe=no&w=100%&sidebar=yes&bg=no)" talk from CloudNativeCon Austin.
+* For more information about monitoring mixins, see this [design doc](https://docs.google.com/document/d/1A9xvzwqnFVSOZ5fD3blKODXfsat5fg6ZhnKu9LK3lB4/edit#).
+
+## Note
+
+You can use the external tool called [prom-metrics-check](https://github.com/ContainerSolutions/prom-metrics-check) to validate the created dashboards. This tool allows you to check whether the metrics used in the Grafana dashboards exist in the Prometheus instance.
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/SECURITY_CONTACTS b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/SECURITY_CONTACTS
new file mode 100644
index 0000000..0f85c94
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/SECURITY_CONTACTS
@@ -0,0 +1,16 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Committee to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+brancz
+csmarchbanks
+metalmatze
+tomwilkie
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/alerts.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/alerts.libsonnet
new file mode 100644
index 0000000..929e9f1
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/alerts.libsonnet
@@ -0,0 +1,10 @@
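+// Aggregates every alert rule file into a single prometheusAlerts object; the
+// final import decorates the generated alerts with runbook links (see
+// ../lib/add-runbook-links.libsonnet).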
+(import 'apps_alerts.libsonnet') +
+(import 'resource_alerts.libsonnet') +
+(import 'storage_alerts.libsonnet') +
+(import 'system_alerts.libsonnet') +
+(import 'kube_apiserver.libsonnet') +
+(import 'kubelet.libsonnet') +
+(import 'kube_scheduler.libsonnet') +
+(import 'kube_controller_manager.libsonnet') +
+(import 'kube_proxy.libsonnet') +
+(import '../lib/add-runbook-links.libsonnet')
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/apps_alerts.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/apps_alerts.libsonnet
new file mode 100644
index 0000000..63e8d9e
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/apps_alerts.libsonnet
@@ -0,0 +1,328 @@
+{
+ _config+:: {
+ kubeStateMetricsSelector: error 'must provide selector for kube-state-metrics',
+ kubeJobTimeoutDuration: error 'must provide value for kubeJobTimeoutDuration',
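+    // namespaceSelector, when set (e.g. 'namespace=~"kube-.*"', an illustrative
+    // value), is prepended to every metric selector below via prefixedNamespaceSelector.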
+ namespaceSelector: null,
+ prefixedNamespaceSelector: if self.namespaceSelector != null then self.namespaceSelector + ',' else '',
+ },
+
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'kubernetes-apps',
+ rules: [
+ {
+ expr: |||
+ max_over_time(kube_pod_container_status_waiting_reason{reason="CrashLoopBackOff", %(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}[5m]) >= 1
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Pod {{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container }}) is in waiting state (reason: "CrashLoopBackOff").',
+ summary: 'Pod is crash looping.',
+ },
+ 'for': '15m',
+ alert: 'KubePodCrashLooping',
+ },
+ {
+ // We wrap kube_pod_owner with the topk() aggregator to ensure that
+ // every (namespace, pod, %(clusterLabel)s) tuple is unique even if the "owner_kind"
+ // label exists for 2 values. This avoids "many-to-many matching
+ // not allowed" errors when joining with kube_pod_status_phase.
+ expr: |||
+ sum by (namespace, pod, %(clusterLabel)s) (
+ max by(namespace, pod, %(clusterLabel)s) (
+ kube_pod_status_phase{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s, phase=~"Pending|Unknown|Failed"}
+ ) * on(namespace, pod, %(clusterLabel)s) group_left(owner_kind) topk by(namespace, pod, %(clusterLabel)s) (
+ 1, max by(namespace, pod, owner_kind, %(clusterLabel)s) (kube_pod_owner{owner_kind!="Job"})
+ )
+ ) > 0
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Pod {{ $labels.namespace }}/{{ $labels.pod }} has been in a non-ready state for longer than 15 minutes.',
+ summary: 'Pod has been in a non-ready state for more than 15 minutes.',
+ },
+ 'for': '15m',
+ alert: 'KubePodNotReady',
+ },
+ {
+ expr: |||
+ kube_deployment_status_observed_generation{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ !=
+ kube_deployment_metadata_generation{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Deployment generation for {{ $labels.namespace }}/{{ $labels.deployment }} does not match, this indicates that the Deployment has failed but has not been rolled back.',
+ summary: 'Deployment generation mismatch due to possible roll-back',
+ },
+ 'for': '15m',
+ alert: 'KubeDeploymentGenerationMismatch',
+ },
+ {
+ expr: |||
+ (
+ kube_deployment_spec_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ >
+ kube_deployment_status_replicas_available{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ ) and (
+ changes(kube_deployment_status_replicas_updated{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}[10m])
+ ==
+ 0
+ )
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Deployment {{ $labels.namespace }}/{{ $labels.deployment }} has not matched the expected number of replicas for longer than 15 minutes.',
+ summary: 'Deployment has not matched the expected number of replicas.',
+ },
+ 'for': '15m',
+ alert: 'KubeDeploymentReplicasMismatch',
+ },
+ {
+ expr: |||
+ kube_deployment_status_condition{condition="Progressing", status="false",%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ != 0
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Rollout of deployment {{ $labels.namespace }}/{{ $labels.deployment }} is not progressing for longer than 15 minutes.',
+ summary: 'Deployment rollout is not progressing.',
+ },
+ 'for': '15m',
+ alert: 'KubeDeploymentRolloutStuck',
+ },
+ {
+ expr: |||
+ (
+ kube_statefulset_status_replicas_ready{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ !=
+ kube_statefulset_status_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ ) and (
+ changes(kube_statefulset_status_replicas_updated{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}[10m])
+ ==
+ 0
+ )
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} has not matched the expected number of replicas for longer than 15 minutes.',
+ summary: 'StatefulSet has not matched the expected number of replicas.',
+ },
+ 'for': '15m',
+ alert: 'KubeStatefulSetReplicasMismatch',
+ },
+ {
+ expr: |||
+ kube_statefulset_status_observed_generation{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ !=
+ kube_statefulset_metadata_generation{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'StatefulSet generation for {{ $labels.namespace }}/{{ $labels.statefulset }} does not match, this indicates that the StatefulSet has failed but has not been rolled back.',
+ summary: 'StatefulSet generation mismatch due to possible roll-back',
+ },
+ 'for': '15m',
+ alert: 'KubeStatefulSetGenerationMismatch',
+ },
+ {
+ expr: |||
+ (
+ max without (revision) (
+ kube_statefulset_status_current_revision{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ unless
+ kube_statefulset_status_update_revision{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ )
+ *
+ (
+ kube_statefulset_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ !=
+ kube_statefulset_status_replicas_updated{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ )
+ ) and (
+ changes(kube_statefulset_status_replicas_updated{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}[5m])
+ ==
+ 0
+ )
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} update has not been rolled out.',
+ summary: 'StatefulSet update has not been rolled out.',
+ },
+ 'for': '15m',
+ alert: 'KubeStatefulSetUpdateNotRolledOut',
+ },
+ {
+ alert: 'KubeDaemonSetRolloutStuck',
+ expr: |||
+ (
+ (
+ kube_daemonset_status_current_number_scheduled{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ !=
+ kube_daemonset_status_desired_number_scheduled{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ ) or (
+ kube_daemonset_status_number_misscheduled{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ !=
+ 0
+ ) or (
+ kube_daemonset_status_updated_number_scheduled{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ !=
+ kube_daemonset_status_desired_number_scheduled{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ ) or (
+ kube_daemonset_status_number_available{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ !=
+ kube_daemonset_status_desired_number_scheduled{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ )
+ ) and (
+ changes(kube_daemonset_status_updated_number_scheduled{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}[5m])
+ ==
+ 0
+ )
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} has not finished or progressed for at least 15 minutes.',
+ summary: 'DaemonSet rollout is stuck.',
+ },
+ 'for': '15m',
+ },
+ {
+ expr: |||
+ sum by (namespace, pod, container, %(clusterLabel)s) (kube_pod_container_status_waiting_reason{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}) > 0
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'pod/{{ $labels.pod }} in namespace {{ $labels.namespace }} on container {{ $labels.container}} has been in waiting state for longer than 1 hour.',
+ summary: 'Pod container waiting longer than 1 hour',
+ },
+ 'for': '1h',
+ alert: 'KubeContainerWaiting',
+ },
+ {
+ alert: 'KubeDaemonSetNotScheduled',
+ expr: |||
+ kube_daemonset_status_desired_number_scheduled{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ -
+ kube_daemonset_status_current_number_scheduled{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s} > 0
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are not scheduled.',
+ summary: 'DaemonSet pods are not scheduled.',
+ },
+ 'for': '10m',
+ },
+ {
+ alert: 'KubeDaemonSetMisScheduled',
+ expr: |||
+ kube_daemonset_status_number_misscheduled{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s} > 0
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: '{{ $value }} Pods of DaemonSet {{ $labels.namespace }}/{{ $labels.daemonset }} are running where they are not supposed to run.',
+ summary: 'DaemonSet pods are misscheduled.',
+ },
+ 'for': '15m',
+ },
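+          // KubeJobNotCompleted: a Job counts as stuck once it has been active
+          // (kube_job_status_active > 0) for more than kubeJobTimeoutDuration
+          // after its start time.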
+ {
+ alert: 'KubeJobNotCompleted',
+ expr: |||
+ time() - max by(namespace, job_name, %(clusterLabel)s) (kube_job_status_start_time{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ and
+ kube_job_status_active{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s} > 0) > %(kubeJobTimeoutDuration)s
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Job {{ $labels.namespace }}/{{ $labels.job_name }} is taking more than {{ "%(kubeJobTimeoutDuration)s" | humanizeDuration }} to complete.' % $._config,
+ summary: 'Job did not complete in time',
+ },
+ },
+ {
+ alert: 'KubeJobFailed',
+ expr: |||
+ kube_job_failed{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s} > 0
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Job {{ $labels.namespace }}/{{ $labels.job_name }} failed to complete. Removing failed job after investigation should clear this alert.',
+ summary: 'Job failed to complete.',
+ },
+ },
+ {
+ expr: |||
+ (kube_horizontalpodautoscaler_status_desired_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ !=
+ kube_horizontalpodautoscaler_status_current_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s})
+ and
+ (kube_horizontalpodautoscaler_status_current_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ >
+ kube_horizontalpodautoscaler_spec_min_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s})
+ and
+ (kube_horizontalpodautoscaler_status_current_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ <
+ kube_horizontalpodautoscaler_spec_max_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s})
+ and
+ changes(kube_horizontalpodautoscaler_status_current_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}[15m]) == 0
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }} has not matched the desired number of replicas for longer than 15 minutes.',
+ summary: 'HPA has not matched desired number of replicas.',
+ },
+ 'for': '15m',
+ alert: 'KubeHpaReplicasMismatch',
+ },
+ {
+ expr: |||
+ kube_horizontalpodautoscaler_status_current_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ ==
+ kube_horizontalpodautoscaler_spec_max_replicas{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s}
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'HPA {{ $labels.namespace }}/{{ $labels.horizontalpodautoscaler }} has been running at max replicas for longer than 15 minutes.',
+ summary: 'HPA is running at max replicas',
+ },
+ 'for': '15m',
+ alert: 'KubeHpaMaxedOut',
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kube_apiserver.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kube_apiserver.libsonnet
new file mode 100644
index 0000000..5639454
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kube_apiserver.libsonnet
@@ -0,0 +1,126 @@
+local utils = import '../lib/utils.libsonnet';
+
+{
+ _config+:: {
+ kubeApiserverSelector: error 'must provide selector for kube-apiserver',
+
+ kubeAPILatencyWarningSeconds: 1,
+
+ certExpirationWarningSeconds: 7 * 24 * 3600,
+ certExpirationCriticalSeconds: 1 * 24 * 3600,
+ },
+
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'kube-apiserver-slos',
+ rules: [
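+          // Multiwindow, multi-burn-rate SLO alerts in the style of the Google SRE
+          // Workbook: each window fires only when both its long and short burn rates
+          // exceed factor * (1 - SLO target).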
+ {
+ alert: 'KubeAPIErrorBudgetBurn',
+ expr: |||
+ sum(apiserver_request:burnrate%s) > (%.2f * %.5f)
+ and
+ sum(apiserver_request:burnrate%s) > (%.2f * %.5f)
+ ||| % [
+ w.long,
+ w.factor,
+ (1 - $._config.SLOs.apiserver.target),
+ w.short,
+ w.factor,
+ (1 - $._config.SLOs.apiserver.target),
+ ],
+ labels: {
+ severity: w.severity,
+ short: '%(short)s' % w,
+ long: '%(long)s' % w,
+ },
+ annotations: {
+ description: 'The API server is burning too much error budget.',
+ summary: 'The API server is burning too much error budget.',
+ },
+ 'for': '%(for)s' % w,
+ }
+ for w in $._config.SLOs.apiserver.windows
+ ],
+ },
+ {
+ name: 'kubernetes-system-apiserver',
+ rules: [
+ {
+ alert: 'KubeClientCertificateExpiration',
+ expr: |||
+ apiserver_client_certificate_expiration_seconds_count{%(kubeApiserverSelector)s} > 0 and on(job) histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{%(kubeApiserverSelector)s}[5m]))) < %(certExpirationWarningSeconds)s
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'A client certificate used to authenticate to kubernetes apiserver is expiring in less than %s.' % (utils.humanizeSeconds($._config.certExpirationWarningSeconds)),
+ summary: 'Client certificate is about to expire.',
+ },
+ },
+ {
+ alert: 'KubeClientCertificateExpiration',
+ expr: |||
+ apiserver_client_certificate_expiration_seconds_count{%(kubeApiserverSelector)s} > 0 and on(job) histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{%(kubeApiserverSelector)s}[5m]))) < %(certExpirationCriticalSeconds)s
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'A client certificate used to authenticate to kubernetes apiserver is expiring in less than %s.' % (utils.humanizeSeconds($._config.certExpirationCriticalSeconds)),
+ summary: 'Client certificate is about to expire.',
+ },
+ },
+ {
+ alert: 'KubeAggregatedAPIErrors',
+ expr: |||
+ sum by(name, namespace, %(clusterLabel)s)(increase(aggregator_unavailable_apiservice_total{%(kubeApiserverSelector)s}[10m])) > 4
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Kubernetes aggregated API {{ $labels.name }}/{{ $labels.namespace }} has reported errors. It has appeared unavailable {{ $value | humanize }} times averaged over the past 10m.',
+ summary: 'Kubernetes aggregated API has reported errors.',
+ },
+ },
+ {
+ alert: 'KubeAggregatedAPIDown',
+ expr: |||
+ (1 - max by(name, namespace, %(clusterLabel)s)(avg_over_time(aggregator_unavailable_apiservice{%(kubeApiserverSelector)s}[10m]))) * 100 < 85
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Kubernetes aggregated API {{ $labels.name }}/{{ $labels.namespace }} has been only {{ $value | humanize }}% available over the last 10m.',
+ summary: 'Kubernetes aggregated API is down.',
+ },
+ },
+ (import '../lib/absent_alert.libsonnet') {
+ componentName:: 'KubeAPI',
+ selector:: $._config.kubeApiserverSelector,
+ },
+ {
+ alert: 'KubeAPITerminatedRequests',
+ expr: |||
+ sum(rate(apiserver_request_terminations_total{%(kubeApiserverSelector)s}[10m])) / ( sum(rate(apiserver_request_total{%(kubeApiserverSelector)s}[10m])) + sum(rate(apiserver_request_terminations_total{%(kubeApiserverSelector)s}[10m])) ) > 0.20
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'The kubernetes apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests.',
+ summary: 'The kubernetes apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests.',
+ },
+ 'for': '5m',
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kube_controller_manager.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kube_controller_manager.libsonnet
new file mode 100644
index 0000000..571e529
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kube_controller_manager.libsonnet
@@ -0,0 +1,19 @@
+{
+ _config+:: {
+ kubeControllerManagerSelector: error 'must provide selector for kube-controller-manager',
+ },
+
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'kubernetes-system-controller-manager',
+ rules: [
+ (import '../lib/absent_alert.libsonnet') {
+ componentName:: 'KubeControllerManager',
+ selector:: $._config.kubeControllerManagerSelector,
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kube_proxy.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kube_proxy.libsonnet
new file mode 100644
index 0000000..38afcbb
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kube_proxy.libsonnet
@@ -0,0 +1,19 @@
+{
+ _config+:: {
+ kubeProxySelector: error 'must provide selector for kube-proxy',
+ },
+
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'kubernetes-system-kube-proxy',
+ rules: [
+ (import '../lib/absent_alert.libsonnet') {
+ componentName:: 'KubeProxy',
+ selector:: $._config.kubeProxySelector,
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kube_scheduler.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kube_scheduler.libsonnet
new file mode 100644
index 0000000..baae29a
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kube_scheduler.libsonnet
@@ -0,0 +1,19 @@
+{
+ _config+:: {
+ kubeSchedulerSelector: 'job="kube-scheduler"',
+ },
+
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'kubernetes-system-scheduler',
+ rules: [
+ (import '../lib/absent_alert.libsonnet') {
+ componentName:: 'KubeScheduler',
+ selector:: $._config.kubeSchedulerSelector,
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kubelet.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kubelet.libsonnet
new file mode 100644
index 0000000..2b206de
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/kubelet.libsonnet
@@ -0,0 +1,202 @@
+{
+ _config+:: {
+ kubeStateMetricsSelector: error 'must provide selector for kube-state-metrics',
+ kubeletSelector: error 'must provide selector for kubelet',
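+    // Taint keys that signal an expected node termination (cluster-autoscaler
+    // scale-down, impending cloud instance reclaim); nodes carrying these taints
+    // are excluded from the KubeNodeUnreachable alert below.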
+ kubeNodeUnreachableIgnoreKeys: [
+ 'ToBeDeletedByClusterAutoscaler',
+ 'cloud.google.com/impending-node-termination',
+ 'aws-node-termination-handler/spot-itn',
+ ],
+
+ kubeletCertExpirationWarningSeconds: 7 * 24 * 3600,
+ kubeletCertExpirationCriticalSeconds: 1 * 24 * 3600,
+ },
+
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'kubernetes-system-kubelet',
+ rules: [
+ {
+ expr: |||
+ kube_node_status_condition{%(kubeStateMetricsSelector)s,condition="Ready",status="true"} == 0
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: '{{ $labels.node }} has been unready for more than 15 minutes.',
+ summary: 'Node is not ready.',
+ },
+ 'for': '15m',
+ alert: 'KubeNodeNotReady',
+ },
+ {
+ expr: |||
+ (kube_node_spec_taint{%(kubeStateMetricsSelector)s,key="node.kubernetes.io/unreachable",effect="NoSchedule"} unless ignoring(key,value) kube_node_spec_taint{%(kubeStateMetricsSelector)s,key=~"%(kubeNodeUnreachableIgnoreKeys)s"}) == 1
+ ||| % $._config {
+ kubeNodeUnreachableIgnoreKeys: std.join('|', super.kubeNodeUnreachableIgnoreKeys),
+ },
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: '{{ $labels.node }} is unreachable and some workloads may be rescheduled.',
+ summary: 'Node is unreachable.',
+ },
+ 'for': '15m',
+ alert: 'KubeNodeUnreachable',
+ },
+ {
+ alert: 'KubeletTooManyPods',
+          // Some nodes have a pod capacity of 1 (e.g. AWS Fargate) and only exist
+          // while a pod is running on them; ignore these special nodes in the
+          // KubeletTooManyPods alert.
+ expr: |||
+ count by(%(clusterLabel)s, node) (
+ (kube_pod_status_phase{%(kubeStateMetricsSelector)s,phase="Running"} == 1) * on(instance,pod,namespace,%(clusterLabel)s) group_left(node) topk by(instance,pod,namespace,%(clusterLabel)s) (1, kube_pod_info{%(kubeStateMetricsSelector)s})
+ )
+ /
+ max by(%(clusterLabel)s, node) (
+ kube_node_status_capacity{%(kubeStateMetricsSelector)s,resource="pods"} != 1
+ ) > 0.95
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'info',
+ },
+ annotations: {
+ description: "Kubelet '{{ $labels.node }}' is running at {{ $value | humanizePercentage }} of its Pod capacity.",
+ summary: 'Kubelet is running at capacity.',
+ },
+ },
+ {
+ alert: 'KubeNodeReadinessFlapping',
+ expr: |||
+ sum(changes(kube_node_status_condition{%(kubeStateMetricsSelector)s,status="true",condition="Ready"}[15m])) by (%(clusterLabel)s, node) > 2
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'The readiness status of node {{ $labels.node }} has changed {{ $value }} times in the last 15 minutes.',
+ summary: 'Node readiness status is flapping.',
+ },
+ },
+ {
+ alert: 'KubeletPlegDurationHigh',
+ expr: |||
+ node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile{quantile="0.99"} >= 10
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'The Kubelet Pod Lifecycle Event Generator has a 99th percentile duration of {{ $value }} seconds on node {{ $labels.node }}.',
+ summary: 'Kubelet Pod Lifecycle Event Generator is taking too long to relist.',
+ },
+ },
+ {
+ alert: 'KubeletPodStartUpLatencyHigh',
+ expr: |||
+ histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{%(kubeletSelector)s}[5m])) by (%(clusterLabel)s, instance, le)) * on(%(clusterLabel)s, instance) group_left(node) kubelet_node_name{%(kubeletSelector)s} > 60
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Kubelet Pod startup 99th percentile latency is {{ $value }} seconds on node {{ $labels.node }}.',
+ summary: 'Kubelet Pod startup latency is too high.',
+ },
+ },
+ {
+ alert: 'KubeletClientCertificateExpiration',
+ expr: |||
+ kubelet_certificate_manager_client_ttl_seconds < %(kubeletCertExpirationWarningSeconds)s
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Client certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }}.',
+ summary: 'Kubelet client certificate is about to expire.',
+ },
+ },
+ {
+ alert: 'KubeletClientCertificateExpiration',
+ expr: |||
+ kubelet_certificate_manager_client_ttl_seconds < %(kubeletCertExpirationCriticalSeconds)s
+ ||| % $._config,
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'Client certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }}.',
+ summary: 'Kubelet client certificate is about to expire.',
+ },
+ },
+ {
+ alert: 'KubeletServerCertificateExpiration',
+ expr: |||
+ kubelet_certificate_manager_server_ttl_seconds < %(kubeletCertExpirationWarningSeconds)s
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Server certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }}.',
+ summary: 'Kubelet server certificate is about to expire.',
+ },
+ },
+ {
+ alert: 'KubeletServerCertificateExpiration',
+ expr: |||
+ kubelet_certificate_manager_server_ttl_seconds < %(kubeletCertExpirationCriticalSeconds)s
+ ||| % $._config,
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'Server certificate for Kubelet on node {{ $labels.node }} expires in {{ $value | humanizeDuration }}.',
+ summary: 'Kubelet server certificate is about to expire.',
+ },
+ },
+ {
+ alert: 'KubeletClientCertificateRenewalErrors',
+ expr: |||
+ increase(kubelet_certificate_manager_client_expiration_renew_errors[5m]) > 0
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ 'for': '15m',
+ annotations: {
+ description: 'Kubelet on node {{ $labels.node }} has failed to renew its client certificate ({{ $value | humanize }} errors in the last 5 minutes).',
+ summary: 'Kubelet has failed to renew its client certificate.',
+ },
+ },
+ {
+ alert: 'KubeletServerCertificateRenewalErrors',
+ expr: |||
+ increase(kubelet_server_expiration_renew_errors[5m]) > 0
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ 'for': '15m',
+ annotations: {
+ description: 'Kubelet on node {{ $labels.node }} has failed to renew its server certificate ({{ $value | humanize }} errors in the last 5 minutes).',
+ summary: 'Kubelet has failed to renew its server certificate.',
+ },
+ },
+ (import '../lib/absent_alert.libsonnet') {
+ componentName:: 'Kubelet',
+ selector:: $._config.kubeletSelector,
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/resource_alerts.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/resource_alerts.libsonnet
new file mode 100644
index 0000000..46225af
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/resource_alerts.libsonnet
@@ -0,0 +1,220 @@
+{
+ _config+:: {
+ kubeStateMetricsSelector: error 'must provide selector for kube-state-metrics',
+ nodeExporterSelector: error 'must provide selector for node-exporter',
+ namespaceSelector: null,
+ prefixedNamespaceSelector: if self.namespaceSelector != null then self.namespaceSelector + ',' else '',
+
+ // We alert when the aggregate (CPU, Memory) quota for all namespaces is
+ // greater than the amount of the resources in the cluster. We do however
+ // allow you to overcommit if you wish.
+ namespaceOvercommitFactor: 1.5,
+ cpuThrottlingPercent: 25,
+ cpuThrottlingSelector: '',
+    // Set this selector to select namespaces that contain resources used for overprovisioning.
+ // See https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/FAQ.md#how-can-i-configure-overprovisioning-with-cluster-autoscaler
+ // for more details.
+ ignoringOverprovisionedWorkloadSelector: '',
+ },
+
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'kubernetes-resources',
+ rules: [
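+          // The *Overcommit alerts compare aggregate Pod resource requests against
+          // the cluster's allocatable capacity minus its largest node, i.e. whether
+          // every Pod would still fit after losing the biggest node.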
+ {
+ alert: 'KubeCPUOvercommit',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Cluster has overcommitted CPU resource requests.',
+ },
+ 'for': '10m',
+ } +
+ if $._config.showMultiCluster then {
+ expr: |||
+ sum(namespace_cpu:kube_pod_container_resource_requests:sum{%(kubeStateMetricsSelector)s,%(ignoringOverprovisionedWorkloadSelector)s}) by (%(clusterLabel)s) - (sum(kube_node_status_allocatable{%(kubeStateMetricsSelector)s,resource="cpu"}) by (%(clusterLabel)s) - max(kube_node_status_allocatable{%(kubeStateMetricsSelector)s,resource="cpu"}) by (%(clusterLabel)s)) > 0
+ and
+ (sum(kube_node_status_allocatable{%(kubeStateMetricsSelector)s,resource="cpu"}) by (%(clusterLabel)s) - max(kube_node_status_allocatable{%(kubeStateMetricsSelector)s,resource="cpu"}) by (%(clusterLabel)s)) > 0
+ ||| % $._config,
+ annotations+: {
+ description: 'Cluster {{ $labels.%(clusterLabel)s }} has overcommitted CPU resource requests for Pods by {{ $value }} CPU shares and cannot tolerate node failure.' % $._config,
+ },
+ } else {
+ expr: |||
+ sum(namespace_cpu:kube_pod_container_resource_requests:sum{%(ignoringOverprovisionedWorkloadSelector)s}) - (sum(kube_node_status_allocatable{resource="cpu", %(kubeStateMetricsSelector)s}) - max(kube_node_status_allocatable{resource="cpu", %(kubeStateMetricsSelector)s})) > 0
+ and
+ (sum(kube_node_status_allocatable{resource="cpu", %(kubeStateMetricsSelector)s}) - max(kube_node_status_allocatable{resource="cpu", %(kubeStateMetricsSelector)s})) > 0
+ ||| % $._config,
+ annotations+: {
+ description: 'Cluster has overcommitted CPU resource requests for Pods by {{ $value }} CPU shares and cannot tolerate node failure.' % $._config,
+ },
+ },
+ {
+ alert: 'KubeMemoryOvercommit',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Cluster has overcommitted memory resource requests.',
+ },
+ 'for': '10m',
+ } +
+ if $._config.showMultiCluster then {
+ expr: |||
+ sum(namespace_memory:kube_pod_container_resource_requests:sum{%(ignoringOverprovisionedWorkloadSelector)s}) by (%(clusterLabel)s) - (sum(kube_node_status_allocatable{resource="memory", %(kubeStateMetricsSelector)s}) by (%(clusterLabel)s) - max(kube_node_status_allocatable{resource="memory", %(kubeStateMetricsSelector)s}) by (%(clusterLabel)s)) > 0
+ and
+ (sum(kube_node_status_allocatable{resource="memory", %(kubeStateMetricsSelector)s}) by (%(clusterLabel)s) - max(kube_node_status_allocatable{resource="memory", %(kubeStateMetricsSelector)s}) by (%(clusterLabel)s)) > 0
+ ||| % $._config,
+ annotations+: {
+ description: 'Cluster {{ $labels.%(clusterLabel)s }} has overcommitted memory resource requests for Pods by {{ $value | humanize }} bytes and cannot tolerate node failure.' % $._config,
+ },
+ } else
+ {
+ expr: |||
+ sum(namespace_memory:kube_pod_container_resource_requests:sum{%(ignoringOverprovisionedWorkloadSelector)s}) - (sum(kube_node_status_allocatable{resource="memory", %(kubeStateMetricsSelector)s}) - max(kube_node_status_allocatable{resource="memory", %(kubeStateMetricsSelector)s})) > 0
+ and
+ (sum(kube_node_status_allocatable{resource="memory", %(kubeStateMetricsSelector)s}) - max(kube_node_status_allocatable{resource="memory", %(kubeStateMetricsSelector)s})) > 0
+ ||| % $._config,
+ annotations+: {
+ description: 'Cluster has overcommitted memory resource requests for Pods by {{ $value | humanize }} bytes and cannot tolerate node failure.',
+ },
+ },
+ {
+ alert: 'KubeCPUQuotaOvercommit',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Cluster has overcommitted CPU resource requests.',
+ },
+ 'for': '5m',
+ } +
+ if $._config.showMultiCluster then {
+ expr: |||
+ sum(min without(resource) (kube_resourcequota{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s, type="hard", resource=~"(cpu|requests.cpu)"})) by (%(clusterLabel)s)
+ /
+ sum(kube_node_status_allocatable{resource="cpu", %(kubeStateMetricsSelector)s}) by (%(clusterLabel)s)
+ > %(namespaceOvercommitFactor)s
+ ||| % $._config,
+ annotations+: {
+ description: 'Cluster {{ $labels.%(clusterLabel)s }} has overcommitted CPU resource requests for Namespaces.' % $._config,
+ },
+ } else
+ {
+ expr: |||
+ sum(min without(resource) (kube_resourcequota{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s, type="hard", resource=~"(cpu|requests.cpu)"}))
+ /
+ sum(kube_node_status_allocatable{resource="cpu", %(kubeStateMetricsSelector)s})
+ > %(namespaceOvercommitFactor)s
+ ||| % $._config,
+ annotations+: {
+ description: 'Cluster has overcommitted CPU resource requests for Namespaces.',
+ },
+ },
+ {
+ alert: 'KubeMemoryQuotaOvercommit',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Cluster has overcommitted memory resource requests.',
+ },
+ 'for': '5m',
+ } +
+ if $._config.showMultiCluster then {
+ expr: |||
+ sum(min without(resource) (kube_resourcequota{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s, type="hard", resource=~"(memory|requests.memory)"})) by (%(clusterLabel)s)
+ /
+ sum(kube_node_status_allocatable{resource="memory", %(kubeStateMetricsSelector)s}) by (%(clusterLabel)s)
+ > %(namespaceOvercommitFactor)s
+ ||| % $._config,
+ annotations+: {
+ description: 'Cluster {{ $labels.%(clusterLabel)s }} has overcommitted memory resource requests for Namespaces.' % $._config,
+ },
+ } else
+ {
+ expr: |||
+ sum(min without(resource) (kube_resourcequota{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s, type="hard", resource=~"(memory|requests.memory)"}))
+ /
+ sum(kube_node_status_allocatable{resource="memory", %(kubeStateMetricsSelector)s})
+ > %(namespaceOvercommitFactor)s
+ ||| % $._config,
+ annotations+: {
+ description: 'Cluster has overcommitted memory resource requests for Namespaces.',
+ },
+ },
+ {
+ alert: 'KubeQuotaAlmostFull',
+ expr: |||
+ kube_resourcequota{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s, type="used"}
+ / ignoring(instance, job, type)
+ (kube_resourcequota{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s, type="hard"} > 0)
+ > 0.9 < 1
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'info',
+ },
+ annotations: {
+ description: 'Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota.',
+ summary: 'Namespace quota is going to be full.',
+ },
+ },
+ {
+ alert: 'KubeQuotaFullyUsed',
+ expr: |||
+ kube_resourcequota{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s, type="used"}
+ / ignoring(instance, job, type)
+ (kube_resourcequota{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s, type="hard"} > 0)
+ == 1
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'info',
+ },
+ annotations: {
+ description: 'Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota.',
+ summary: 'Namespace quota is fully used.',
+ },
+ },
+ {
+ alert: 'KubeQuotaExceeded',
+ expr: |||
+ kube_resourcequota{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s, type="used"}
+ / ignoring(instance, job, type)
+ (kube_resourcequota{%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s, type="hard"} > 0)
+ > 1
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Namespace {{ $labels.namespace }} is using {{ $value | humanizePercentage }} of its {{ $labels.resource }} quota.',
+ summary: 'Namespace quota has exceeded the limits.',
+ },
+ },
+ {
+ alert: 'CPUThrottlingHigh',
+ expr: |||
+ sum(increase(container_cpu_cfs_throttled_periods_total{container!="", %(cpuThrottlingSelector)s}[5m])) by (container, pod, namespace)
+ /
+ sum(increase(container_cpu_cfs_periods_total{%(cpuThrottlingSelector)s}[5m])) by (container, pod, namespace)
+ > ( %(cpuThrottlingPercent)s / 100 )
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'info',
+ },
+ annotations: {
+ description: '{{ $value | humanizePercentage }} throttling of CPU in namespace {{ $labels.namespace }} for container {{ $labels.container }} in pod {{ $labels.pod }}.',
+ summary: 'Processes experience elevated CPU throttling.',
+ },
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/storage_alerts.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/storage_alerts.libsonnet
new file mode 100644
index 0000000..ce02868
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/storage_alerts.libsonnet
@@ -0,0 +1,137 @@
+{
+ _config+:: {
+ kubeStateMetricsSelector: error 'must provide selector for kube-state-metrics',
+ kubeletSelector: error 'must provide selector for kubelet',
+ namespaceSelector: null,
+ prefixedNamespaceSelector: if self.namespaceSelector != null then self.namespaceSelector + ',' else '',
+
+    // We alert when a disk is expected to fill up within four days. Depending on
+    // the dataset, it might be useful to change the sampling time for the
+    // prediction.
+ volumeFullPredictionSampleTime: '6h',
+ },
+
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'kubernetes-storage',
+ rules: [
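+          // Every rule below excludes ReadOnlyMany PVCs (which are never filled by
+          // writes) as well as PVCs matched by pvExcludedSelector.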
+ {
+ alert: 'KubePersistentVolumeFillingUp',
+ expr: |||
+ (
+ kubelet_volume_stats_available_bytes{%(prefixedNamespaceSelector)s%(kubeletSelector)s}
+ /
+ kubelet_volume_stats_capacity_bytes{%(prefixedNamespaceSelector)s%(kubeletSelector)s}
+ ) < 0.03
+ and
+ kubelet_volume_stats_used_bytes{%(prefixedNamespaceSelector)s%(kubeletSelector)s} > 0
+ unless on(namespace, persistentvolumeclaim)
+ kube_persistentvolumeclaim_access_mode{%(prefixedNamespaceSelector)s access_mode="ReadOnlyMany"} == 1
+ unless on(namespace, persistentvolumeclaim)
+ kube_persistentvolumeclaim_labels{%(prefixedNamespaceSelector)s%(pvExcludedSelector)s} == 1
+ ||| % $._config,
+ 'for': '1m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is only {{ $value | humanizePercentage }} free.',
+ summary: 'PersistentVolume is filling up.',
+ },
+ },
+ {
+ alert: 'KubePersistentVolumeFillingUp',
+ expr: |||
+ (
+ kubelet_volume_stats_available_bytes{%(prefixedNamespaceSelector)s%(kubeletSelector)s}
+ /
+ kubelet_volume_stats_capacity_bytes{%(prefixedNamespaceSelector)s%(kubeletSelector)s}
+ ) < 0.15
+ and
+ kubelet_volume_stats_used_bytes{%(prefixedNamespaceSelector)s%(kubeletSelector)s} > 0
+ and
+ predict_linear(kubelet_volume_stats_available_bytes{%(prefixedNamespaceSelector)s%(kubeletSelector)s}[%(volumeFullPredictionSampleTime)s], 4 * 24 * 3600) < 0
+ unless on(namespace, persistentvolumeclaim)
+ kube_persistentvolumeclaim_access_mode{%(prefixedNamespaceSelector)s access_mode="ReadOnlyMany"} == 1
+ unless on(namespace, persistentvolumeclaim)
+ kube_persistentvolumeclaim_labels{%(prefixedNamespaceSelector)s%(pvExcludedSelector)s} == 1
+ ||| % $._config,
+ 'for': '1h',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is expected to fill up within four days. Currently {{ $value | humanizePercentage }} is available.',
+ summary: 'PersistentVolume is filling up.',
+ },
+ },
+ {
+ alert: 'KubePersistentVolumeInodesFillingUp',
+ expr: |||
+ (
+ kubelet_volume_stats_inodes_free{%(prefixedNamespaceSelector)s%(kubeletSelector)s}
+ /
+ kubelet_volume_stats_inodes{%(prefixedNamespaceSelector)s%(kubeletSelector)s}
+ ) < 0.03
+ and
+ kubelet_volume_stats_inodes_used{%(prefixedNamespaceSelector)s%(kubeletSelector)s} > 0
+ unless on(namespace, persistentvolumeclaim)
+ kube_persistentvolumeclaim_access_mode{%(prefixedNamespaceSelector)s access_mode="ReadOnlyMany"} == 1
+ unless on(namespace, persistentvolumeclaim)
+ kube_persistentvolumeclaim_labels{%(prefixedNamespaceSelector)s%(pvExcludedSelector)s} == 1
+ ||| % $._config,
+ 'for': '1m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'The PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} only has {{ $value | humanizePercentage }} free inodes.',
+ summary: 'PersistentVolumeInodes are filling up.',
+ },
+ },
+ {
+ alert: 'KubePersistentVolumeInodesFillingUp',
+ expr: |||
+ (
+ kubelet_volume_stats_inodes_free{%(prefixedNamespaceSelector)s%(kubeletSelector)s}
+ /
+ kubelet_volume_stats_inodes{%(prefixedNamespaceSelector)s%(kubeletSelector)s}
+ ) < 0.15
+ and
+ kubelet_volume_stats_inodes_used{%(prefixedNamespaceSelector)s%(kubeletSelector)s} > 0
+ and
+ predict_linear(kubelet_volume_stats_inodes_free{%(prefixedNamespaceSelector)s%(kubeletSelector)s}[%(volumeFullPredictionSampleTime)s], 4 * 24 * 3600) < 0
+ unless on(namespace, persistentvolumeclaim)
+ kube_persistentvolumeclaim_access_mode{%(prefixedNamespaceSelector)s access_mode="ReadOnlyMany"} == 1
+ unless on(namespace, persistentvolumeclaim)
+ kube_persistentvolumeclaim_labels{%(prefixedNamespaceSelector)s%(pvExcludedSelector)s} == 1
+ ||| % $._config,
+ 'for': '1h',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'Based on recent sampling, the PersistentVolume claimed by {{ $labels.persistentvolumeclaim }} in Namespace {{ $labels.namespace }} is expected to run out of inodes within four days. Currently {{ $value | humanizePercentage }} of its inodes are free.',
+ summary: 'PersistentVolumeInodes are filling up.',
+ },
+ },
+ {
+ alert: 'KubePersistentVolumeErrors',
+ expr: |||
+ kube_persistentvolume_status_phase{phase=~"Failed|Pending",%(prefixedNamespaceSelector)s%(kubeStateMetricsSelector)s} > 0
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: 'The persistent volume {{ $labels.persistentvolume }} has status {{ $labels.phase }}.',
+ summary: 'PersistentVolume is having issues with provisioning.',
+ },
+ },
+ ],
+ },
+ ],
+ },
+}
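
The two warning-level alerts above pair current usage with `predict_linear`: the available bytes (or free inodes) over the configured `volumeFullPredictionSampleTime` window are extrapolated 4 * 24 * 3600 seconds ahead, and the alert fires only if that prediction goes negative while usage is already above 85%. Consumers tune the sample window through `_config`. A hedged sketch of such an override (standalone usage assumed; the required selectors must be supplied because this file declares them as errors):

    // Hypothetical consumer-side override: extrapolate from 24h of data
    // instead of the default 6h before projecting four days ahead.
    (import 'alerts/storage_alerts.libsonnet') + {
      _config+:: {
        kubeStateMetricsSelector: 'job="kube-state-metrics"',
        kubeletSelector: 'job="kubelet"',
        pvExcludedSelector: 'label_excluded_from_alerts="true"',
        volumeFullPredictionSampleTime: '24h',
      },
    }
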
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/system_alerts.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/system_alerts.libsonnet
new file mode 100644
index 0000000..2a434a3
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/alerts/system_alerts.libsonnet
@@ -0,0 +1,50 @@
+{
+ _config+:: {
+ notKubeDnsCoreDnsSelector: 'job!~"kube-dns|coredns"',
+ kubeApiserverSelector: 'job="kube-apiserver"',
+ },
+
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'kubernetes-system',
+ rules: [
+ {
+ alert: 'KubeVersionMismatch',
+ expr: |||
+ count by (%(clusterLabel)s) (count by (git_version, %(clusterLabel)s) (label_replace(kubernetes_build_info{%(notKubeDnsCoreDnsSelector)s},"git_version","$1","git_version","(v[0-9]*.[0-9]*).*"))) > 1
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: 'There are {{ $value }} different semantic versions of Kubernetes components running.',
+ summary: 'Different semantic versions of Kubernetes components running.',
+ },
+ },
+ {
+ alert: 'KubeClientErrors',
+          // Many clients use GET requests to check the existence of objects;
+          // this is normal and an expected error, so it is ignored in this
+          // alert.
+ expr: |||
+ (sum(rate(rest_client_requests_total{%(kubeApiserverSelector)s,code=~"5.."}[5m])) by (%(clusterLabel)s, instance, job, namespace)
+ /
+ sum(rate(rest_client_requests_total{%(kubeApiserverSelector)s}[5m])) by (%(clusterLabel)s, instance, job, namespace))
+ > 0.01
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ description: "Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance }}' is experiencing {{ $value | humanizePercentage }} errors.'",
+ summary: 'Kubernetes API server client is experiencing errors.',
+ },
+ },
+ ],
+ },
+ ],
+ },
+}
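
As with the other alert files, the selectors here are only defaults in a hidden `_config` field; whatever the consuming environment mixes in last wins, because Jsonnet's `+` merges the right operand over the left. A minimal sketch of overriding the apiserver selector (the job name is hypothetical):

    // Hypothetical: point KubeClientErrors at an apiserver scraped as job="apiserver".
    (import 'alerts/system_alerts.libsonnet') + {
      _config+:: {
        clusterLabel: 'cluster',  // referenced by the expressions above
        kubeApiserverSelector: 'job="apiserver"',
      },
    }
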
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/config.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/config.libsonnet
new file mode 100644
index 0000000..41a94f1
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/config.libsonnet
@@ -0,0 +1,113 @@
+{
+ _config+:: {
+ SLOs: {
+ apiserver: {
+ days: 30, // The number of days we alert on burning too much error budget for.
+ target: 0.99, // The target percentage of availability between 0-1. (0.99 = 99%, 0.999 = 99.9%)
+
+      // Only change these windows when you really understand multi-burn-rate alerting.
+      // Even though you can change the days above (which changes the availability
+      // calculations), these windows still alert on a 30-day sliding window. We're
+      // looking into basing these windows on the given days too.
+ windows: [
+ { severity: 'critical', 'for': '2m', long: '1h', short: '5m', factor: 14.4 },
+ { severity: 'critical', 'for': '15m', long: '6h', short: '30m', factor: 6 },
+ { severity: 'warning', 'for': '1h', long: '1d', short: '2h', factor: 3 },
+ { severity: 'warning', 'for': '3h', long: '3d', short: '6h', factor: 1 },
+ ],
+ },
+ },
+
+ // Selectors are inserted between {} in Prometheus queries.
+ cadvisorSelector: 'job="cadvisor"',
+ kubeletSelector: 'job="kubelet"',
+ kubeStateMetricsSelector: 'job="kube-state-metrics"',
+ nodeExporterSelector: 'job="node-exporter"',
+ kubeSchedulerSelector: 'job="kube-scheduler"',
+ kubeControllerManagerSelector: 'job="kube-controller-manager"',
+ kubeApiserverSelector: 'job="kube-apiserver"',
+ kubeProxySelector: 'job="kube-proxy"',
+ podLabel: 'pod',
+ hostNetworkInterfaceSelector: 'device!~"veth.+"',
+ hostMountpointSelector: 'mountpoint="/"',
+ windowsExporterSelector: 'job="kubernetes-windows-exporter"',
+ containerfsSelector: 'container!=""',
+
+    // Grafana dashboard IDs are necessary for stable links to dashboards.
+ grafanaDashboardIDs: {
+ 'k8s-resources-multicluster.json': '1gBgaexoVZ4TpBNAt2eGRsc4LNjNhdjcZd6cqU6S',
+ 'k8s-resources-cluster.json': 'ZnbvYbcXkob7GLqcDPLTj1ZL4MRX87tOh8xdr831',
+ 'k8s-resources-namespace.json': 'XaY4UCP3J51an4ikqtkUGBSjLpDW4pg39xe2FuxP',
+ 'k8s-resources-pod.json': 'wU56sdGSNYZTL3eO0db3pONtVmTvsyV7w8aadbYF',
+ 'k8s-multicluster-rsrc-use.json': 'NJ9AlnsObVgj9uKiJMeAqfzMi1wihOMupcsDhlhR',
+ 'k8s-cluster-rsrc-use.json': 'uXQldxzqUNgIOUX6FyZNvqgP2vgYb78daNu4GiDc',
+ 'k8s-node-rsrc-use.json': 'E577CMUOwmPsxVVqM9lj40czM1ZPjclw7hGa7OT7',
+ 'nodes.json': 'kcb9C2QDe4IYcjiTOmYyfhsImuzxRcvwWC3YLJPS',
+ 'persistentvolumesusage.json': 'AhCeikee0xoa6faec0Weep2nee6shaiquigahw8b',
+ 'pods.json': 'AMK9hS0rSbSz7cKjPHcOtk6CGHFjhSHwhbQ3sedK',
+ 'statefulset.json': 'dPiBt0FRG5BNYo0XJ4L0Meoc7DWs9eL40c1CRc1g',
+ 'k8s-resources-windows-cluster.json': '4d08557fd9391b100730f2494bccac68',
+ 'k8s-resources-windows-namespace.json': '490b402361724ab1d4c45666c1fa9b6f',
+ 'k8s-resources-windows-pod.json': '40597a704a610e936dc6ed374a7ce023',
+ 'k8s-windows-cluster-rsrc-use.json': '53a43377ec9aaf2ff64dfc7a1f539334',
+ 'k8s-windows-node-rsrc-use.json': '96e7484b0bb53b74fbc2bcb7723cd40b',
+ 'k8s-resources-workloads-namespace.json': 'L29WgMrccBDauPs3Xsti3fwaKjMB6fReufWj6Gl1',
+ 'k8s-resources-workload.json': 'hZCNbUPfUqjc95N3iumVsaEVHXzaBr3IFKRFvUJf',
+ 'apiserver.json': 'eswbt59QCroA3XLdKFvdOHlKB8Iks3h7d2ohstxr',
+ 'controller-manager.json': '5g73oHG0pCRz4X1t6gNYouVUv9urrQd4wCdHR2mI',
+ 'scheduler.json': '4uMPZ9jmwvYJcM5fcNcNrrt9Sf6ufQL4IKFri2Gp',
+ 'proxy.json': 'hhT4orXD1Ott4U1bNNps0R26EHTwMypdcaCjDRPM',
+ 'kubelet.json': 'B1azll2ETo7DTiM8CysrH6g4s5NCgkOz6ZdU8Q0j',
+ },
+
+ // Support for Grafana 7.2+ `$__rate_interval` instead of `$__interval`
+ grafana72: true,
+ grafanaIntervalVar: if self.grafana72 then '$__rate_interval' else '$__interval',
+
+ // Config for the Grafana dashboards in the Kubernetes Mixin
+ grafanaK8s: {
+ dashboardNamePrefix: 'Kubernetes / ',
+ dashboardTags: ['kubernetes-mixin'],
+
+      // For links between Grafana dashboards, you need to tell us if your Grafana
+      // is served under some non-root path.
+ linkPrefix: '',
+
+      // The default refresh time for all dashboards; defaults to 10s.
+ refresh: '10s',
+ minimumTimeInterval: '1m',
+
+      // Timezone for Grafana dashboards: UTC, browser, ...
+ grafanaTimezone: 'UTC',
+ },
+
+ // Opt-in to multiCluster dashboards by overriding this and the clusterLabel.
+ showMultiCluster: false,
+ clusterLabel: 'cluster',
+
+ namespaceLabel: 'namespace',
+
+ // Default datasource name
+ datasourceName: 'default',
+
+ // Datasource instance filter regex
+ datasourceFilterRegex: '',
+
+    // This list of filesystem types is referenced in various expressions.
+ fstypes: ['ext[234]', 'btrfs', 'xfs', 'zfs'],
+ fstypeSelector: 'fstype=~"%s"' % std.join('|', self.fstypes),
+
+ // This list of disk device names is referenced in various expressions.
+ diskDevices: ['mmcblk.p.+', 'nvme.+', 'rbd.+', 'sd.+', 'vd.+', 'xvd.+', 'dm-.+', 'dasd.+'],
+ diskDeviceSelector: 'device=~"(/dev.+)|%s"' % std.join('|', self.diskDevices),
+
+    // Certain workloads (e.g. KubeVirt/CDI) will fully utilise the persistent volume
+    // they claim; the size of the PV will never grow, since they consume the entirety
+    // of the volume by design. This selector allows an admin to 'pre-mark' the PVC of
+    // such a workload (or for any other use case) so that specific storage alerts will
+    // not fire. With the default selector, adding a label `excluded-from-alerts: 'true'`
+    // to the PVC will have the desired effect.
+ pvExcludedSelector: 'label_excluded_from_alerts="true"',
+
+    // Default timeout value for k8s Jobs. Jobs that remain active beyond this duration trigger the KubeJobNotCompleted alert.
+ kubeJobTimeoutDuration: 12 * 60 * 60,
+ },
+}
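
The SLO windows above follow the standard multi-window, multi-burn-rate pattern: a burn-rate factor F sustained over a window W consumes F * W / 30d of the 30-day error budget, so the four rows correspond to spending roughly 2% of the budget in 1h, 5% in 6h, 10% in 1d, and 10% in 3d. A quick Jsonnet check of that arithmetic:

    local days = 30;
    // Fraction of the error budget consumed by burning at `factor`
    // for `windowHours` hours of a `days`-day SLO period.
    local budgetSpent(factor, windowHours) = factor * windowHours / (days * 24);

    [
      budgetSpent(14.4, 1),  // 0.02 -> 2% of the budget in 1h
      budgetSpent(6, 6),     // 0.05 -> 5% in 6h
      budgetSpent(3, 24),    // 0.10 -> 10% in 1d
      budgetSpent(1, 72),    // 0.10 -> 10% in 3d
    ]
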
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/apiserver.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/apiserver.libsonnet
new file mode 100644
index 0000000..e1d38c9
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/apiserver.libsonnet
@@ -0,0 +1,286 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local singlestat = grafana.singlestat;
+
+{
+ _config+:: {
+ kubeApiserverSelector: 'job="kube-apiserver"',
+ },
+
+ grafanaDashboards+:: {
+ 'apiserver.json':
+ local availability1d =
+ singlestat.new(
+ 'Availability (%dd) > %.3f%%' % [$._config.SLOs.apiserver.days, 100 * $._config.SLOs.apiserver.target],
+ datasource='$datasource',
+ span=4,
+ format='percentunit',
+ decimals=3,
+ description='How many percent of requests (both read and write) in %d days have been answered successfully and fast enough?' % $._config.SLOs.apiserver.days,
+ )
+ .addTarget(prometheus.target('apiserver_request:availability%dd{verb="all", %(clusterLabel)s="$cluster"}' % [$._config.SLOs.apiserver.days, $._config.clusterLabel]));
+
+ local errorBudget =
+ graphPanel.new(
+ 'ErrorBudget (%dd) > %.3f%%' % [$._config.SLOs.apiserver.days, 100 * $._config.SLOs.apiserver.target],
+ datasource='$datasource',
+ span=8,
+ format='percentunit',
+ decimals=3,
+ fill=10,
+ description='How much error budget is left looking at our %.3f%% availability guarantees?' % $._config.SLOs.apiserver.target,
+ )
+ .addTarget(prometheus.target('100 * (apiserver_request:availability%dd{verb="all", %(clusterLabel)s="$cluster"} - %f)' % [$._config.SLOs.apiserver.days, $._config.clusterLabel, $._config.SLOs.apiserver.target], legendFormat='errorbudget'));
+
+ local readAvailability =
+ singlestat.new(
+ 'Read Availability (%dd)' % $._config.SLOs.apiserver.days,
+ datasource='$datasource',
+ span=3,
+ format='percentunit',
+ decimals=3,
+ description='How many percent of read requests (LIST,GET) in %d days have been answered successfully and fast enough?' % $._config.SLOs.apiserver.days,
+ )
+ .addTarget(prometheus.target('apiserver_request:availability%dd{verb="read", %(clusterLabel)s="$cluster"}' % [$._config.SLOs.apiserver.days, $._config.clusterLabel]));
+
+ local readRequests =
+ graphPanel.new(
+ 'Read SLI - Requests',
+ datasource='$datasource',
+ span=3,
+ format='reqps',
+ stack=true,
+ fill=10,
+ description='How many read requests (LIST,GET) per second do the apiservers get by code?',
+ )
+ .addSeriesOverride({ alias: '/2../i', color: '#56A64B' })
+ .addSeriesOverride({ alias: '/3../i', color: '#F2CC0C' })
+ .addSeriesOverride({ alias: '/4../i', color: '#3274D9' })
+ .addSeriesOverride({ alias: '/5../i', color: '#E02F44' })
+ .addTarget(prometheus.target('sum by (code) (code_resource:apiserver_request_total:rate5m{verb="read", %(clusterLabel)s="$cluster"})' % $._config, legendFormat='{{ code }}'));
+
+ local readErrors =
+ graphPanel.new(
+ 'Read SLI - Errors',
+ datasource='$datasource',
+ min=0,
+ span=3,
+ format='percentunit',
+ description='How many percent of read requests (LIST,GET) per second are returned with errors (5xx)?',
+ )
+ .addTarget(prometheus.target('sum by (resource) (code_resource:apiserver_request_total:rate5m{verb="read",code=~"5..", %(clusterLabel)s="$cluster"}) / sum by (resource) (code_resource:apiserver_request_total:rate5m{verb="read", %(clusterLabel)s="$cluster"})' % $._config, legendFormat='{{ resource }}'));
+
+ local readDuration =
+ graphPanel.new(
+ 'Read SLI - Duration',
+ datasource='$datasource',
+ span=3,
+ format='s',
+ description='How many seconds is the 99th percentile for reading (LIST|GET) a given resource?',
+ )
+ .addTarget(prometheus.target('cluster_quantile:apiserver_request_slo_duration_seconds:histogram_quantile{verb="read", %(clusterLabel)s="$cluster"}' % $._config, legendFormat='{{ resource }}'));
+
+ local writeAvailability =
+ singlestat.new(
+ 'Write Availability (%dd)' % $._config.SLOs.apiserver.days,
+ datasource='$datasource',
+ span=3,
+ format='percentunit',
+ decimals=3,
+ description='How many percent of write requests (POST|PUT|PATCH|DELETE) in %d days have been answered successfully and fast enough?' % $._config.SLOs.apiserver.days,
+ )
+ .addTarget(prometheus.target('apiserver_request:availability%dd{verb="write", %(clusterLabel)s="$cluster"}' % [$._config.SLOs.apiserver.days, $._config.clusterLabel]));
+
+ local writeRequests =
+ graphPanel.new(
+ 'Write SLI - Requests',
+ datasource='$datasource',
+ span=3,
+ format='reqps',
+ stack=true,
+ fill=10,
+ description='How many write requests (POST|PUT|PATCH|DELETE) per second do the apiservers get by code?',
+ )
+ .addSeriesOverride({ alias: '/2../i', color: '#56A64B' })
+ .addSeriesOverride({ alias: '/3../i', color: '#F2CC0C' })
+ .addSeriesOverride({ alias: '/4../i', color: '#3274D9' })
+ .addSeriesOverride({ alias: '/5../i', color: '#E02F44' })
+ .addTarget(prometheus.target('sum by (code) (code_resource:apiserver_request_total:rate5m{verb="write", %(clusterLabel)s="$cluster"})' % $._config, legendFormat='{{ code }}'));
+
+ local writeErrors =
+ graphPanel.new(
+ 'Write SLI - Errors',
+ datasource='$datasource',
+ min=0,
+ span=3,
+ format='percentunit',
+ description='How many percent of write requests (POST|PUT|PATCH|DELETE) per second are returned with errors (5xx)?',
+ )
+ .addTarget(prometheus.target('sum by (resource) (code_resource:apiserver_request_total:rate5m{verb="write",code=~"5..", %(clusterLabel)s="$cluster"}) / sum by (resource) (code_resource:apiserver_request_total:rate5m{verb="write", %(clusterLabel)s="$cluster"})' % $._config, legendFormat='{{ resource }}'));
+
+ local writeDuration =
+ graphPanel.new(
+ 'Write SLI - Duration',
+ datasource='$datasource',
+ span=3,
+ format='s',
+ description='How many seconds is the 99th percentile for writing (POST|PUT|PATCH|DELETE) a given resource?',
+ )
+ .addTarget(prometheus.target('cluster_quantile:apiserver_request_slo_duration_seconds:histogram_quantile{verb="write", %(clusterLabel)s="$cluster"}' % $._config, legendFormat='{{ resource }}'));
+
+ local workQueueAddRate =
+ graphPanel.new(
+ 'Work Queue Add Rate',
+ datasource='$datasource',
+ span=6,
+ format='ops',
+ legend_show=false,
+ min=0,
+ )
+ .addTarget(prometheus.target('sum(rate(workqueue_adds_total{%(kubeApiserverSelector)s, instance=~"$instance", %(clusterLabel)s="$cluster"}[%(grafanaIntervalVar)s])) by (instance, name)' % $._config, legendFormat='{{instance}} {{name}}'));
+
+ local workQueueDepth =
+ graphPanel.new(
+ 'Work Queue Depth',
+ datasource='$datasource',
+ span=6,
+ format='short',
+ legend_show=false,
+ min=0,
+ )
+ .addTarget(prometheus.target('sum(rate(workqueue_depth{%(kubeApiserverSelector)s, instance=~"$instance", %(clusterLabel)s="$cluster"}[%(grafanaIntervalVar)s])) by (instance, name)' % $._config, legendFormat='{{instance}} {{name}}'));
+
+
+ local workQueueLatency =
+ graphPanel.new(
+ 'Work Queue Latency',
+ datasource='$datasource',
+ span=12,
+ format='s',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(workqueue_queue_duration_seconds_bucket{%(kubeApiserverSelector)s, instance=~"$instance", %(clusterLabel)s="$cluster"}[%(grafanaIntervalVar)s])) by (instance, name, le))' % $._config, legendFormat='{{instance}} {{name}}'));
+
+ local memory =
+ graphPanel.new(
+ 'Memory',
+ datasource='$datasource',
+ span=4,
+ format='bytes',
+ )
+ .addTarget(prometheus.target('process_resident_memory_bytes{%(kubeApiserverSelector)s,instance=~"$instance", %(clusterLabel)s="$cluster"}' % $._config, legendFormat='{{instance}}'));
+
+ local cpu =
+ graphPanel.new(
+ 'CPU usage',
+ datasource='$datasource',
+ span=4,
+ format='short',
+ min=0,
+ )
+ .addTarget(prometheus.target('rate(process_cpu_seconds_total{%(kubeApiserverSelector)s,instance=~"$instance", %(clusterLabel)s="$cluster"}[%(grafanaIntervalVar)s])' % $._config, legendFormat='{{instance}}'));
+
+ local goroutines =
+ graphPanel.new(
+ 'Goroutines',
+ datasource='$datasource',
+ span=4,
+ format='short',
+ )
+ .addTarget(prometheus.target('go_goroutines{%(kubeApiserverSelector)s,instance=~"$instance", %(clusterLabel)s="$cluster"}' % $._config, legendFormat='{{instance}}'));
+
+ dashboard.new(
+ '%(dashboardNamePrefix)sAPI server' % $._config.grafanaK8s,
+ time_from='now-1h',
+ uid=($._config.grafanaDashboardIDs['apiserver.json']),
+ tags=($._config.grafanaK8s.dashboardTags),
+ ).addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: $._config.datasourceName,
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(
+ template.new(
+ 'cluster',
+ '$datasource',
+ 'label_values(up{%(kubeApiserverSelector)s}, %(clusterLabel)s)' % $._config,
+ label='cluster',
+ refresh='time',
+ hide=if $._config.showMultiCluster then '' else 'variable',
+ sort=1,
+ )
+ )
+ .addTemplate(
+ template.new(
+ 'instance',
+ '$datasource',
+ 'label_values(up{%(kubeApiserverSelector)s, %(clusterLabel)s="$cluster"}, instance)' % $._config,
+ refresh='time',
+ includeAll=true,
+ sort=1,
+ )
+ )
+ .addPanel(
+ grafana.text.new(
+ title='Notice',
+ content='The SLO (service level objective) and other metrics displayed on this dashboard are for informational purposes only.',
+ description='The SLO (service level objective) and other metrics displayed on this dashboard are for informational purposes only.',
+ span=12,
+ ),
+ gridPos={
+ h: 2,
+ w: 24,
+ x: 0,
+ y: 0,
+ },
+ )
+ .addRow(
+ row.new()
+ .addPanel(availability1d)
+ .addPanel(errorBudget)
+ )
+ .addRow(
+ row.new()
+ .addPanel(readAvailability)
+ .addPanel(readRequests)
+ .addPanel(readErrors)
+ .addPanel(readDuration)
+ )
+ .addRow(
+ row.new()
+ .addPanel(writeAvailability)
+ .addPanel(writeRequests)
+ .addPanel(writeErrors)
+ .addPanel(writeDuration)
+ ).addRow(
+ row.new()
+ .addPanel(workQueueAddRate)
+ .addPanel(workQueueDepth)
+ .addPanel(workQueueLatency)
+ ).addRow(
+ row.new()
+ .addPanel(memory)
+ .addPanel(cpu)
+ .addPanel(goroutines)
+ ),
+ },
+}
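
These dashboard files only populate a hidden `grafanaDashboards+::` field; nothing is rendered until an entrypoint evaluates the mixin and manifests each dashboard as JSON. A hedged sketch of such an entrypoint (the file name and output directory are assumptions, not part of this change):

    // build.jsonnet -- hypothetical; render one file per dashboard with:
    //   jsonnet -J vendor -m dashboards_out build.jsonnet
    local mixin =
      (import 'github.com/kubernetes-monitoring/kubernetes-mixin/config.libsonnet') +
      (import 'github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/dashboards.libsonnet');

    // `jsonnet -m` expects an object mapping output file names to file contents.
    {
      [name]: std.manifestJson(mixin.grafanaDashboards[name])
      for name in std.objectFields(mixin.grafanaDashboards)
    }
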
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/controller-manager.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/controller-manager.libsonnet
new file mode 100644
index 0000000..e56010c
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/controller-manager.libsonnet
@@ -0,0 +1,196 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local singlestat = grafana.singlestat;
+
+{
+ grafanaDashboards+:: {
+ 'controller-manager.json':
+ local upCount =
+ singlestat.new(
+ 'Up',
+ datasource='$datasource',
+ span=2,
+ valueName='min',
+ )
+ .addTarget(prometheus.target('sum(up{%(clusterLabel)s="$cluster", %(kubeControllerManagerSelector)s})' % $._config));
+
+ local workQueueAddRate =
+ graphPanel.new(
+ 'Work Queue Add Rate',
+ datasource='$datasource',
+ span=10,
+ format='ops',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('sum(rate(workqueue_adds_total{%(clusterLabel)s="$cluster", %(kubeControllerManagerSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (%(clusterLabel)s, instance, name)' % $._config, legendFormat='{{%(clusterLabel)s}} {{instance}} {{name}}' % $._config));
+
+ local workQueueDepth =
+ graphPanel.new(
+ 'Work Queue Depth',
+ datasource='$datasource',
+ span=12,
+ min=0,
+ format='short',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('sum(rate(workqueue_depth{%(clusterLabel)s="$cluster", %(kubeControllerManagerSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (%(clusterLabel)s, instance, name)' % $._config, legendFormat='{{%(clusterLabel)s}} {{instance}} {{name}}' % $._config));
+
+ local workQueueLatency =
+ graphPanel.new(
+ 'Work Queue Latency',
+ datasource='$datasource',
+ span=12,
+ format='s',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(workqueue_queue_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeControllerManagerSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (%(clusterLabel)s, instance, name, le))' % $._config, legendFormat='{{%(clusterLabel)s}} {{instance}} {{name}}' % $._config));
+
+ local rpcRate =
+ graphPanel.new(
+ 'Kube API Request Rate',
+ datasource='$datasource',
+ span=4,
+ format='ops',
+ )
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(kubeControllerManagerSelector)s, instance=~"$instance",code=~"2.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='2xx'))
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(kubeControllerManagerSelector)s, instance=~"$instance",code=~"3.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='3xx'))
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(kubeControllerManagerSelector)s, instance=~"$instance",code=~"4.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='4xx'))
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(kubeControllerManagerSelector)s, instance=~"$instance",code=~"5.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='5xx'));
+
+ local postRequestLatency =
+ graphPanel.new(
+ 'Post Request Latency 99th Quantile',
+ datasource='$datasource',
+ span=8,
+ format='s',
+ min=0,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeControllerManagerSelector)s, instance=~"$instance", verb="POST"}[%(grafanaIntervalVar)s])) by (verb, url, le))' % $._config, legendFormat='{{verb}} {{url}}'));
+
+ local getRequestLatency =
+ graphPanel.new(
+ 'Get Request Latency 99th Quantile',
+ datasource='$datasource',
+ span=12,
+ format='s',
+ min=0,
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeControllerManagerSelector)s, instance=~"$instance", verb="GET"}[%(grafanaIntervalVar)s])) by (verb, url, le))' % $._config, legendFormat='{{verb}} {{url}}'));
+
+ local memory =
+ graphPanel.new(
+ 'Memory',
+ datasource='$datasource',
+ span=4,
+ format='bytes',
+ )
+ .addTarget(prometheus.target('process_resident_memory_bytes{%(clusterLabel)s="$cluster", %(kubeControllerManagerSelector)s,instance=~"$instance"}' % $._config, legendFormat='{{instance}}'));
+
+ local cpu =
+ graphPanel.new(
+ 'CPU usage',
+ datasource='$datasource',
+ span=4,
+ format='short',
+ min=0,
+ )
+ .addTarget(prometheus.target('rate(process_cpu_seconds_total{%(clusterLabel)s="$cluster", %(kubeControllerManagerSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])' % $._config, legendFormat='{{instance}}'));
+
+ local goroutines =
+ graphPanel.new(
+ 'Goroutines',
+ datasource='$datasource',
+ span=4,
+ format='short',
+ )
+ .addTarget(prometheus.target('go_goroutines{%(clusterLabel)s="$cluster", %(kubeControllerManagerSelector)s,instance=~"$instance"}' % $._config, legendFormat='{{instance}}'));
+
+
+ dashboard.new(
+ '%(dashboardNamePrefix)sController Manager' % $._config.grafanaK8s,
+ time_from='now-1h',
+ uid=($._config.grafanaDashboardIDs['controller-manager.json']),
+ tags=($._config.grafanaK8s.dashboardTags),
+ ).addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: $._config.datasourceName,
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(
+ template.new(
+ 'cluster',
+ '$datasource',
+ 'label_values(up{%(kubeControllerManagerSelector)s}, %(clusterLabel)s)' % $._config,
+ label='cluster',
+ refresh='time',
+ hide=if $._config.showMultiCluster then '' else 'variable',
+ sort=1,
+ )
+ )
+ .addTemplate(
+ template.new(
+ 'instance',
+ '$datasource',
+ 'label_values(up{%(clusterLabel)s="$cluster", %(kubeControllerManagerSelector)s}, instance)' % $._config,
+ refresh='time',
+ includeAll=true,
+ sort=1,
+ )
+ )
+ .addRow(
+ row.new()
+ .addPanel(upCount)
+ .addPanel(workQueueAddRate)
+ ).addRow(
+ row.new()
+ .addPanel(workQueueDepth)
+ ).addRow(
+ row.new()
+ .addPanel(workQueueLatency)
+ ).addRow(
+ row.new()
+ .addPanel(rpcRate)
+ .addPanel(postRequestLatency)
+ ).addRow(
+ row.new()
+ .addPanel(getRequestLatency)
+ ).addRow(
+ row.new()
+ .addPanel(memory)
+ .addPanel(cpu)
+ .addPanel(goroutines)
+ ),
+ },
+}
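
All of these panels lean on grafonnet's builder pattern: `graphPanel.new(...)` returns a panel object and each chained `.addTarget(...)` merges in one Prometheus query, so a panel is assembled by plain object addition. A self-contained sketch (the panel title and query are illustrative only):

    local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
    local graphPanel = grafana.graphPanel;
    local prometheus = grafana.prometheus;

    // Illustrative panel: scrape-target health per job.
    graphPanel.new(
      'Targets up',
      datasource='$datasource',
      format='short',
      min=0,
    )
    .addTarget(prometheus.target('sum by (job) (up)', legendFormat='{{job}}'))
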
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/dashboards.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/dashboards.libsonnet
new file mode 100644
index 0000000..51bf17e
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/dashboards.libsonnet
@@ -0,0 +1,9 @@
+(import 'network.libsonnet') +
+(import 'persistentvolumesusage.libsonnet') +
+(import 'resources.libsonnet') +
+(import 'apiserver.libsonnet') +
+(import 'controller-manager.libsonnet') +
+(import 'scheduler.libsonnet') +
+(import 'proxy.libsonnet') +
+(import 'kubelet.libsonnet') +
+(import 'defaults.libsonnet')
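
This file is nothing more than a chain of object additions: every imported file contributes entries to the same hidden `grafanaDashboards+::` field, merged left to right, with defaults.libsonnet last so its `super.grafanaDashboards` sees everything accumulated before it. A tiny sketch of the merge semantics:

    local a = { dashboards+:: { 'one.json': { title: 'one' } } };
    local b = { dashboards+:: { 'two.json': { title: 'two' } } };

    // '+::' merges instead of replacing, so both entries survive the sum.
    std.objectFields((a + b).dashboards)  // ["one.json", "two.json"]
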
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/defaults.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/defaults.libsonnet
new file mode 100644
index 0000000..f232238
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/defaults.libsonnet
@@ -0,0 +1,39 @@
+{
+ local kubernetesMixin = self,
+ local grafanaDashboards = super.grafanaDashboards,
+
+  // Automatically add a uid to each dashboard based on the md5 hash of the
+  // file name, and apply the configured timezone, refresh interval, and tags.
+ grafanaDashboards:: {
+ [filename]: grafanaDashboards[filename] {
+ uid: std.md5(filename),
+ timezone: kubernetesMixin._config.grafanaK8s.grafanaTimezone,
+ refresh: kubernetesMixin._config.grafanaK8s.refresh,
+ tags: kubernetesMixin._config.grafanaK8s.dashboardTags,
+
+ rows: [
+ row {
+ panels: [
+ panel {
+ // Modify tooltip to only show a single value
+ tooltip+: {
+ shared: false,
+ },
+ // Modify legend to always show as table on right side
+ legend+: {
+ alignAsTable: true,
+ rightSide: true,
+ },
+ // Set minimum time interval for all panels
+ interval: kubernetesMixin._config.grafanaK8s.minimumTimeInterval,
+ }
+ for panel in super.panels
+ ],
+ }
+ for row in super.rows
+ ],
+
+ }
+ for filename in std.objectFields(grafanaDashboards)
+ },
+}
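
The comprehension above rewrites every dashboard in place: `std.md5(filename)` yields a stable uid, the configured timezone, refresh, and tags are stamped onto each dashboard, and every panel picks up the shared tooltip, legend, and interval settings. A reduced sketch of the uid mechanism alone:

    // Reduced illustration: map each dashboard file name to a stable md5-derived uid.
    local files = ['apiserver.json', 'kubelet.json'];
    {
      [f]: { uid: std.md5(f) }
      for f in files
    }
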
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/kubelet.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/kubelet.libsonnet
new file mode 100644
index 0000000..6069686
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/kubelet.libsonnet
@@ -0,0 +1,356 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local statPanel = grafana.statPanel;
+
+{
+ grafanaDashboards+:: {
+ 'kubelet.json':
+ local upCount =
+ statPanel.new(
+ 'Running Kubelets',
+ datasource='$datasource',
+ reducerFunction='lastNotNull',
+ )
+ .addTarget(prometheus.target('sum(kubelet_node_name{%(clusterLabel)s="$cluster", %(kubeletSelector)s})' % $._config));
+
+ local runningPodCount =
+ statPanel.new(
+ 'Running Pods',
+ datasource='$datasource',
+ reducerFunction='lastNotNull',
+ )
+      // TODO: The second query selected by the OR operator is for backward compatibility with Kubernetes < 1.19, so this can be restored to a single query once 1.23 is out.
+ .addTarget(prometheus.target('sum(kubelet_running_pods{%(clusterLabel)s="$cluster", %(kubeletSelector)s, instance=~"$instance"}) OR sum(kubelet_running_pod_count{%(clusterLabel)s="$cluster", %(kubeletSelector)s, instance=~"$instance"})' % $._config, legendFormat='{{instance}}'));
+
+ local runningContainerCount =
+ statPanel.new(
+ 'Running Containers',
+ datasource='$datasource',
+ reducerFunction='lastNotNull',
+ )
+      // TODO: The second query selected by the OR operator is for backward compatibility with Kubernetes < 1.19, so this can be restored to a single query once 1.23 is out.
+ .addTarget(prometheus.target('sum(kubelet_running_containers{%(clusterLabel)s="$cluster", %(kubeletSelector)s, instance=~"$instance"}) OR sum(kubelet_running_container_count{%(clusterLabel)s="$cluster", %(kubeletSelector)s, instance=~"$instance"})' % $._config, legendFormat='{{instance}}'));
+
+ local actualVolumeCount =
+ statPanel.new(
+ 'Actual Volume Count',
+ datasource='$datasource',
+ reducerFunction='lastNotNull',
+ )
+ .addTarget(prometheus.target('sum(volume_manager_total_volumes{%(clusterLabel)s="$cluster", %(kubeletSelector)s, instance=~"$instance", state="actual_state_of_world"})' % $._config, legendFormat='{{instance}}'));
+
+ local desiredVolumeCount =
+ statPanel.new(
+ 'Desired Volume Count',
+ datasource='$datasource',
+ reducerFunction='lastNotNull',
+ )
+ .addTarget(prometheus.target('sum(volume_manager_total_volumes{%(clusterLabel)s="$cluster", %(kubeletSelector)s, instance=~"$instance",state="desired_state_of_world"})' % $._config, legendFormat='{{instance}}'));
+
+ local configErrorCount =
+ statPanel.new(
+ 'Config Error Count',
+ datasource='$datasource',
+ reducerFunction='lastNotNull',
+ )
+ .addTarget(prometheus.target('sum(rate(kubelet_node_config_error{%(clusterLabel)s="$cluster", %(kubeletSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='{{instance}}'));
+
+ local operationRate =
+ graphPanel.new(
+ 'Operation Rate',
+ datasource='$datasource',
+ format='ops',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('sum(rate(kubelet_runtime_operations_total{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (operation_type, instance)' % $._config, legendFormat='{{instance}} {{operation_type}}'));
+
+ local operationErrorRate =
+ graphPanel.new(
+ 'Operation Error Rate',
+ datasource='$datasource',
+ format='ops',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('sum(rate(kubelet_runtime_operations_errors_total{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, operation_type)' % $._config, legendFormat='{{instance}} {{operation_type}}'));
+
+ local operationLatency =
+ graphPanel.new(
+ 'Operation duration 99th quantile',
+ datasource='$datasource',
+ format='s',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(kubelet_runtime_operations_duration_seconds_bucket{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, operation_type, le))' % $._config, legendFormat='{{instance}} {{operation_type}}'));
+
+ local podStartRate =
+ graphPanel.new(
+ 'Pod Start Rate',
+ datasource='$datasource',
+ format='ops',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('sum(rate(kubelet_pod_start_duration_seconds_count{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance)' % $._config, legendFormat='{{instance}} pod'))
+ .addTarget(prometheus.target('sum(rate(kubelet_pod_worker_duration_seconds_count{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance)' % $._config, legendFormat='{{instance}} worker'));
+
+ local podStartLatency =
+ graphPanel.new(
+ 'Pod Start Duration',
+ datasource='$datasource',
+ format='s',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(kubelet_pod_start_duration_seconds_bucket{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, le))' % $._config, legendFormat='{{instance}} pod'))
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, le))' % $._config, legendFormat='{{instance}} worker'));
+
+ local storageOperationRate =
+ graphPanel.new(
+ 'Storage Operation Rate',
+ datasource='$datasource',
+ format='ops',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ legend_hideEmpty=true,
+ legend_hideZero=true,
+ )
+ .addTarget(prometheus.target('sum(rate(storage_operation_duration_seconds_count{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, operation_name, volume_plugin)' % $._config, legendFormat='{{instance}} {{operation_name}} {{volume_plugin}}'));
+
+ local storageOperationErrorRate =
+ graphPanel.new(
+ 'Storage Operation Error Rate',
+ datasource='$datasource',
+ format='ops',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ legend_hideEmpty=true,
+ legend_hideZero=true,
+ )
+ .addTarget(prometheus.target('sum(rate(storage_operation_errors_total{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, operation_name, volume_plugin)' % $._config, legendFormat='{{instance}} {{operation_name}} {{volume_plugin}}'));
+
+
+ local storageOperationLatency =
+ graphPanel.new(
+ 'Storage Operation Duration 99th quantile',
+ datasource='$datasource',
+ format='s',
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ legend_hideEmpty=true,
+ legend_hideZero=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(storage_operation_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeletSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, operation_name, volume_plugin, le))' % $._config, legendFormat='{{instance}} {{operation_name}} {{volume_plugin}}'));
+
+ local cgroupManagerRate =
+ graphPanel.new(
+ 'Cgroup manager operation rate',
+ datasource='$datasource',
+ format='ops',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('sum(rate(kubelet_cgroup_manager_duration_seconds_count{%(clusterLabel)s="$cluster", %(kubeletSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, operation_type)' % $._config, legendFormat='{{operation_type}}'));
+
+ local cgroupManagerDuration =
+ graphPanel.new(
+ 'Cgroup manager 99th quantile',
+ datasource='$datasource',
+ format='s',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(kubelet_cgroup_manager_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeletSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, operation_type, le))' % $._config, legendFormat='{{instance}} {{operation_type}}'));
+
+ local plegRelistRate =
+ graphPanel.new(
+ 'PLEG relist rate',
+ datasource='$datasource',
+ description='Pod lifecycle event generator',
+ format='ops',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('sum(rate(kubelet_pleg_relist_duration_seconds_count{%(clusterLabel)s="$cluster", %(kubeletSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance)' % $._config, legendFormat='{{instance}}'));
+
+ local plegRelistDuration =
+ graphPanel.new(
+ 'PLEG relist duration',
+ datasource='$datasource',
+ format='s',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, le))' % $._config, legendFormat='{{instance}}'));
+
+ local plegRelistInterval =
+ graphPanel.new(
+ 'PLEG relist interval',
+ datasource='$datasource',
+ format='s',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_interval_seconds_bucket{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, le))' % $._config, legendFormat='{{instance}}'));
+
+ local rpcRate =
+ graphPanel.new(
+ 'RPC Rate',
+ datasource='$datasource',
+ format='ops',
+ )
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster",%(kubeletSelector)s, instance=~"$instance",code=~"2.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='2xx'))
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster",%(kubeletSelector)s, instance=~"$instance",code=~"3.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='3xx'))
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster",%(kubeletSelector)s, instance=~"$instance",code=~"4.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='4xx'))
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster",%(kubeletSelector)s, instance=~"$instance",code=~"5.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='5xx'));
+
+ local requestDuration =
+ graphPanel.new(
+ 'Request duration 99th quantile',
+ datasource='$datasource',
+ format='s',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{%(clusterLabel)s="$cluster",%(kubeletSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, verb, url, le))' % $._config, legendFormat='{{instance}} {{verb}} {{url}}'));
+
+ local memory =
+ graphPanel.new(
+ 'Memory',
+ datasource='$datasource',
+ format='bytes',
+ )
+ .addTarget(prometheus.target('process_resident_memory_bytes{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}' % $._config, legendFormat='{{instance}}'));
+
+ local cpu =
+ graphPanel.new(
+ 'CPU usage',
+ datasource='$datasource',
+ format='short',
+ )
+ .addTarget(prometheus.target('rate(process_cpu_seconds_total{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])' % $._config, legendFormat='{{instance}}'));
+
+ local goroutines =
+ graphPanel.new(
+ 'Goroutines',
+ datasource='$datasource',
+ format='short',
+ )
+ .addTarget(prometheus.target('go_goroutines{%(clusterLabel)s="$cluster",%(kubeletSelector)s,instance=~"$instance"}' % $._config, legendFormat='{{instance}}'));
+
+
+ dashboard.new(
+ '%(dashboardNamePrefix)sKubelet' % $._config.grafanaK8s,
+ time_from='now-1h',
+ uid=($._config.grafanaDashboardIDs['kubelet.json']),
+ tags=($._config.grafanaK8s.dashboardTags),
+ ).addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: $._config.datasourceName,
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(
+ template.new(
+ 'cluster',
+ '$datasource',
+ 'label_values(up{%(kubeletSelector)s}, %(clusterLabel)s)' % $._config,
+ label='cluster',
+ refresh='time',
+ hide=if $._config.showMultiCluster then '' else 'variable',
+ sort=1,
+ )
+ )
+ .addTemplate(
+ template.new(
+ 'instance',
+ '$datasource',
+ 'label_values(up{%(kubeletSelector)s,%(clusterLabel)s="$cluster"}, instance)' % $._config,
+ label='instance',
+ refresh='time',
+ includeAll=true,
+ sort=1,
+ )
+ )
+ .addPanel(upCount, gridPos={ h: 7, w: 4, x: 0, y: 0 })
+ .addPanel(runningPodCount, gridPos={ h: 7, w: 4, x: 4, y: 0 })
+ .addPanel(runningContainerCount, gridPos={ h: 7, w: 4, x: 8, y: 0 })
+ .addPanel(actualVolumeCount, gridPos={ h: 7, w: 4, x: 12, y: 0 })
+ .addPanel(desiredVolumeCount, gridPos={ h: 7, w: 4, x: 16, y: 0 })
+ .addPanel(configErrorCount, gridPos={ h: 7, w: 4, x: 20, y: 0 })
+ .addPanel(operationRate, gridPos={ h: 7, w: 12, x: 0, y: 7 })
+ .addPanel(operationErrorRate, gridPos={ h: 7, w: 12, x: 12, y: 7 })
+ .addPanel(operationLatency, gridPos={ h: 7, w: 24, x: 0, y: 14 })
+ .addPanel(podStartRate, gridPos={ h: 7, w: 12, x: 0, y: 21 })
+ .addPanel(podStartLatency, gridPos={ h: 7, w: 12, x: 12, y: 21 })
+ .addPanel(storageOperationRate, gridPos={ h: 7, w: 12, x: 0, y: 28 })
+ .addPanel(storageOperationErrorRate, gridPos={ h: 7, w: 12, x: 12, y: 28 })
+ .addPanel(storageOperationLatency, gridPos={ h: 7, w: 24, x: 0, y: 35 })
+ .addPanel(cgroupManagerRate, gridPos={ h: 7, w: 12, x: 0, y: 42 })
+ .addPanel(cgroupManagerDuration, gridPos={ h: 7, w: 12, x: 12, y: 42 })
+ .addPanel(plegRelistRate, gridPos={ h: 7, w: 12, x: 0, y: 49 })
+ .addPanel(plegRelistInterval, gridPos={ h: 7, w: 12, x: 12, y: 49 })
+ .addPanel(plegRelistDuration, gridPos={ h: 7, w: 24, x: 0, y: 56 })
+ .addPanel(rpcRate, gridPos={ h: 7, w: 24, x: 0, y: 63 })
+ .addPanel(requestDuration, gridPos={ h: 7, w: 24, x: 0, y: 70 })
+ .addPanel(memory, gridPos={ h: 7, w: 8, x: 0, y: 77 })
+ .addPanel(cpu, gridPos={ h: 7, w: 8, x: 8, y: 77 })
+ .addPanel(goroutines, gridPos={ h: 7, w: 8, x: 16, y: 77 }),
+ },
+}
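
Unlike the row-based dashboards earlier in this change, kubelet.json places every panel explicitly on Grafana's 24-column grid: `w` and `x` are measured in grid columns (so three 8-wide panels fill a row), while `h` and `y` are in grid rows. A purely illustrative helper for that arithmetic, not part of the mixin:

    // Lay out equally sized panels left to right across the 24-column grid.
    local layout(panels, perRow, h) = [
      panels[i] { gridPos: {
        h: h,
        w: 24 / perRow,
        x: (i % perRow) * (24 / perRow),
        y: std.floor(i / perRow) * h,
      } }
      for i in std.range(0, std.length(panels) - 1)
    ];

    layout([{ title: 'a' }, { title: 'b' }, { title: 'c' }], 3, 7)
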
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/cluster-total.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/cluster-total.libsonnet
new file mode 100644
index 0000000..52e0503
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/cluster-total.libsonnet
@@ -0,0 +1,512 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local tablePanel = grafana.tablePanel;
+local annotation = grafana.annotation;
+
+{
+ grafanaDashboards+:: {
+
+ 'cluster-total.json':
+
+ local newStyle(
+ alias,
+ colorMode=null,
+ colors=[],
+ dateFormat='YYYY-MM-DD HH:mm:ss',
+ decimals=2,
+ link=false,
+ linkTooltip='Drill down',
+ linkUrl='',
+ thresholds=[],
+ type='number',
+ unit='short'
+ ) = {
+ alias: alias,
+ colorMode: colorMode,
+ colors: colors,
+ dateFormat: dateFormat,
+ decimals: decimals,
+ link: link,
+ linkTooltip: linkTooltip,
+ linkUrl: linkUrl,
+ thresholds: thresholds,
+ type: type,
+ unit: unit,
+ };
+
+ local newBarplotPanel(graphTitle, graphQuery, graphFormat='Bps', legendFormat='{{namespace}}') =
+ local target =
+ prometheus.target(
+ graphQuery
+ ) + {
+ intervalFactor: 1,
+ legendFormat: legendFormat,
+ step: 10,
+ };
+
+ graphPanel.new(
+ title=graphTitle,
+ span=24,
+ datasource='$datasource',
+ fill=2,
+ min_span=24,
+ format=graphFormat,
+ min=0,
+ max=null,
+ show_xaxis=false,
+ x_axis_mode='series',
+ x_axis_values='current',
+ lines=false,
+ bars=true,
+ stack=false,
+ legend_show=true,
+ legend_values=true,
+ legend_min=false,
+ legend_max=false,
+ legend_current=true,
+ legend_avg=false,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ legend_sort='current',
+ legend_sortDesc=true,
+ nullPointMode='null'
+ ).addTarget(target) + {
+ legend+: {
+ hideEmpty: true,
+ hideZero: true,
+ },
+ paceLength: 10,
+ tooltip+: {
+ sort: 2,
+ },
+ };
+
+ local newGraphPanel(graphTitle, graphQuery, graphFormat='Bps', legendFormat='{{namespace}}') =
+ local target =
+ prometheus.target(
+ graphQuery
+ ) + {
+ intervalFactor: 1,
+ legendFormat: legendFormat,
+ step: 10,
+ };
+
+ graphPanel.new(
+ title=graphTitle,
+ span=24,
+ datasource='$datasource',
+ fill=2,
+ linewidth=2,
+ min_span=24,
+ format=graphFormat,
+ min=0,
+ max=null,
+ x_axis_mode='time',
+ x_axis_values='total',
+ lines=true,
+ stack=true,
+ legend_show=true,
+ legend_values=true,
+ legend_min=true,
+ legend_max=true,
+ legend_current=true,
+ legend_avg=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ nullPointMode='connected'
+ ).addTarget(target) + {
+ legend+: {
+ hideEmpty: true,
+ hideZero: true,
+ },
+ paceLength: 10,
+ tooltip+: {
+ sort: 2,
+ },
+ };
+
+ local newTablePanel(tableTitle, colQueries) =
+ local buildTarget(index, colQuery) =
+ prometheus.target(
+ colQuery,
+ format='table',
+ instant=true,
+ ) + {
+ legendFormat: '',
+ step: 10,
+ refId: std.char(65 + index),
+ };
+
+ local targets = std.mapWithIndex(buildTarget, colQueries);
+
+ tablePanel.new(
+ title=tableTitle,
+ span=24,
+ min_span=24,
+ datasource='$datasource',
+ )
+ .addColumn(
+ field='Time',
+ style=newStyle(
+ alias='Time',
+ type='hidden'
+ )
+ )
+ .addColumn(
+ field='Value #A',
+ style=newStyle(
+ alias='Current Bandwidth Received',
+ unit='Bps',
+ ),
+ )
+ .addColumn(
+ field='Value #B',
+ style=newStyle(
+ alias='Current Bandwidth Transmitted',
+ unit='Bps',
+ ),
+ )
+ .addColumn(
+ field='Value #C',
+ style=newStyle(
+ alias='Average Bandwidth Received',
+ unit='Bps',
+ ),
+ )
+ .addColumn(
+ field='Value #D',
+ style=newStyle(
+ alias='Average Bandwidth Transmitted',
+ unit='Bps',
+ ),
+ )
+ .addColumn(
+ field='Value #E',
+ style=newStyle(
+ alias='Rate of Received Packets',
+ unit='pps',
+ ),
+ )
+ .addColumn(
+ field='Value #F',
+ style=newStyle(
+ alias='Rate of Transmitted Packets',
+ unit='pps',
+ ),
+ )
+ .addColumn(
+ field='Value #G',
+ style=newStyle(
+ alias='Rate of Received Packets Dropped',
+ unit='pps',
+ ),
+ )
+ .addColumn(
+ field='Value #H',
+ style=newStyle(
+ alias='Rate of Transmitted Packets Dropped',
+ unit='pps',
+ ),
+ )
+ .addColumn(
+ field='namespace',
+ style=newStyle(
+ alias='Namespace',
+ link=true,
+ linkUrl='d/8b7a8b326d7a6f1f04244066368c67af/kubernetes-networking-namespace-pods?orgId=1&refresh=30s&var-namespace=$__cell',
+ ),
+ ) + {
+
+ fill: 1,
+ fontSize: '90%',
+ lines: true,
+ linewidth: 1,
+ nullPointMode: 'null as zero',
+ renderer: 'flot',
+ scroll: true,
+ showHeader: true,
+ spaceLength: 10,
+ sort: {
+ col: 0,
+ desc: false,
+ },
+ targets: targets,
+ };
+
+ local resolutionTemplate =
+ template.new(
+ name='resolution',
+ datasource='$datasource',
+ query='30s,5m,1h',
+ current='5m',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ skipUrlSync: false,
+ type: 'interval',
+ options: [
+ {
+ selected: false,
+ text: '30s',
+ value: '30s',
+ },
+ {
+ selected: true,
+ text: '5m',
+ value: '5m',
+ },
+ {
+ selected: false,
+ text: '1h',
+ value: '1h',
+ },
+ ],
+ };
+
+ local intervalTemplate =
+ template.new(
+ name='interval',
+ datasource='$datasource',
+ query='4h',
+ current='5m',
+ hide=2,
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ skipUrlSync: false,
+ type: 'interval',
+ options: [
+ {
+ selected: true,
+ text: '4h',
+ value: '4h',
+ },
+ ],
+ };
+
+ //##### Current Bandwidth Row ######
+
+ local currentBandwidthRow =
+ row.new(
+ title='Current Bandwidth'
+ );
+
+ //##### Average Bandwidth Row ######
+
+ local averageBandwidthRow =
+ row.new(
+ title='Average Bandwidth',
+ collapse=true,
+ );
+
+ //##### Bandwidth History Row ######
+ local bandwidthHistoryRow =
+ row.new(
+ title='Bandwidth History'
+ );
+
+ //##### Packet Row ######
+ // collapsed, so row must include panels
+ local packetRow =
+ row.new(
+ title='Packets',
+ collapse=true,
+ );
+
+ //##### Error Row ######
+ // collapsed, so row must include panels
+ local errorRow =
+ row.new(
+ title='Errors',
+ collapse=true,
+ );
+ local clusterTemplate =
+ template.new(
+ name='cluster',
+ datasource='$datasource',
+ query='label_values(up{%(cadvisorSelector)s}, %(clusterLabel)s)' % $._config,
+ hide=if $._config.showMultiCluster then '' else '2',
+ refresh=2
+ );
+
+ dashboard.new(
+ title='%(dashboardNamePrefix)sNetworking / Cluster' % $._config.grafanaK8s,
+ tags=($._config.grafanaK8s.dashboardTags),
+ editable=true,
+ schemaVersion=18,
+ refresh=($._config.grafanaK8s.refresh),
+ time_from='now-1h',
+ time_to='now',
+ )
+ .addTemplate(resolutionTemplate)
+ .addTemplate(intervalTemplate)
+ .addAnnotation(annotation.default)
+ .addPanel(
+ currentBandwidthRow, gridPos={ h: 1, w: 24, x: 0, y: 0 }
+ )
+ .addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: $._config.datasourceName,
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(clusterTemplate)
+ .addPanel(
+ newBarplotPanel(
+ graphTitle='Current Rate of Bytes Received',
+ graphQuery='sort_desc(sum(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 1 }
+ )
+ .addPanel(
+ newBarplotPanel(
+ graphTitle='Current Rate of Bytes Transmitted',
+ graphQuery='sort_desc(sum(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 1 }
+ )
+ .addPanel(
+ newTablePanel(
+ tableTitle='Current Status',
+ colQueries=[
+ 'sort_desc(sum(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ 'sort_desc(sum(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ 'sort_desc(avg(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ 'sort_desc(avg(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ 'sort_desc(sum(irate(container_network_receive_packets_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ 'sort_desc(sum(irate(container_network_transmit_packets_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ 'sort_desc(sum(irate(container_network_receive_packets_dropped_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ 'sort_desc(sum(irate(container_network_transmit_packets_dropped_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ ]
+ ),
+ gridPos={ h: 9, w: 24, x: 0, y: 10 }
+ )
+ .addPanel(
+ averageBandwidthRow
+ .addPanel(
+ newBarplotPanel(
+ graphTitle='Average Rate of Bytes Received',
+ graphQuery='sort_desc(avg(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 11 }
+ )
+ .addPanel(
+ newBarplotPanel(
+ graphTitle='Average Rate of Bytes Transmitted',
+ graphQuery='sort_desc(avg(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 11 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 10 },
+ )
+ .addPanel(
+ bandwidthHistoryRow, gridPos={ h: 1, w: 24, x: 0, y: 11 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Receive Bandwidth',
+ graphQuery='sort_desc(sum(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ ),
+ gridPos={ h: 9, w: 24, x: 0, y: 12 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Transmit Bandwidth',
+ graphQuery='sort_desc(sum(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ ),
+ gridPos={ h: 9, w: 24, x: 0, y: 21 }
+ )
+ .addPanel(
+ packetRow
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Received Packets',
+ graphQuery='sort_desc(sum(irate(container_network_receive_packets_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 9, w: 24, x: 0, y: 31 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Transmitted Packets',
+ graphQuery='sort_desc(sum(irate(container_network_transmit_packets_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 9, w: 24, x: 0, y: 40 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 30 }
+ )
+ .addPanel(
+ errorRow
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Received Packets Dropped',
+ graphQuery='sort_desc(sum(irate(container_network_receive_packets_dropped_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 9, w: 24, x: 0, y: 50 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Transmitted Packets Dropped',
+ graphQuery='sort_desc(sum(irate(container_network_transmit_packets_dropped_total{%(clusterLabel)s="$cluster",namespace=~".+"}[$interval:$resolution])) by (namespace))' % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 9, w: 24, x: 0, y: 59 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of TCP Retransmits out of all sent segments',
+ graphQuery='sort_desc(sum(rate(node_netstat_Tcp_RetransSegs{%(clusterLabel)s="$cluster"}[$interval:$resolution]) / rate(node_netstat_Tcp_OutSegs{%(clusterLabel)s="$cluster"}[$interval:$resolution])) by (instance))' % $._config,
+ graphFormat='percentunit',
+ legendFormat='{{instance}}'
+ ) + { links: [
+ {
+ url: 'https://accedian.com/enterprises/blog/network-packet-loss-retransmissions-and-duplicate-acknowledgements/',
+ title: 'What is TCP Retransmit?',
+ targetBlank: true,
+ },
+ ] },
+ gridPos={ h: 9, w: 24, x: 0, y: 59 }
+ ).addPanel(
+ newGraphPanel(
+ graphTitle='Rate of TCP SYN Retransmits out of all retransmits',
+ graphQuery='sort_desc(sum(rate(node_netstat_TcpExt_TCPSynRetrans{%(clusterLabel)s="$cluster"}[$interval:$resolution]) / rate(node_netstat_Tcp_RetransSegs{%(clusterLabel)s="$cluster"}[$interval:$resolution])) by (instance))' % $._config,
+ graphFormat='percentunit',
+ legendFormat='{{instance}}'
+ ) + { links: [
+ {
+ url: 'https://github.com/prometheus/node_exporter/issues/1023#issuecomment-408128365',
+ title: 'Why monitor SYN retransmits?',
+ targetBlank: true,
+ },
+ ] },
+ gridPos={ h: 9, w: 24, x: 0, y: 59 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 31 }
+ ),
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/namespace-by-pod.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/namespace-by-pod.libsonnet
new file mode 100644
index 0000000..4ef52ba
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/namespace-by-pod.libsonnet
@@ -0,0 +1,463 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local tablePanel = grafana.tablePanel;
+local annotation = grafana.annotation;
+local singlestat = grafana.singlestat;
+
+{
+ grafanaDashboards+:: {
+
+ 'namespace-by-pod.json':
+
+ local newStyle(
+ alias,
+ colorMode=null,
+ colors=[],
+ dateFormat='YYYY-MM-DD HH:mm:ss',
+ decimals=2,
+ link=false,
+ linkTooltip='Drill down',
+ linkUrl='',
+ thresholds=[],
+ type='number',
+ unit='short'
+ ) = {
+ alias: alias,
+ colorMode: colorMode,
+ colors: colors,
+ dateFormat: dateFormat,
+ decimals: decimals,
+ link: link,
+ linkTooltip: linkTooltip,
+ linkUrl: linkUrl,
+ thresholds: thresholds,
+ type: type,
+ unit: unit,
+ };
+
+ local newGaugePanel(gaugeTitle, gaugeQuery) =
+ local target =
+ prometheus.target(
+ gaugeQuery,
+ ) + {
+ instant: null,
+ intervalFactor: 1,
+ };
+
+ singlestat.new(
+ title=gaugeTitle,
+ datasource='$datasource',
+ format='time_series',
+ height=9,
+ span=12,
+ min_span=12,
+ decimals=0,
+ valueName='current'
+ ).addTarget(target) + {
+ timeFrom: null,
+ timeShift: null,
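+        // the singlestat is re-typed to 'gauge' and handed field options the singlestat builder does not expose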
+ type: 'gauge',
+ options: {
+ fieldOptions: {
+ calcs: [
+ 'last',
+ ],
+ defaults: {
+            max: 10000000000, // 10 GB/s
+ min: 0,
+ title: '$namespace',
+ unit: 'Bps',
+ },
+ mappings: [],
+ override: {},
+ thresholds: [
+ {
+ color: 'dark-green',
+ index: 0,
+              value: null, // 0 GB/s
+ },
+ {
+ color: 'dark-yellow',
+ index: 1,
+              value: 5000000000, // 5 GB/s
+ },
+ {
+ color: 'dark-red',
+ index: 2,
+              value: 7000000000, // 7 GB/s
+ },
+ ],
+ values: false,
+ },
+ },
+ };
+
+ local newGraphPanel(graphTitle, graphQuery, graphFormat='Bps') =
+ local target =
+ prometheus.target(
+ graphQuery
+ ) + {
+ intervalFactor: 1,
+ legendFormat: '{{pod}}',
+ step: 10,
+ };
+
+ graphPanel.new(
+ title=graphTitle,
+ span=12,
+ datasource='$datasource',
+ fill=2,
+ linewidth=2,
+ min_span=12,
+ format=graphFormat,
+ min=0,
+ max=null,
+ x_axis_mode='time',
+ x_axis_values='total',
+ lines=true,
+ stack=true,
+ legend_show=true,
+ nullPointMode='connected'
+ ).addTarget(target) + {
+ legend+: {
+ hideEmpty: true,
+ hideZero: true,
+ },
+ paceLength: 10,
+ tooltip+: {
+ sort: 2,
+ },
+ };
+
+ local newTablePanel(tableTitle, colQueries) =
+ local buildTarget(index, colQuery) =
+ prometheus.target(
+ colQuery,
+ format='table',
+ instant=true,
+ ) + {
+ legendFormat: '',
+ step: 10,
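+        // std.char(65 + index) yields 'A', 'B', 'C', ...; the 'Value #A'..'Value #F' columns below reference these refIds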
+ refId: std.char(65 + index),
+ };
+
+ local targets = std.mapWithIndex(buildTarget, colQueries);
+
+ tablePanel.new(
+ title=tableTitle,
+ span=24,
+ min_span=24,
+ datasource='$datasource',
+ )
+ .addColumn(
+ field='Time',
+ style=newStyle(
+ alias='Time',
+ type='hidden',
+ )
+ )
+ .addColumn(
+ field='Value #A',
+ style=newStyle(
+ alias='Bandwidth Received',
+ unit='Bps',
+ ),
+ )
+ .addColumn(
+ field='Value #B',
+ style=newStyle(
+ alias='Bandwidth Transmitted',
+ unit='Bps',
+ ),
+ )
+ .addColumn(
+ field='Value #C',
+ style=newStyle(
+ alias='Rate of Received Packets',
+ unit='pps',
+ ),
+ )
+ .addColumn(
+ field='Value #D',
+ style=newStyle(
+ alias='Rate of Transmitted Packets',
+ unit='pps',
+ ),
+ )
+ .addColumn(
+ field='Value #E',
+ style=newStyle(
+ alias='Rate of Received Packets Dropped',
+ unit='pps',
+ ),
+ )
+ .addColumn(
+ field='Value #F',
+ style=newStyle(
+ alias='Rate of Transmitted Packets Dropped',
+ unit='pps',
+ ),
+ )
+ .addColumn(
+ field='pod',
+ style=newStyle(
+ alias='Pod',
+ link=true,
+ linkUrl='d/7a18067ce943a40ae25454675c19ff5c/kubernetes-networking-pod?orgId=1&refresh=30s&var-namespace=$namespace&var-pod=$__cell'
+ ),
+ ) + {
+
+ fill: 1,
+ fontSize: '100%',
+ lines: true,
+ linewidth: 1,
+ nullPointMode: 'null as zero',
+ renderer: 'flot',
+ scroll: true,
+ showHeader: true,
+ spaceLength: 10,
+ sort: {
+ col: 0,
+ desc: false,
+ },
+ targets: targets,
+ };
+
+ local clusterTemplate =
+ template.new(
+ name='cluster',
+ datasource='$datasource',
+ query='label_values(up{%(cadvisorSelector)s}, %(clusterLabel)s)' % $._config,
+ hide=if $._config.showMultiCluster then '' else '2',
+ refresh=2
+ );
+
+ local namespaceTemplate =
+ template.new(
+ name='namespace',
+ datasource='$datasource',
+ query='label_values(container_network_receive_packets_total{%(clusterLabel)s="$cluster"}, namespace)' % $._config,
+ allValues='.+',
+ current='kube-system',
+ hide='',
+ refresh=2,
+ includeAll=true,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ definition: 'label_values(container_network_receive_packets_total{%(clusterLabel)s="$cluster"}, namespace)' % $._config,
+ skipUrlSync: false,
+ };
+
+ local resolutionTemplate =
+ template.new(
+ name='resolution',
+ datasource='$datasource',
+ query='30s,5m,1h',
+ current='5m',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ skipUrlSync: false,
+ type: 'interval',
+ options: [
+ {
+ selected: false,
+ text: '30s',
+ value: '30s',
+ },
+ {
+ selected: true,
+ text: '5m',
+ value: '5m',
+ },
+ {
+ selected: false,
+ text: '1h',
+ value: '1h',
+ },
+ ],
+ };
+
+ local intervalTemplate =
+ template.new(
+ name='interval',
+ datasource='$datasource',
+ query='4h',
+ current='5m',
+ hide=2,
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ skipUrlSync: false,
+ type: 'interval',
+ options: [
+ {
+ selected: true,
+ text: '4h',
+ value: '4h',
+ },
+ ],
+ };
+
+ //##### Current Bandwidth Row ######
+
+ local currentBandwidthRow =
+ row.new(
+ title='Current Bandwidth'
+ );
+
+ //##### Bandwidth Row ######
+
+ local bandwidthRow =
+ row.new(
+ title='Bandwidth'
+ );
+
+ //##### Packet Row ######
+ // collapsed, so row must include panels
+ local packetRow =
+ row.new(
+ title='Packets',
+ collapse=true,
+ );
+
+ //##### Error Row ######
+ // collapsed, so row must include panels
+ local errorRow =
+ row.new(
+ title='Errors',
+ collapse=true,
+ );
+
+ dashboard.new(
+ title='%(dashboardNamePrefix)sNetworking / Namespace (Pods)' % $._config.grafanaK8s,
+ tags=($._config.grafanaK8s.dashboardTags),
+ editable=true,
+ schemaVersion=18,
+ refresh=($._config.grafanaK8s.refresh),
+ time_from='now-1h',
+ time_to='now',
+ )
+ .addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: $._config.datasourceName,
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(clusterTemplate)
+ .addTemplate(namespaceTemplate)
+ .addTemplate(resolutionTemplate)
+ .addTemplate(intervalTemplate)
+ .addAnnotation(annotation.default)
+ .addPanel(currentBandwidthRow, gridPos={ h: 1, w: 24, x: 0, y: 0 })
+ .addPanel(
+ newGaugePanel(
+ gaugeTitle='Current Rate of Bytes Received',
+ gaugeQuery='sum(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution]))' % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 1 }
+ )
+ .addPanel(
+ newGaugePanel(
+ gaugeTitle='Current Rate of Bytes Transmitted',
+ gaugeQuery='sum(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution]))' % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 1 }
+ )
+ .addPanel(
+ newTablePanel(
+ tableTitle='Current Status',
+ colQueries=[
+ 'sum(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])) by (pod)' % $._config,
+ 'sum(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])) by (pod)' % $._config,
+ 'sum(irate(container_network_receive_packets_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])) by (pod)' % $._config,
+ 'sum(irate(container_network_transmit_packets_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])) by (pod)' % $._config,
+ 'sum(irate(container_network_receive_packets_dropped_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])) by (pod)' % $._config,
+ 'sum(irate(container_network_transmit_packets_dropped_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])) by (pod)' % $._config,
+ ]
+ ),
+ gridPos={ h: 9, w: 24, x: 0, y: 10 }
+ )
+ .addPanel(bandwidthRow, gridPos={ h: 1, w: 24, x: 0, y: 19 })
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Receive Bandwidth',
+ graphQuery='sum(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])) by (pod)' % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 20 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Transmit Bandwidth',
+ graphQuery='sum(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])) by (pod)' % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 20 }
+ )
+ .addPanel(
+ packetRow
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Received Packets',
+ graphQuery='sum(irate(container_network_receive_packets_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])) by (pod)' % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 10, w: 12, x: 0, y: 30 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Transmitted Packets',
+ graphQuery='sum(irate(container_network_transmit_packets_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])) by (pod)' % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 10, w: 12, x: 12, y: 30 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 29 }
+ )
+ .addPanel(
+ errorRow
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Received Packets Dropped',
+ graphQuery='sum(irate(container_network_receive_packets_dropped_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])) by (pod)' % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 10, w: 12, x: 0, y: 40 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Transmitted Packets Dropped',
+ graphQuery='sum(irate(container_network_transmit_packets_dropped_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])) by (pod)' % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 10, w: 12, x: 12, y: 40 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 30 }
+ ),
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/namespace-by-workload.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/namespace-by-workload.libsonnet
new file mode 100644
index 0000000..310107d
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/namespace-by-workload.libsonnet
@@ -0,0 +1,591 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local tablePanel = grafana.tablePanel;
+local annotation = grafana.annotation;
+
+{
+ grafanaDashboards+:: {
+
+ 'namespace-by-workload.json':
+
+ local newStyle(
+ alias,
+ colorMode=null,
+ colors=[],
+ dateFormat='YYYY-MM-DD HH:mm:ss',
+ decimals=2,
+ link=false,
+ linkTooltip='Drill down',
+ linkUrl='',
+ thresholds=[],
+ type='number',
+ unit='short'
+ ) = {
+ alias: alias,
+ colorMode: colorMode,
+ colors: colors,
+ dateFormat: dateFormat,
+ decimals: decimals,
+ link: link,
+ linkTooltip: linkTooltip,
+ linkUrl: linkUrl,
+ thresholds: thresholds,
+ type: type,
+ unit: unit,
+ };
+
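+    // "barplot" panels are graph panels drawn with bars=true and x_axis_mode='series': one bar per label value, with the legend sorted by current value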
+ local newBarplotPanel(graphTitle, graphQuery, graphFormat='Bps', legendFormat='{{namespace}}') =
+ local target =
+ prometheus.target(
+ graphQuery
+ ) + {
+ intervalFactor: 1,
+ legendFormat: legendFormat,
+ step: 10,
+ };
+
+ graphPanel.new(
+ title=graphTitle,
+ span=24,
+ datasource='$datasource',
+ fill=2,
+ min_span=24,
+ format=graphFormat,
+ min=0,
+ max=null,
+ show_xaxis=false,
+ x_axis_mode='series',
+ x_axis_values='current',
+ lines=false,
+ bars=true,
+ stack=false,
+ legend_show=true,
+ legend_values=true,
+ legend_min=false,
+ legend_max=false,
+ legend_current=true,
+ legend_avg=false,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ legend_sort='current',
+ legend_sortDesc=true,
+ nullPointMode='null'
+ ).addTarget(target) + {
+ legend+: {
+ hideEmpty: true,
+ hideZero: true,
+ },
+ paceLength: 10,
+ tooltip+: {
+ sort: 2,
+ },
+ };
+
+ local newGraphPanel(graphTitle, graphQuery, graphFormat='Bps') =
+ local target =
+ prometheus.target(
+ graphQuery
+ ) + {
+ intervalFactor: 1,
+ legendFormat: '{{workload}}',
+ step: 10,
+ };
+
+ graphPanel.new(
+ title=graphTitle,
+ span=12,
+ datasource='$datasource',
+ fill=2,
+ linewidth=2,
+ min_span=12,
+ format=graphFormat,
+ min=0,
+ max=null,
+ x_axis_mode='time',
+ x_axis_values='total',
+ lines=true,
+ stack=true,
+ legend_show=true,
+ nullPointMode='connected'
+ ).addTarget(target) + {
+ legend+: {
+ hideEmpty: true,
+ hideZero: true,
+ },
+ paceLength: 10,
+ tooltip+: {
+ sort: 2,
+ },
+ };
+
+ local newTablePanel(tableTitle, colQueries) =
+ local buildTarget(index, colQuery) =
+ prometheus.target(
+ colQuery,
+ format='table',
+ instant=true,
+ ) + {
+ legendFormat: '',
+ step: 10,
+ refId: std.char(65 + index),
+ };
+
+ local targets = std.mapWithIndex(buildTarget, colQueries);
+
+ tablePanel.new(
+ title=tableTitle,
+ span=24,
+ min_span=24,
+ datasource='$datasource',
+ )
+ .addColumn(
+ field='Time',
+ style=newStyle(
+ alias='Time',
+ type='hidden'
+ )
+ )
+ .addColumn(
+ field='Value #A',
+ style=newStyle(
+ alias='Current Bandwidth Received',
+ unit='Bps',
+ ),
+ )
+ .addColumn(
+ field='Value #B',
+ style=newStyle(
+ alias='Current Bandwidth Transmitted',
+ unit='Bps',
+ ),
+ )
+ .addColumn(
+ field='Value #C',
+ style=newStyle(
+ alias='Average Bandwidth Received',
+ unit='Bps',
+ ),
+ )
+ .addColumn(
+ field='Value #D',
+ style=newStyle(
+ alias='Average Bandwidth Transmitted',
+ unit='Bps',
+ ),
+ )
+ .addColumn(
+ field='Value #E',
+ style=newStyle(
+ alias='Rate of Received Packets',
+ unit='pps',
+ ),
+ )
+ .addColumn(
+ field='Value #F',
+ style=newStyle(
+ alias='Rate of Transmitted Packets',
+ unit='pps',
+ ),
+ )
+ .addColumn(
+ field='Value #G',
+ style=newStyle(
+ alias='Rate of Received Packets Dropped',
+ unit='pps',
+ ),
+ )
+ .addColumn(
+ field='Value #H',
+ style=newStyle(
+ alias='Rate of Transmitted Packets Dropped',
+ unit='pps',
+ ),
+ )
+ .addColumn(
+ field='workload',
+ style=newStyle(
+ alias='Workload',
+ link=true,
+ linkUrl='d/728bf77cc1166d2f3133bf25846876cc/kubernetes-networking-workload?orgId=1&refresh=30s&var-namespace=$namespace&var-type=$type&var-workload=$__cell'
+ ),
+ ) + {
+
+ fill: 1,
+ fontSize: '90%',
+ lines: true,
+ linewidth: 1,
+ nullPointMode: 'null as zero',
+ renderer: 'flot',
+ scroll: true,
+ showHeader: true,
+ spaceLength: 10,
+ sort: {
+ col: 0,
+ desc: false,
+ },
+ targets: targets,
+ };
+
+ local clusterTemplate =
+ template.new(
+ name='cluster',
+ datasource='$datasource',
+ query='label_values(up{%(cadvisorSelector)s}, %(clusterLabel)s)' % $._config,
+ hide=if $._config.showMultiCluster then '' else '2',
+ refresh=2
+ );
+
+ local namespaceTemplate =
+ template.new(
+ name='namespace',
+ datasource='$datasource',
+ query='label_values(container_network_receive_packets_total{%(clusterLabel)s="$cluster"}, namespace)' % $._config,
+ current='kube-system',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ multi=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ definition: 'label_values(container_network_receive_packets_total{%(clusterLabel)s="$cluster"}, namespace)' % $._config,
+ skipUrlSync: false,
+ };
+
+ local typeTemplate =
+ template.new(
+ name='type',
+ datasource='$datasource',
+ query='label_values(namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+"}, workload_type)' % $._config,
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=true,
+ sort=0
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ definition: 'label_values(namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+"}, workload_type)' % $._config,
+ skipUrlSync: false,
+ };
+
+ local resolutionTemplate =
+ template.new(
+ name='resolution',
+ datasource='$datasource',
+ query='30s,5m,1h',
+ current='5m',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ skipUrlSync: false,
+ type: 'interval',
+ options: [
+ {
+ selected: false,
+ text: '30s',
+ value: '30s',
+ },
+ {
+ selected: true,
+ text: '5m',
+ value: '5m',
+ },
+ {
+ selected: false,
+ text: '1h',
+ value: '1h',
+ },
+ ],
+ };
+
+ local intervalTemplate =
+ template.new(
+ name='interval',
+ datasource='$datasource',
+ query='4h',
+ current='5m',
+ hide=2,
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ skipUrlSync: false,
+ type: 'interval',
+ options: [
+ {
+ selected: true,
+ text: '4h',
+ value: '4h',
+ },
+ ],
+ };
+
+ //##### Current Bandwidth Row ######
+
+ local currentBandwidthRow =
+ row.new(
+ title='Current Bandwidth'
+ );
+
+ //##### Average Bandwidth Row ######
+
+ local averageBandwidthRow =
+ row.new(
+ title='Average Bandwidth',
+ collapse=true,
+ );
+
+ //##### Bandwidth History Row ######
+
+ local bandwidthHistoryRow =
+ row.new(
+        title='Bandwidth History',
+ );
+
+ //##### Packet Row ######
+ // collapsed, so row must include panels
+ local packetRow =
+ row.new(
+ title='Packets',
+ collapse=true,
+ );
+
+ //##### Error Row ######
+ // collapsed, so row must include panels
+ local errorRow =
+ row.new(
+ title='Errors',
+ collapse=true,
+ );
+
+ dashboard.new(
+ title='%(dashboardNamePrefix)sNetworking / Namespace (Workload)' % $._config.grafanaK8s,
+ tags=($._config.grafanaK8s.dashboardTags),
+ editable=true,
+ schemaVersion=18,
+ refresh=($._config.grafanaK8s.refresh),
+ time_from='now-1h',
+ time_to='now',
+ )
+ .addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: $._config.datasourceName,
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(clusterTemplate)
+ .addTemplate(namespaceTemplate)
+ .addTemplate(typeTemplate)
+ .addTemplate(resolutionTemplate)
+ .addTemplate(intervalTemplate)
+ .addAnnotation(annotation.default)
+ .addPanel(currentBandwidthRow, gridPos={ h: 1, w: 24, x: 0, y: 0 })
+ .addPanel(
+ newBarplotPanel(
+ graphTitle='Current Rate of Bytes Received',
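+          // ||| ... ||| is a jsonnet text block; the trailing '% $._config' fills in %(clusterLabel)s before the query reaches Grafana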
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ legendFormat='{{ workload }}',
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 1 }
+ )
+ .addPanel(
+ newBarplotPanel(
+ graphTitle='Current Rate of Bytes Transmitted',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ legendFormat='{{ workload }}',
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 1 }
+ )
+ .addPanel(
+ newTablePanel(
+ tableTitle='Current Status',
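+          // each column query joins cadvisor series to their owning workload through the namespace_workload_pod:kube_pod_owner:relabel recording rule (group_left)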
+ colQueries=[
+ |||
+ sort_desc(sum(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ |||
+ sort_desc(sum(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ |||
+ sort_desc(avg(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ |||
+ sort_desc(avg(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ |||
+ sort_desc(sum(irate(container_network_receive_packets_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ |||
+ sort_desc(sum(irate(container_network_transmit_packets_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ |||
+ sort_desc(sum(irate(container_network_receive_packets_dropped_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ |||
+ sort_desc(sum(irate(container_network_transmit_packets_dropped_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ ]
+ ),
+ gridPos={ h: 9, w: 24, x: 0, y: 10 }
+ )
+ .addPanel(
+ averageBandwidthRow
+ .addPanel(
+ newBarplotPanel(
+ graphTitle='Average Rate of Bytes Received',
+ graphQuery=|||
+ sort_desc(avg(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ legendFormat='{{ workload }}',
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 20 }
+ )
+ .addPanel(
+ newBarplotPanel(
+ graphTitle='Average Rate of Bytes Transmitted',
+ graphQuery=|||
+ sort_desc(avg(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ legendFormat='{{ workload }}',
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 20 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 19 },
+ )
+ .addPanel(
+ bandwidthHistoryRow, gridPos={ h: 1, w: 24, x: 0, y: 29 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Receive Bandwidth',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 38 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Transmit Bandwidth',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 38 }
+ )
+ .addPanel(
+ packetRow
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Received Packets',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_receive_packets_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 40 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Transmitted Packets',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_transmit_packets_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 40 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 39 }
+ )
+ .addPanel(
+ errorRow
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Received Packets Dropped',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_receive_packets_dropped_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 41 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Transmitted Packets Dropped',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_transmit_packets_dropped_total{%(clusterLabel)s="$cluster",namespace="$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 41 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 40 }
+ ),
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/pod-total.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/pod-total.libsonnet
new file mode 100644
index 0000000..d9815ab
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/pod-total.libsonnet
@@ -0,0 +1,349 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local annotation = grafana.annotation;
+local singlestat = grafana.singlestat;
+
+{
+ grafanaDashboards+:: {
+
+ 'pod-total.json':
+
+ local newGaugePanel(gaugeTitle, gaugeQuery) =
+ local target =
+ prometheus.target(
+ gaugeQuery
+ ) + {
+ instant: null,
+ intervalFactor: 1,
+ };
+
+ singlestat.new(
+ title=gaugeTitle,
+ datasource='$datasource',
+ format='time_series',
+ height=9,
+ span=12,
+ min_span=12,
+ decimals=0,
+ valueName='current'
+ ).addTarget(target) + {
+ timeFrom: null,
+ timeShift: null,
+ type: 'gauge',
+ options: {
+ fieldOptions: {
+ calcs: [
+ 'last',
+ ],
+ defaults: {
+            max: 10000000000, // 10 GB/s
+ min: 0,
+ title: '$namespace: $pod',
+ unit: 'Bps',
+ },
+ mappings: [],
+ override: {},
+ thresholds: [
+ {
+ color: 'dark-green',
+ index: 0,
+              value: null, // 0 GB/s
+ },
+ {
+ color: 'dark-yellow',
+ index: 1,
+              value: 5000000000, // 5 GB/s
+ },
+ {
+ color: 'dark-red',
+ index: 2,
+              value: 7000000000, // 7 GB/s
+ },
+ ],
+ values: false,
+ },
+ },
+ };
+
+ local newGraphPanel(graphTitle, graphQuery, graphFormat='Bps') =
+ local target =
+ prometheus.target(
+ graphQuery
+ ) + {
+ intervalFactor: 1,
+ legendFormat: '{{pod}}',
+ step: 10,
+ };
+
+ graphPanel.new(
+ title=graphTitle,
+ span=12,
+ datasource='$datasource',
+ fill=2,
+ linewidth=2,
+ min_span=12,
+ format=graphFormat,
+ min=0,
+ max=null,
+ x_axis_mode='time',
+ x_axis_values='total',
+ lines=true,
+ stack=true,
+ legend_show=true,
+ nullPointMode='connected'
+ ).addTarget(target) + {
+ legend+: {
+ hideEmpty: true,
+ hideZero: true,
+ },
+ paceLength: 10,
+ tooltip+: {
+ sort: 2,
+ },
+ };
+
+ local clusterTemplate =
+ template.new(
+ name='cluster',
+ datasource='$datasource',
+ query='label_values(up{%(cadvisorSelector)s}, %(clusterLabel)s)' % $._config,
+ hide=if $._config.showMultiCluster then '' else '2',
+ refresh=2
+ );
+
+ local namespaceTemplate =
+ template.new(
+ name='namespace',
+ datasource='$datasource',
+ query='label_values(container_network_receive_packets_total{%(clusterLabel)s="$cluster"}, namespace)' % $._config,
+ allValues='.+',
+ current='kube-system',
+ hide='',
+ refresh=2,
+ includeAll=true,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ definition: 'label_values(container_network_receive_packets_total{%(clusterLabel)s="$cluster"}, namespace)' % $._config,
+ skipUrlSync: false,
+ };
+
+ local podTemplate =
+ template.new(
+ name='pod',
+ datasource='$datasource',
+ query='label_values(container_network_receive_packets_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}, pod)' % $._config,
+ allValues='.+',
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ definition: 'label_values(container_network_receive_packets_total{%(clusterLabel)s="$cluster",namespace=~"$namespace"}, pod)' % $._config,
+ skipUrlSync: false,
+ };
+
+ local resolutionTemplate =
+ template.new(
+ name='resolution',
+ datasource='$datasource',
+ query='30s,5m,1h',
+ current='5m',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ skipUrlSync: false,
+ type: 'interval',
+ options: [
+ {
+ selected: false,
+ text: '30s',
+ value: '30s',
+ },
+ {
+ selected: true,
+ text: '5m',
+ value: '5m',
+ },
+ {
+ selected: false,
+ text: '1h',
+ value: '1h',
+ },
+ ],
+ };
+
+ local intervalTemplate =
+ template.new(
+ name='interval',
+ datasource='$datasource',
+ query='4h',
+ current='5m',
+ hide=2,
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ skipUrlSync: false,
+ type: 'interval',
+ options: [
+ {
+ selected: true,
+ text: '4h',
+ value: '4h',
+ },
+ ],
+ };
+
+ //##### Current Bandwidth Row ######
+
+ local currentBandwidthRow =
+ row.new(
+ title='Current Bandwidth'
+ );
+
+ //##### Bandwidth Row ######
+
+ local bandwidthRow =
+ row.new(
+ title='Bandwidth'
+ );
+
+ //##### Packet Row ######
+ // collapsed, so row must include panels
+ local packetRow =
+ row.new(
+ title='Packets',
+ collapse=true,
+ );
+
+ //##### Error Row ######
+ // collapsed, so row must include panels
+ local errorRow =
+ row.new(
+ title='Errors',
+ collapse=true,
+ );
+
+ dashboard.new(
+ title='%(dashboardNamePrefix)sNetworking / Pod' % $._config.grafanaK8s,
+ tags=($._config.grafanaK8s.dashboardTags),
+ editable=true,
+ schemaVersion=18,
+ refresh=($._config.grafanaK8s.refresh),
+ time_from='now-1h',
+ time_to='now',
+ )
+ .addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: $._config.datasourceName,
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(clusterTemplate)
+ .addTemplate(namespaceTemplate)
+ .addTemplate(podTemplate)
+ .addTemplate(resolutionTemplate)
+ .addTemplate(intervalTemplate)
+ .addAnnotation(annotation.default)
+ .addPanel(currentBandwidthRow, gridPos={ h: 1, w: 24, x: 0, y: 0 })
+ .addPanel(
+ newGaugePanel(
+ gaugeTitle='Current Rate of Bytes Received',
+ gaugeQuery='sum(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace=~"$namespace", pod=~"$pod"}[$interval:$resolution]))' % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 1 }
+ )
+ .addPanel(
+ newGaugePanel(
+ gaugeTitle='Current Rate of Bytes Transmitted',
+ gaugeQuery='sum(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace=~"$namespace", pod=~"$pod"}[$interval:$resolution]))' % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 1 }
+ )
+ .addPanel(bandwidthRow, gridPos={ h: 1, w: 24, x: 0, y: 10 })
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Receive Bandwidth',
+ graphQuery='sum(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster",namespace=~"$namespace", pod=~"$pod"}[$interval:$resolution])) by (pod)' % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 11 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Transmit Bandwidth',
+ graphQuery='sum(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster",namespace=~"$namespace", pod=~"$pod"}[$interval:$resolution])) by (pod)' % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 11 }
+ )
+ .addPanel(
+ packetRow
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Received Packets',
+ graphQuery='sum(irate(container_network_receive_packets_total{%(clusterLabel)s="$cluster",namespace=~"$namespace", pod=~"$pod"}[$interval:$resolution])) by (pod)' % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 10, w: 12, x: 0, y: 21 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Transmitted Packets',
+ graphQuery='sum(irate(container_network_transmit_packets_total{%(clusterLabel)s="$cluster",namespace=~"$namespace", pod=~"$pod"}[$interval:$resolution])) by (pod)' % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 10, w: 12, x: 12, y: 21 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 20 }
+ )
+ .addPanel(
+ errorRow
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Received Packets Dropped',
+ graphQuery='sum(irate(container_network_receive_packets_dropped_total{%(clusterLabel)s="$cluster",namespace=~"$namespace", pod=~"$pod"}[$interval:$resolution])) by (pod)' % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 10, w: 12, x: 0, y: 32 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Transmitted Packets Dropped',
+ graphQuery='sum(irate(container_network_transmit_packets_dropped_total{%(clusterLabel)s="$cluster",namespace=~"$namespace", pod=~"$pod"}[$interval:$resolution])) by (pod)' % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 10, w: 12, x: 12, y: 32 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 21 }
+ ),
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/workload-total.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/workload-total.libsonnet
new file mode 100644
index 0000000..5bff786
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network-usage/workload-total.libsonnet
@@ -0,0 +1,427 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local annotation = grafana.annotation;
+
+{
+ grafanaDashboards+:: {
+
+ 'workload-total.json':
+
+ local newBarplotPanel(graphTitle, graphQuery, graphFormat='Bps', legendFormat='{{namespace}}') =
+ local target =
+ prometheus.target(
+ graphQuery
+ ) + {
+ intervalFactor: 1,
+ legendFormat: legendFormat,
+ step: 10,
+ };
+
+ graphPanel.new(
+ title=graphTitle,
+ span=24,
+ datasource='$datasource',
+ fill=2,
+ min_span=24,
+ format=graphFormat,
+ min=0,
+ max=null,
+ show_xaxis=false,
+ x_axis_mode='series',
+ x_axis_values='current',
+ lines=false,
+ bars=true,
+ stack=false,
+ legend_show=true,
+ legend_values=true,
+ legend_min=false,
+ legend_max=false,
+ legend_current=true,
+ legend_avg=false,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ legend_sort='current',
+ legend_sortDesc=true,
+ nullPointMode='null'
+ ).addTarget(target) + {
+ legend+: {
+ hideEmpty: true,
+ hideZero: true,
+ },
+ paceLength: 10,
+ tooltip+: {
+ sort: 2,
+ },
+ };
+
+ local newGraphPanel(graphTitle, graphQuery, graphFormat='Bps') =
+ local target =
+ prometheus.target(
+ graphQuery
+ ) + {
+ intervalFactor: 1,
+ legendFormat: '{{pod}}',
+ step: 10,
+ };
+
+ graphPanel.new(
+ title=graphTitle,
+ span=12,
+ datasource='$datasource',
+ fill=2,
+ linewidth=2,
+ min_span=12,
+ format=graphFormat,
+ min=0,
+ max=null,
+ x_axis_mode='time',
+ x_axis_values='total',
+ lines=true,
+ stack=true,
+ legend_show=true,
+ nullPointMode='connected'
+ ).addTarget(target) + {
+ legend+: {
+ hideEmpty: true,
+ hideZero: true,
+ },
+ paceLength: 10,
+ tooltip+: {
+ sort: 2,
+ },
+ };
+
+ local clusterTemplate =
+ template.new(
+ name='cluster',
+ datasource='$datasource',
+ query='label_values(kube_pod_info{%(kubeStateMetricsSelector)s}, %(clusterLabel)s)' % $._config,
+ hide=if $._config.showMultiCluster then '' else '2',
+ refresh=2
+ );
+
+ local namespaceTemplate =
+ template.new(
+ name='namespace',
+ datasource='$datasource',
+ query='label_values(container_network_receive_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster"}, namespace)' % $._config,
+ allValues='.+',
+ current='kube-system',
+ hide='',
+ refresh=2,
+ includeAll=true,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ definition: 'label_values(container_network_receive_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster"}, namespace)' % $._config,
+ skipUrlSync: false,
+ };
+
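+    // the workload and type variables chain off $namespace and $workload, so each picker narrows the next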
+ local workloadTemplate =
+ template.new(
+ name='workload',
+ datasource='$datasource',
+ query='label_values(namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace"}, workload)' % $._config,
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ definition: 'label_values(namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace"}, workload)' % $._config,
+ skipUrlSync: false,
+ };
+
+ local typeTemplate =
+ template.new(
+ name='type',
+ datasource='$datasource',
+ query='label_values(namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace", workload=~"$workload"}, workload_type)' % $._config,
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=true,
+ sort=0
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ definition: 'label_values(namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace", workload=~"$workload"}, workload_type)' % $._config,
+ skipUrlSync: false,
+ };
+
+ local resolutionTemplate =
+ template.new(
+ name='resolution',
+ datasource='$datasource',
+ query='30s,5m,1h',
+ current='5m',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ skipUrlSync: false,
+ type: 'interval',
+ options: [
+ {
+ selected: false,
+ text: '30s',
+ value: '30s',
+ },
+ {
+ selected: true,
+ text: '5m',
+ value: '5m',
+ },
+ {
+ selected: false,
+ text: '1h',
+ value: '1h',
+ },
+ ],
+ };
+
+ local intervalTemplate =
+ template.new(
+ name='interval',
+ datasource='$datasource',
+ query='4h',
+ current='5m',
+ hide=2,
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ skipUrlSync: false,
+ type: 'interval',
+ options: [
+ {
+ selected: true,
+ text: '4h',
+ value: '4h',
+ },
+ ],
+ };
+
+ //##### Current Bandwidth Row ######
+
+ local currentBandwidthRow =
+ row.new(
+ title='Current Bandwidth'
+ );
+
+ //##### Average Bandwidth Row ######
+
+ local averageBandwidthRow =
+ row.new(
+ title='Average Bandwidth',
+ collapse=true,
+ );
+
+ //##### Bandwidth History Row ######
+
+ local bandwidthHistoryRow =
+ row.new(
+        title='Bandwidth History',
+ );
+
+ //##### Packet Row ######
+ // collapsed, so row must include panels
+ local packetRow =
+ row.new(
+ title='Packets',
+ collapse=true,
+ );
+
+ //##### Error Row ######
+ // collapsed, so row must include panels
+ local errorRow =
+ row.new(
+ title='Errors',
+ collapse=true,
+ );
+
+ dashboard.new(
+ title='%(dashboardNamePrefix)sNetworking / Workload' % $._config.grafanaK8s,
+ tags=($._config.grafanaK8s.dashboardTags),
+ editable=true,
+ schemaVersion=18,
+ refresh=($._config.grafanaK8s.refresh),
+ time_from='now-1h',
+ time_to='now',
+ )
+ .addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: $._config.datasourceName,
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(clusterTemplate)
+ .addTemplate(namespaceTemplate)
+ .addTemplate(workloadTemplate)
+ .addTemplate(typeTemplate)
+ .addTemplate(resolutionTemplate)
+ .addTemplate(intervalTemplate)
+ .addAnnotation(annotation.default)
+ .addPanel(currentBandwidthRow, gridPos={ h: 1, w: 24, x: 0, y: 0 })
+ .addPanel(
+ newBarplotPanel(
+ graphTitle='Current Rate of Bytes Received',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ legendFormat='{{ pod }}',
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 1 }
+ )
+ .addPanel(
+ newBarplotPanel(
+ graphTitle='Current Rate of Bytes Transmitted',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ legendFormat='{{ pod }}',
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 1 }
+ )
+ .addPanel(
+ averageBandwidthRow
+ .addPanel(
+ newBarplotPanel(
+ graphTitle='Average Rate of Bytes Received',
+ graphQuery=|||
+ sort_desc(avg(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ legendFormat='{{ pod }}',
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 11 }
+ )
+ .addPanel(
+ newBarplotPanel(
+ graphTitle='Average Rate of Bytes Transmitted',
+ graphQuery=|||
+ sort_desc(avg(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ legendFormat='{{ pod }}',
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 11 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 10 },
+ )
+ .addPanel(
+ bandwidthHistoryRow, gridPos={ h: 1, w: 24, x: 0, y: 11 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Receive Bandwidth',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 12 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Transmit Bandwidth',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 12 }
+ )
+ .addPanel(
+ packetRow
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Received Packets',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_receive_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 22 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Transmitted Packets',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_transmit_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 22 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 21 }
+ )
+ .addPanel(
+ errorRow
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Received Packets Dropped',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_receive_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 9, w: 12, x: 0, y: 23 }
+ )
+ .addPanel(
+ newGraphPanel(
+ graphTitle='Rate of Transmitted Packets Dropped',
+ graphQuery=|||
+ sort_desc(sum(irate(container_network_transmit_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster",namespace=~"$namespace"}[$interval:$resolution])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster",namespace=~"$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ graphFormat='pps'
+ ),
+ gridPos={ h: 9, w: 12, x: 12, y: 23 }
+ ),
+ gridPos={ h: 1, w: 24, x: 0, y: 22 }
+ ),
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network.libsonnet
new file mode 100644
index 0000000..5d992e4
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/network.libsonnet
@@ -0,0 +1,5 @@
+(import 'network-usage/cluster-total.libsonnet') +
+(import 'network-usage/namespace-by-workload.libsonnet') +
+(import 'network-usage/namespace-by-pod.libsonnet') +
+(import 'network-usage/pod-total.libsonnet') +
+(import 'network-usage/workload-total.libsonnet')
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/persistentvolumesusage.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/persistentvolumesusage.libsonnet
new file mode 100644
index 0000000..a5bdd7e
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/persistentvolumesusage.libsonnet
@@ -0,0 +1,172 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local promgrafonnet = import '../lib/promgrafonnet/promgrafonnet.libsonnet';
+local gauge = promgrafonnet.gauge;
+
+{
+ grafanaDashboards+:: {
+ 'persistentvolumesusage.json':
+ local sizeGraph = graphPanel.new(
+ 'Volume Space Usage',
+ datasource='$datasource',
+ format='bytes',
+ min=0,
+ span=9,
+ stack=true,
+ legend_show=true,
+ legend_values=true,
+ legend_min=true,
+ legend_max=true,
+ legend_current=true,
+ legend_total=false,
+ legend_avg=true,
+ legend_alignAsTable=true,
+ legend_rightSide=false,
+ ).addTarget(prometheus.target(
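+        // topk(1, ...) plus sum without(instance, node) guards against duplicate samples when two kubelets briefly report the same PVC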
+ |||
+ (
+ sum without(instance, node) (topk(1, (kubelet_volume_stats_capacity_bytes{%(clusterLabel)s="$cluster", %(kubeletSelector)s, namespace="$namespace", persistentvolumeclaim="$volume"})))
+ -
+ sum without(instance, node) (topk(1, (kubelet_volume_stats_available_bytes{%(clusterLabel)s="$cluster", %(kubeletSelector)s, namespace="$namespace", persistentvolumeclaim="$volume"})))
+ )
+ ||| % $._config,
+ legendFormat='Used Space',
+ intervalFactor=1,
+ )).addTarget(prometheus.target(
+ |||
+ sum without(instance, node) (topk(1, (kubelet_volume_stats_available_bytes{%(clusterLabel)s="$cluster", %(kubeletSelector)s, namespace="$namespace", persistentvolumeclaim="$volume"})))
+ ||| % $._config,
+ legendFormat='Free Space',
+ intervalFactor=1,
+ ));
+
+ local sizeGauge = gauge.new(
+ 'Volume Space Usage',
+ |||
+ max without(instance,node) (
+ (
+ topk(1, kubelet_volume_stats_capacity_bytes{%(clusterLabel)s="$cluster", %(kubeletSelector)s, namespace="$namespace", persistentvolumeclaim="$volume"})
+ -
+ topk(1, kubelet_volume_stats_available_bytes{%(clusterLabel)s="$cluster", %(kubeletSelector)s, namespace="$namespace", persistentvolumeclaim="$volume"})
+ )
+ /
+ topk(1, kubelet_volume_stats_capacity_bytes{%(clusterLabel)s="$cluster", %(kubeletSelector)s, namespace="$namespace", persistentvolumeclaim="$volume"})
+ * 100)
+ ||| % $._config,
+ ).withLowerBeingBetter();
+
+ local inodesGraph = graphPanel.new(
+ 'Volume inodes Usage',
+ datasource='$datasource',
+ format='none',
+ min=0,
+ span=9,
+ stack=true,
+ legend_show=true,
+ legend_values=true,
+ legend_min=true,
+ legend_max=true,
+ legend_current=true,
+ legend_total=false,
+ legend_avg=true,
+ legend_alignAsTable=true,
+ legend_rightSide=false,
+ ).addTarget(prometheus.target(
+ |||
+ sum without(instance, node) (topk(1, (kubelet_volume_stats_inodes_used{%(clusterLabel)s="$cluster", %(kubeletSelector)s, namespace="$namespace", persistentvolumeclaim="$volume"})))
+ ||| % $._config,
+ legendFormat='Used inodes',
+ intervalFactor=1,
+ )).addTarget(prometheus.target(
+ |||
+ (
+ sum without(instance, node) (topk(1, (kubelet_volume_stats_inodes{%(clusterLabel)s="$cluster", %(kubeletSelector)s, namespace="$namespace", persistentvolumeclaim="$volume"})))
+ -
+ sum without(instance, node) (topk(1, (kubelet_volume_stats_inodes_used{%(clusterLabel)s="$cluster", %(kubeletSelector)s, namespace="$namespace", persistentvolumeclaim="$volume"})))
+ )
+ ||| % $._config,
+        legendFormat='Free inodes',
+ intervalFactor=1,
+ ));
+
+ local inodeGauge = gauge.new(
+ 'Volume inodes Usage',
+ |||
+ max without(instance,node) (
+ topk(1, kubelet_volume_stats_inodes_used{%(clusterLabel)s="$cluster", %(kubeletSelector)s, namespace="$namespace", persistentvolumeclaim="$volume"})
+ /
+ topk(1, kubelet_volume_stats_inodes{%(clusterLabel)s="$cluster", %(kubeletSelector)s, namespace="$namespace", persistentvolumeclaim="$volume"})
+ * 100)
+ ||| % $._config,
+ ).withLowerBeingBetter();
+
+ dashboard.new(
+ '%(dashboardNamePrefix)sPersistent Volumes' % $._config.grafanaK8s,
+ time_from='now-7d',
+ uid=($._config.grafanaDashboardIDs['persistentvolumesusage.json']),
+ tags=($._config.grafanaK8s.dashboardTags),
+ ).addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: $._config.datasourceName,
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(
+ template.new(
+ 'cluster',
+ '$datasource',
+ 'label_values(kubelet_volume_stats_capacity_bytes{%(kubeletSelector)s}, %(clusterLabel)s)' % $._config,
+ label='cluster',
+ refresh='time',
+ hide=if $._config.showMultiCluster then '' else 'variable',
+ sort=1,
+ )
+ )
+ .addTemplate(
+ template.new(
+ 'namespace',
+ '$datasource',
+ 'label_values(kubelet_volume_stats_capacity_bytes{%(clusterLabel)s="$cluster", %(kubeletSelector)s}, namespace)' % $._config,
+ label='Namespace',
+ refresh='time',
+ sort=1,
+ )
+ )
+ .addTemplate(
+ template.new(
+ 'volume',
+ '$datasource',
+ 'label_values(kubelet_volume_stats_capacity_bytes{%(clusterLabel)s="$cluster", %(kubeletSelector)s, namespace="$namespace"}, persistentvolumeclaim)' % $._config,
+ label='PersistentVolumeClaim',
+ refresh='time',
+ sort=1,
+ )
+ )
+ .addRow(
+ row.new()
+ .addPanel(sizeGraph)
+ .addPanel(sizeGauge)
+ )
+ .addRow(
+ row.new()
+ .addPanel(inodesGraph)
+ .addPanel(inodeGauge)
+ ),
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/proxy.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/proxy.libsonnet
new file mode 100644
index 0000000..84198c1
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/proxy.libsonnet
@@ -0,0 +1,202 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local singlestat = grafana.singlestat;
+
+{
+ grafanaDashboards+:: {
+ 'proxy.json':
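+    // kube-proxy dashboard: scrape status, rules/network-programming sync
+    // performance, API client traffic and per-instance process metrics.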
+ local upCount =
+ singlestat.new(
+ 'Up',
+ datasource='$datasource',
+ span=2,
+ valueName='min',
+ )
+ .addTarget(prometheus.target('sum(up{%(clusterLabel)s="$cluster", %(kubeProxySelector)s})' % $._config));
+
+ local rulesSyncRate =
+ graphPanel.new(
+ 'Rules Sync Rate',
+ datasource='$datasource',
+ span=5,
+ min=0,
+ format='ops',
+ )
+ .addTarget(prometheus.target('sum(rate(kubeproxy_sync_proxy_rules_duration_seconds_count{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='rate'));
+
+ local rulesSyncLatency =
+ graphPanel.new(
+ 'Rule Sync Latency 99th Quantile',
+ datasource='$datasource',
+ span=5,
+ min=0,
+ format='s',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99,rate(kubeproxy_sync_proxy_rules_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='{{instance}}'));
+
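+      // Network programming duration: time from a Service/Endpoints change in the
+      // API until kube-proxy has reprogrammed the node's forwarding rules.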
+ local networkProgrammingRate =
+ graphPanel.new(
+ 'Network Programming Rate',
+ datasource='$datasource',
+ span=6,
+ min=0,
+ format='ops',
+ )
+ .addTarget(prometheus.target('sum(rate(kubeproxy_network_programming_duration_seconds_count{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='rate'));
+
+ local networkProgrammingLatency =
+ graphPanel.new(
+ 'Network Programming Latency 99th Quantile',
+ datasource='$datasource',
+ span=6,
+ min=0,
+ format='s',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(kubeproxy_network_programming_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (instance, le))' % $._config, legendFormat='{{instance}}'));
+
+ local rpcRate =
+ graphPanel.new(
+ 'Kube API Request Rate',
+ datasource='$datasource',
+ span=4,
+ format='ops',
+ )
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance",code=~"2.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='2xx'))
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance",code=~"3.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='3xx'))
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance",code=~"4.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='4xx'))
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance",code=~"5.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='5xx'));
+
+ local postRequestLatency =
+ graphPanel.new(
+ 'Post Request Latency 99th Quantile',
+ datasource='$datasource',
+ span=8,
+ format='s',
+ min=0,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeProxySelector)s,instance=~"$instance",verb="POST"}[%(grafanaIntervalVar)s])) by (verb, url, le))' % $._config, legendFormat='{{verb}} {{url}}'));
+
+ local getRequestLatency =
+ graphPanel.new(
+ 'Get Request Latency 99th Quantile',
+ datasource='$datasource',
+ span=12,
+ format='s',
+ min=0,
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeProxySelector)s, instance=~"$instance", verb="GET"}[%(grafanaIntervalVar)s])) by (verb, url, le))' % $._config, legendFormat='{{verb}} {{url}}'));
+
+ local memory =
+ graphPanel.new(
+ 'Memory',
+ datasource='$datasource',
+ span=4,
+ format='bytes',
+ )
+ .addTarget(prometheus.target('process_resident_memory_bytes{%(clusterLabel)s="$cluster", %(kubeProxySelector)s,instance=~"$instance"}' % $._config, legendFormat='{{instance}}'));
+
+ local cpu =
+ graphPanel.new(
+ 'CPU usage',
+ datasource='$datasource',
+ span=4,
+ format='short',
+ min=0,
+ )
+ .addTarget(prometheus.target('rate(process_cpu_seconds_total{%(clusterLabel)s="$cluster", %(kubeProxySelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])' % $._config, legendFormat='{{instance}}'));
+
+ local goroutines =
+ graphPanel.new(
+ 'Goroutines',
+ datasource='$datasource',
+ span=4,
+ format='short',
+ )
+ .addTarget(prometheus.target('go_goroutines{%(clusterLabel)s="$cluster", %(kubeProxySelector)s,instance=~"$instance"}' % $._config, legendFormat='{{instance}}'));
+
+ dashboard.new(
+ '%(dashboardNamePrefix)sProxy' % $._config.grafanaK8s,
+ time_from='now-1h',
+ uid=($._config.grafanaDashboardIDs['proxy.json']),
+ tags=($._config.grafanaK8s.dashboardTags),
+ ).addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: $._config.datasourceName,
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(
+ template.new(
+ 'cluster',
+ '$datasource',
+ 'label_values(up{%(kubeProxySelector)s}, %(clusterLabel)s)' % $._config,
+ label='cluster',
+ refresh='time',
+ hide=if $._config.showMultiCluster then '' else 'variable',
+ sort=1,
+ )
+ )
+ .addTemplate(
+ template.new(
+ 'instance',
+ '$datasource',
+        'label_values(up{%(kubeProxySelector)s, %(clusterLabel)s="$cluster"}, instance)' % $._config,
+ refresh='time',
+ includeAll=true,
+ sort=1,
+ )
+ )
+ .addRow(
+ row.new()
+ .addPanel(upCount)
+ .addPanel(rulesSyncRate)
+ .addPanel(rulesSyncLatency)
+ ).addRow(
+ row.new()
+ .addPanel(networkProgrammingRate)
+ .addPanel(networkProgrammingLatency)
+ ).addRow(
+ row.new()
+ .addPanel(rpcRate)
+ .addPanel(postRequestLatency)
+ ).addRow(
+ row.new()
+ .addPanel(getRequestLatency)
+ ).addRow(
+ row.new()
+ .addPanel(memory)
+ .addPanel(cpu)
+ .addPanel(goroutines)
+ ),
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources.libsonnet
new file mode 100644
index 0000000..484d2f1
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources.libsonnet
@@ -0,0 +1,7 @@
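+// Aggregates the compute-resources dashboards for every scope into one mixin object.
+// Illustrative usage sketch (names assumed): (import 'resources.libsonnet') + { _config+:: { clusterLabel: 'cluster' } }.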
+(import 'resources/cluster.libsonnet') +
+(import 'resources/multi-cluster.libsonnet') +
+(import 'resources/namespace.libsonnet') +
+(import 'resources/node.libsonnet') +
+(import 'resources/pod.libsonnet') +
+(import 'resources/workload-namespace.libsonnet') +
+(import 'resources/workload.libsonnet')
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/cluster.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/cluster.libsonnet
new file mode 100644
index 0000000..ff37516
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/cluster.libsonnet
@@ -0,0 +1,331 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local g = import 'github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet';
+local template = grafana.template;
+
+{
+ grafanaDashboards+:: {
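+    // Cluster picker template; hidden entirely ('2' = hidden variable) unless
+    // multi-cluster support is enabled.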
+ local clusterTemplate =
+ template.new(
+ name='cluster',
+ datasource='$datasource',
+ query='label_values(up{%(cadvisorSelector)s}, %(clusterLabel)s)' % $._config,
+ current='',
+ hide=if $._config.showMultiCluster then '' else '2',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ),
+
+ 'k8s-resources-cluster.json':
+ local tableStyles = {
+ namespace: {
+ alias: 'Namespace',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-namespace.json') },
+ linkTooltip: 'Drill down to pods',
+ },
+ 'Value #A': {
+ alias: 'Pods',
+ linkTooltip: 'Drill down to pods',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-namespace.json') },
+ decimals: 0,
+ },
+ 'Value #B': {
+ alias: 'Workloads',
+ linkTooltip: 'Drill down to workloads',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-workloads-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-workloads-namespace.json') },
+ decimals: 0,
+ },
+ };
+
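+      // Columns A/B of the quota tables: pod count and distinct workload count per namespace.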
+ local podWorkloadColumns = [
+ 'sum(kube_pod_owner{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'count(avg(namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster"}) by (workload, namespace)) by (namespace)' % $._config,
+ ];
+
+ local networkColumns = [
+ 'sum(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config,
+ 'sum(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config,
+ 'sum(irate(container_network_receive_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config,
+ 'sum(irate(container_network_transmit_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config,
+ 'sum(irate(container_network_receive_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config,
+ 'sum(irate(container_network_transmit_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config,
+ ];
+
+ local networkTableStyles = {
+ namespace: {
+ alias: 'Namespace',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-namespace.json') },
+ linkTooltip: 'Drill down to pods',
+ },
+ 'Value #A': {
+ alias: 'Current Receive Bandwidth',
+ unit: 'Bps',
+ },
+ 'Value #B': {
+ alias: 'Current Transmit Bandwidth',
+ unit: 'Bps',
+ },
+ 'Value #C': {
+ alias: 'Rate of Received Packets',
+ unit: 'pps',
+ },
+ 'Value #D': {
+ alias: 'Rate of Transmitted Packets',
+ unit: 'pps',
+ },
+ 'Value #E': {
+ alias: 'Rate of Received Packets Dropped',
+ unit: 'pps',
+ },
+ 'Value #F': {
+ alias: 'Rate of Transmitted Packets Dropped',
+ unit: 'pps',
+ },
+ };
+
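+      // Columns A-F: read/write IOPS and throughput per namespace, restricted to
+      // physical block devices by the disk-device selector.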
+ local storageIOColumns = [
+ 'sum by(namespace) (rate(container_fs_reads_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace!=""}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(namespace) (rate(container_fs_writes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace!=""}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(namespace) (rate(container_fs_reads_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace!=""}[%(grafanaIntervalVar)s]) + rate(container_fs_writes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace!=""}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(namespace) (rate(container_fs_reads_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace!=""}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(namespace) (rate(container_fs_writes_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace!=""}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(namespace) (rate(container_fs_reads_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace!=""}[%(grafanaIntervalVar)s]) + rate(container_fs_writes_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace!=""}[%(grafanaIntervalVar)s]))' % $._config,
+ ];
+
+ local storageIOTableStyles = {
+ namespace: {
+ alias: 'Namespace',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-namespace.json') },
+ linkTooltip: 'Drill down to pods',
+ },
+ 'Value #A': {
+ alias: 'IOPS(Reads)',
+ unit: 'short',
+ decimals: -1,
+ },
+ 'Value #B': {
+ alias: 'IOPS(Writes)',
+ unit: 'short',
+ decimals: -1,
+ },
+ 'Value #C': {
+ alias: 'IOPS(Reads + Writes)',
+ unit: 'short',
+ decimals: -1,
+ },
+ 'Value #D': {
+ alias: 'Throughput(Read)',
+ unit: 'Bps',
+ },
+ 'Value #E': {
+ alias: 'Throughput(Write)',
+ unit: 'Bps',
+ },
+ 'Value #F': {
+ alias: 'Throughput(Read + Write)',
+ unit: 'Bps',
+ },
+ };
+
+ g.dashboard(
+ '%(dashboardNamePrefix)sCompute Resources / Cluster' % $._config.grafanaK8s,
+ uid=($._config.grafanaDashboardIDs['k8s-resources-cluster.json']),
+ )
+ .addRow(
+ (g.row('Headlines') +
+ {
+ height: '100px',
+ showTitle: false,
+ })
+ .addPanel(
+ g.panel('CPU Utilisation') +
+ g.statPanel('cluster:node_cpu:ratio_rate5m{%(clusterLabel)s="$cluster"}' % $._config)
+ )
+ .addPanel(
+ g.panel('CPU Requests Commitment') +
+ g.statPanel('sum(namespace_cpu:kube_pod_container_resource_requests:sum{%(clusterLabel)s="$cluster"}) / sum(kube_node_status_allocatable{%(kubeStateMetricsSelector)s,resource="cpu",%(clusterLabel)s="$cluster"})' % $._config)
+ )
+ .addPanel(
+ g.panel('CPU Limits Commitment') +
+ g.statPanel('sum(namespace_cpu:kube_pod_container_resource_limits:sum{%(clusterLabel)s="$cluster"}) / sum(kube_node_status_allocatable{%(kubeStateMetricsSelector)s,resource="cpu",%(clusterLabel)s="$cluster"})' % $._config)
+ )
+ .addPanel(
+ g.panel('Memory Utilisation') +
+ g.statPanel('1 - sum(:node_memory_MemAvailable_bytes:sum{%(clusterLabel)s="$cluster"}) / sum(node_memory_MemTotal_bytes{%(nodeExporterSelector)s,%(clusterLabel)s="$cluster"})' % $._config)
+ )
+ .addPanel(
+ g.panel('Memory Requests Commitment') +
+ g.statPanel('sum(namespace_memory:kube_pod_container_resource_requests:sum{%(clusterLabel)s="$cluster"}) / sum(kube_node_status_allocatable{%(kubeStateMetricsSelector)s,resource="memory",%(clusterLabel)s="$cluster"})' % $._config)
+ )
+ .addPanel(
+ g.panel('Memory Limits Commitment') +
+ g.statPanel('sum(namespace_memory:kube_pod_container_resource_limits:sum{%(clusterLabel)s="$cluster"}) / sum(kube_node_status_allocatable{%(kubeStateMetricsSelector)s,resource="memory",%(clusterLabel)s="$cluster"})' % $._config)
+ )
+ )
+ .addRow(
+ g.row('CPU')
+ .addPanel(
+ g.panel('CPU Usage') +
+ g.queryPanel('sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config, '{{namespace}}') +
+ g.stack
+ )
+ )
+ .addRow(
+ g.row('CPU Quota')
+ .addPanel(
+ g.panel('CPU Quota') +
+ g.tablePanel(podWorkloadColumns + [
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(namespace_cpu:kube_pod_container_resource_requests:sum{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster"}) by (namespace) / sum(namespace_cpu:kube_pod_container_resource_requests:sum{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(namespace_cpu:kube_pod_container_resource_limits:sum{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster"}) by (namespace) / sum(namespace_cpu:kube_pod_container_resource_limits:sum{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ ], tableStyles {
+ 'Value #C': { alias: 'CPU Usage' },
+ 'Value #D': { alias: 'CPU Requests' },
+ 'Value #E': { alias: 'CPU Requests %', unit: 'percentunit' },
+ 'Value #F': { alias: 'CPU Limits' },
+ 'Value #G': { alias: 'CPU Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Memory')
+ .addPanel(
+ g.panel('Memory Usage (w/o cache)') +
+ // Not using container_memory_usage_bytes here because that includes page cache
+ g.queryPanel('sum(container_memory_rss{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", container!=""}) by (namespace)' % $._config, '{{namespace}}') +
+ g.stack +
+ { yaxes: g.yaxes('bytes') },
+ )
+ )
+ .addRow(
+ g.row('Memory Requests')
+ .addPanel(
+ g.panel('Requests by Namespace') +
+ g.tablePanel(podWorkloadColumns + [
+ // Not using container_memory_usage_bytes here because that includes page cache
+ 'sum(container_memory_rss{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", container!=""}) by (namespace)' % $._config,
+ 'sum(namespace_memory:kube_pod_container_resource_requests:sum{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(container_memory_rss{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", container!=""}) by (namespace) / sum(namespace_memory:kube_pod_container_resource_requests:sum{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(namespace_memory:kube_pod_container_resource_limits:sum{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(container_memory_rss{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", container!=""}) by (namespace) / sum(namespace_memory:kube_pod_container_resource_limits:sum{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ ], tableStyles {
+ 'Value #C': { alias: 'Memory Usage', unit: 'bytes' },
+ 'Value #D': { alias: 'Memory Requests', unit: 'bytes' },
+ 'Value #E': { alias: 'Memory Requests %', unit: 'percentunit' },
+ 'Value #F': { alias: 'Memory Limits', unit: 'bytes' },
+ 'Value #G': { alias: 'Memory Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Current Network Usage')
+ .addPanel(
+ g.panel('Current Network Usage') +
+ g.tablePanel(
+ networkColumns,
+ networkTableStyles
+ ) +
+ { interval: $._config.grafanaK8s.minimumTimeInterval },
+ )
+ )
+ .addRow(
+ g.row('Bandwidth')
+ .addPanel(
+ g.panel('Receive Bandwidth') +
+ g.queryPanel('sum(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config, '{{namespace}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ .addPanel(
+ g.panel('Transmit Bandwidth') +
+ g.queryPanel('sum(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config, '{{namespace}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Average Container Bandwidth by Namespace')
+ .addPanel(
+ g.panel('Average Container Bandwidth by Namespace: Received') +
+ g.queryPanel('avg(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config, '{{namespace}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ .addPanel(
+ g.panel('Average Container Bandwidth by Namespace: Transmitted') +
+ g.queryPanel('avg(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config, '{{namespace}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Rate of Packets')
+ .addPanel(
+ g.panel('Rate of Received Packets') +
+ g.queryPanel('sum(irate(container_network_receive_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config, '{{namespace}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ .addPanel(
+ g.panel('Rate of Transmitted Packets') +
+ g.queryPanel('sum(irate(container_network_transmit_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config, '{{namespace}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ )
+ .addRow(
+ g.row('Rate of Packets Dropped')
+ .addPanel(
+ g.panel('Rate of Received Packets Dropped') +
+ g.queryPanel('sum(irate(container_network_receive_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config, '{{namespace}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ .addPanel(
+ g.panel('Rate of Transmitted Packets Dropped') +
+ g.queryPanel('sum(irate(container_network_transmit_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s=~".+"}[%(grafanaIntervalVar)s])) by (namespace)' % $._config, '{{namespace}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ )
+ .addRow(
+ g.row('Storage IO')
+ .addPanel(
+ g.panel('IOPS(Reads+Writes)') +
+        g.queryPanel('ceil(sum by(namespace) (rate(container_fs_reads_total{%(cadvisorSelector)s, %(containerfsSelector)s, %(diskDeviceSelector)s, %(clusterLabel)s="$cluster", namespace!=""}[%(grafanaIntervalVar)s]) + rate(container_fs_writes_total{%(cadvisorSelector)s, %(containerfsSelector)s, %(diskDeviceSelector)s, %(clusterLabel)s="$cluster", namespace!=""}[%(grafanaIntervalVar)s])))' % $._config, '{{namespace}}') +
+        g.stack +
+        { yaxes: g.yaxes('short'), decimals: -1 },
+ )
+ .addPanel(
+        g.panel('Throughput(Read+Write)') +
+        g.queryPanel('sum by(namespace) (rate(container_fs_reads_bytes_total{%(cadvisorSelector)s, %(containerfsSelector)s, %(diskDeviceSelector)s, %(clusterLabel)s="$cluster", namespace!=""}[%(grafanaIntervalVar)s]) + rate(container_fs_writes_bytes_total{%(cadvisorSelector)s, %(containerfsSelector)s, %(diskDeviceSelector)s, %(clusterLabel)s="$cluster", namespace!=""}[%(grafanaIntervalVar)s]))' % $._config, '{{namespace}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Storage IO - Distribution')
+ .addPanel(
+ g.panel('Current Storage IO') +
+ g.tablePanel(
+ storageIOColumns,
+ storageIOTableStyles
+ ) +
+ {
+ sort: {
+ col: 4,
+ desc: true,
+ },
+ },
+ )
+ ) + {
+ templating+: {
+ list+: [clusterTemplate],
+ },
+ },
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/multi-cluster.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/multi-cluster.libsonnet
new file mode 100644
index 0000000..9579433
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/multi-cluster.libsonnet
@@ -0,0 +1,105 @@
+local g = import 'github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet';
+
+{
+ grafanaDashboards+::
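+    // Only rendered when showMultiCluster is set; aggregates the same resource
+    // views across every value of the cluster label.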
+ if $._config.showMultiCluster then {
+ 'k8s-resources-multicluster.json':
+ local tableStyles = {
+ [$._config.clusterLabel]: {
+ alias: 'Cluster',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-cluster?var-datasource=$datasource&var-cluster=$__cell' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-cluster.json') },
+ },
+ };
+
+ g.dashboard(
+ '%(dashboardNamePrefix)sCompute Resources / Multi-Cluster' % $._config.grafanaK8s,
+ uid=($._config.grafanaDashboardIDs['k8s-resources-multicluster.json']),
+ ).addRow(
+ (g.row('Headlines') +
+ {
+ height: '100px',
+ showTitle: false,
+ })
+ .addPanel(
+ g.panel('CPU Utilisation') +
+ g.statPanel('cluster:node_cpu:ratio_rate5m')
+ )
+ .addPanel(
+ g.panel('CPU Requests Commitment') +
+ g.statPanel('sum(kube_pod_container_resource_requests{%(kubeStateMetricsSelector)s, resource="cpu"}) / sum(kube_node_status_allocatable{%(kubeStateMetricsSelector)s, resource="cpu"})' % $._config)
+ )
+ .addPanel(
+ g.panel('CPU Limits Commitment') +
+ g.statPanel('sum(kube_pod_container_resource_limits{%(kubeStateMetricsSelector)s, resource="cpu"}) / sum(kube_node_status_allocatable{%(kubeStateMetricsSelector)s, resource="cpu"})' % $._config)
+ )
+ .addPanel(
+ g.panel('Memory Utilisation') +
+ g.statPanel('1 - sum(:node_memory_MemAvailable_bytes:sum) / sum(node_memory_MemTotal_bytes{%(nodeExporterSelector)s})' % $._config)
+ )
+ .addPanel(
+ g.panel('Memory Requests Commitment') +
+ g.statPanel('sum(kube_pod_container_resource_requests{%(kubeStateMetricsSelector)s, resource="memory"}) / sum(kube_node_status_allocatable{%(kubeStateMetricsSelector)s, resource="memory"})' % $._config)
+ )
+ .addPanel(
+ g.panel('Memory Limits Commitment') +
+ g.statPanel('sum(kube_pod_container_resource_limits{%(kubeStateMetricsSelector)s, resource="memory"}) / sum(kube_node_status_allocatable{%(kubeStateMetricsSelector)s, resource="memory"})' % $._config)
+ )
+ )
+ .addRow(
+ g.row('CPU')
+ .addPanel(
+ g.panel('CPU Usage') +
+ g.queryPanel('sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate) by (%(clusterLabel)s)' % $._config, '{{%(clusterLabel)s}}' % $._config)
+ + { fill: 0, linewidth: 2 },
+ )
+ )
+ .addRow(
+ g.row('CPU Quota')
+ .addPanel(
+ g.panel('CPU Quota') +
+ g.tablePanel([
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate) by (%(clusterLabel)s)' % $._config,
+ 'sum(kube_pod_container_resource_requests{%(kubeStateMetricsSelector)s, resource="cpu"}) by (%(clusterLabel)s)' % $._config,
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate) by (%(clusterLabel)s) / sum(kube_pod_container_resource_requests{%(kubeStateMetricsSelector)s, resource="cpu"}) by (%(clusterLabel)s)' % $._config,
+ 'sum(kube_pod_container_resource_limits{%(kubeStateMetricsSelector)s, resource="cpu"}) by (%(clusterLabel)s)' % $._config,
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate) by (%(clusterLabel)s) / sum(kube_pod_container_resource_limits{%(kubeStateMetricsSelector)s, resource="cpu"}) by (%(clusterLabel)s)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'CPU Usage' },
+ 'Value #B': { alias: 'CPU Requests' },
+ 'Value #C': { alias: 'CPU Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'CPU Limits' },
+ 'Value #E': { alias: 'CPU Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Memory')
+ .addPanel(
+ g.panel('Memory Usage (w/o cache)') +
+ // Not using container_memory_usage_bytes here because that includes page cache
+ g.queryPanel('sum(container_memory_rss{%(cadvisorSelector)s, container!=""}) by (%(clusterLabel)s)' % $._config, '{{%(clusterLabel)s}}' % $._config) +
+ { fill: 0, linewidth: 2, yaxes: g.yaxes('bytes') },
+ )
+ )
+ .addRow(
+ g.row('Memory Requests')
+ .addPanel(
+ g.panel('Requests by Cluster') +
+ g.tablePanel([
+ // Not using container_memory_usage_bytes here because that includes page cache
+ 'sum(container_memory_rss{%(cadvisorSelector)s, container!=""}) by (%(clusterLabel)s)' % $._config,
+ 'sum(kube_pod_container_resource_requests{%(kubeStateMetricsSelector)s, resource="memory"}) by (%(clusterLabel)s)' % $._config,
+ 'sum(container_memory_rss{%(cadvisorSelector)s, container!=""}) by (%(clusterLabel)s) / sum(kube_pod_container_resource_requests{%(kubeStateMetricsSelector)s, resource="memory"}) by (%(clusterLabel)s)' % $._config,
+ 'sum(kube_pod_container_resource_limits{%(kubeStateMetricsSelector)s, resource="memory"}) by (%(clusterLabel)s)' % $._config,
+ 'sum(container_memory_rss{%(cadvisorSelector)s, container!=""}) by (%(clusterLabel)s) / sum(kube_pod_container_resource_limits{%(kubeStateMetricsSelector)s, resource="memory"}) by (%(clusterLabel)s)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'Memory Usage', unit: 'bytes' },
+ 'Value #B': { alias: 'Memory Requests', unit: 'bytes' },
+ 'Value #C': { alias: 'Memory Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'Memory Limits', unit: 'bytes' },
+ 'Value #E': { alias: 'Memory Limits %', unit: 'percentunit' },
+ })
+ )
+ ),
+ } else {},
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/namespace.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/namespace.libsonnet
new file mode 100644
index 0000000..6693c5d
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/namespace.libsonnet
@@ -0,0 +1,372 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local g = import 'github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet';
+local template = grafana.template;
+
+{
+ grafanaDashboards+:: {
+ local clusterTemplate =
+ template.new(
+ name='cluster',
+ datasource='$datasource',
+ query='label_values(up{%(kubeStateMetricsSelector)s}, %(clusterLabel)s)' % $._config,
+ current='',
+ hide=if $._config.showMultiCluster then '' else '2',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ),
+
+ local namespaceTemplate =
+ template.new(
+ name='namespace',
+ datasource='$datasource',
+ query='label_values(kube_namespace_status_phase{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster"}, namespace)' % $._config,
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ multi=false,
+ sort=1
+ ),
+ 'k8s-resources-namespace.json':
+ local tableStyles = {
+ pod: {
+ alias: 'Pod',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-pod.json') },
+ },
+ };
+
+ local networkColumns = [
+ 'sum(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])) by (pod)' % $._config,
+ 'sum(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])) by (pod)' % $._config,
+ 'sum(irate(container_network_receive_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])) by (pod)' % $._config,
+ 'sum(irate(container_network_transmit_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])) by (pod)' % $._config,
+ 'sum(irate(container_network_receive_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])) by (pod)' % $._config,
+ 'sum(irate(container_network_transmit_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])) by (pod)' % $._config,
+ ];
+
+ local networkTableStyles = {
+ pod: {
+ alias: 'Pod',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-pod.json') },
+ linkTooltip: 'Drill down to pods',
+ },
+ 'Value #A': {
+ alias: 'Current Receive Bandwidth',
+ unit: 'Bps',
+ },
+ 'Value #B': {
+ alias: 'Current Transmit Bandwidth',
+ unit: 'Bps',
+ },
+ 'Value #C': {
+ alias: 'Rate of Received Packets',
+ unit: 'pps',
+ },
+ 'Value #D': {
+ alias: 'Rate of Transmitted Packets',
+ unit: 'pps',
+ },
+ 'Value #E': {
+ alias: 'Rate of Received Packets Dropped',
+ unit: 'pps',
+ },
+ 'Value #F': {
+ alias: 'Rate of Transmitted Packets Dropped',
+ unit: 'pps',
+ },
+ };
+
+ local cpuUsageQuery = 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config;
+ local memoryUsageQuery = 'sum(container_memory_working_set_bytes{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", container!="", image!=""}) by (pod)' % $._config;
+
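+      // Quota reference lines come from the namespace's hard ResourceQuota; the
+      // limits and memory variants are derived from the requests.cpu query via std.strReplace.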
+ local cpuQuotaRequestsQuery = 'scalar(kube_resourcequota{%(clusterLabel)s="$cluster", namespace="$namespace", type="hard",resource="requests.cpu"})' % $._config;
+ local cpuQuotaLimitsQuery = std.strReplace(cpuQuotaRequestsQuery, 'requests.cpu', 'limits.cpu');
+ local memoryQuotaRequestsQuery = std.strReplace(cpuQuotaRequestsQuery, 'requests.cpu', 'requests.memory');
+ local memoryQuotaLimitsQuery = std.strReplace(cpuQuotaRequestsQuery, 'requests.cpu', 'limits.memory');
+
+ local storageIOColumns = [
+ 'sum by(pod) (rate(container_fs_reads_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(pod) (rate(container_fs_writes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(pod) (rate(container_fs_reads_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s]) + rate(container_fs_writes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(pod) (rate(container_fs_reads_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(pod) (rate(container_fs_writes_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(pod) (rate(container_fs_reads_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s]) + rate(container_fs_writes_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s]))' % $._config,
+ ];
+
+ local storageIOTableStyles = {
+ pod: {
+ alias: 'Pod',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-pod.json') },
+ linkTooltip: 'Drill down to pods',
+ },
+ 'Value #A': {
+ alias: 'IOPS(Reads)',
+ unit: 'short',
+ decimals: -1,
+ },
+ 'Value #B': {
+ alias: 'IOPS(Writes)',
+ unit: 'short',
+ decimals: -1,
+ },
+ 'Value #C': {
+ alias: 'IOPS(Reads + Writes)',
+ unit: 'short',
+ decimals: -1,
+ },
+ 'Value #D': {
+ alias: 'Throughput(Read)',
+ unit: 'Bps',
+ },
+ 'Value #E': {
+ alias: 'Throughput(Write)',
+ unit: 'Bps',
+ },
+ 'Value #F': {
+ alias: 'Throughput(Read + Write)',
+ unit: 'Bps',
+ },
+ };
+
+ g.dashboard(
+ '%(dashboardNamePrefix)sCompute Resources / Namespace (Pods)' % $._config.grafanaK8s,
+ uid=($._config.grafanaDashboardIDs['k8s-resources-namespace.json']),
+ )
+ .addRow(
+ (g.row('Headlines') +
+ {
+ height: '100px',
+ showTitle: false,
+ })
+ .addPanel(
+ g.panel('CPU Utilisation (from requests)') +
+ g.statPanel('sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", namespace="$namespace"}) / sum(kube_pod_container_resource_requests{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", resource="cpu"})' % $._config)
+ )
+ .addPanel(
+ g.panel('CPU Utilisation (from limits)') +
+ g.statPanel('sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", namespace="$namespace"}) / sum(kube_pod_container_resource_limits{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", resource="cpu"})' % $._config)
+ )
+ .addPanel(
+ g.panel('Memory Utilisation (from requests)') +
+ g.statPanel('sum(container_memory_working_set_bytes{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace",container!="", image!=""}) / sum(kube_pod_container_resource_requests{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", resource="memory"})' % $._config)
+ )
+ .addPanel(
+ g.panel('Memory Utilisation (from limits)') +
+ g.statPanel('sum(container_memory_working_set_bytes{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace",container!="", image!=""}) / sum(kube_pod_container_resource_limits{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", resource="memory"})' % $._config)
+ )
+ )
+ .addRow(
+ g.row('CPU Usage')
+ .addPanel(
+ g.panel('CPU Usage') +
+ g.queryPanel([
+ cpuUsageQuery,
+ cpuQuotaRequestsQuery,
+ cpuQuotaLimitsQuery,
+ ], ['{{pod}}', 'quota - requests', 'quota - limits']) +
+ g.stack + {
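+          // Render the quota series as dashed, unstacked reference lines instead of stacked areas.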
+ seriesOverrides: [
+ {
+ alias: 'quota - requests',
+ color: '#F2495C',
+ dashes: true,
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ hiddenSeries: true,
+ },
+ {
+ alias: 'quota - limits',
+ color: '#FF9830',
+ dashes: true,
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ hiddenSeries: true,
+ },
+ ],
+ },
+ )
+ )
+ .addRow(
+ g.row('CPU Quota')
+ .addPanel(
+ g.panel('CPU Quota') +
+ g.tablePanel([
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod) / sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod) / sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'CPU Usage' },
+ 'Value #B': { alias: 'CPU Requests' },
+ 'Value #C': { alias: 'CPU Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'CPU Limits' },
+ 'Value #E': { alias: 'CPU Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Memory Usage')
+ .addPanel(
+ g.panel('Memory Usage (w/o cache)') +
+ // Like above, without page cache
+ g.queryPanel([
+ memoryUsageQuery,
+ memoryQuotaRequestsQuery,
+ memoryQuotaLimitsQuery,
+ ], ['{{pod}}', 'quota - requests', 'quota - limits']) +
+ g.stack +
+ {
+ yaxes: g.yaxes('bytes'),
+ seriesOverrides: [
+ {
+ alias: 'quota - requests',
+ color: '#F2495C',
+ dashes: true,
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ hiddenSeries: true,
+ },
+ {
+ alias: 'quota - limits',
+ color: '#FF9830',
+ dashes: true,
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ hiddenSeries: true,
+ },
+ ],
+ },
+ )
+ )
+ .addRow(
+ g.row('Memory Quota')
+ .addPanel(
+ g.panel('Memory Quota') +
+ g.tablePanel([
+ 'sum(container_memory_working_set_bytes{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace",container!="", image!=""}) by (pod)' % $._config,
+ 'sum(cluster:namespace:pod_memory:active:kube_pod_container_resource_requests{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(container_memory_working_set_bytes{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace",container!="", image!=""}) by (pod) / sum(cluster:namespace:pod_memory:active:kube_pod_container_resource_requests{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(cluster:namespace:pod_memory:active:kube_pod_container_resource_limits{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(container_memory_working_set_bytes{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace",container!="", image!=""}) by (pod) / sum(cluster:namespace:pod_memory:active:kube_pod_container_resource_limits{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(container_memory_rss{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace",container!=""}) by (pod)' % $._config,
+ 'sum(container_memory_cache{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace",container!=""}) by (pod)' % $._config,
+ 'sum(container_memory_swap{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace",container!=""}) by (pod)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'Memory Usage', unit: 'bytes' },
+ 'Value #B': { alias: 'Memory Requests', unit: 'bytes' },
+ 'Value #C': { alias: 'Memory Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'Memory Limits', unit: 'bytes' },
+ 'Value #E': { alias: 'Memory Limits %', unit: 'percentunit' },
+ 'Value #F': { alias: 'Memory Usage (RSS)', unit: 'bytes' },
+ 'Value #G': { alias: 'Memory Usage (Cache)', unit: 'bytes' },
+ 'Value #H': { alias: 'Memory Usage (Swap)', unit: 'bytes' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Current Network Usage')
+ .addPanel(
+ g.panel('Current Network Usage') +
+ g.tablePanel(
+ networkColumns,
+ networkTableStyles
+ ),
+ )
+ )
+ .addRow(
+ g.row('Bandwidth')
+ .addPanel(
+ g.panel('Receive Bandwidth') +
+ g.queryPanel('sum(irate(container_network_receive_bytes_total{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ .addPanel(
+ g.panel('Transmit Bandwidth') +
+ g.queryPanel('sum(irate(container_network_transmit_bytes_total{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Rate of Packets')
+ .addPanel(
+ g.panel('Rate of Received Packets') +
+ g.queryPanel('sum(irate(container_network_receive_packets_total{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ .addPanel(
+ g.panel('Rate of Transmitted Packets') +
+ g.queryPanel('sum(irate(container_network_transmit_packets_total{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ )
+ .addRow(
+ g.row('Rate of Packets Dropped')
+ .addPanel(
+ g.panel('Rate of Received Packets Dropped') +
+ g.queryPanel('sum(irate(container_network_receive_packets_dropped_total{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ .addPanel(
+ g.panel('Rate of Transmitted Packets Dropped') +
+ g.queryPanel('sum(irate(container_network_transmit_packets_dropped_total{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ )
+ .addRow(
+ g.row('Storage IO')
+ .addPanel(
+ g.panel('IOPS(Reads+Writes)') +
+        g.queryPanel('ceil(sum by(pod) (rate(container_fs_reads_total{%(cadvisorSelector)s, %(containerfsSelector)s, %(diskDeviceSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s]) + rate(container_fs_writes_total{%(cadvisorSelector)s, %(containerfsSelector)s, %(diskDeviceSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])))' % $._config, '{{pod}}') +
+        g.stack +
+        { yaxes: g.yaxes('short'), decimals: -1 },
+ )
+ .addPanel(
+        g.panel('Throughput(Read+Write)') +
+        g.queryPanel('sum by(pod) (rate(container_fs_reads_bytes_total{%(cadvisorSelector)s, %(containerfsSelector)s, %(diskDeviceSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s]) + rate(container_fs_writes_bytes_total{%(cadvisorSelector)s, %(containerfsSelector)s, %(diskDeviceSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s]))' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Storage IO - Distribution')
+ .addPanel(
+ g.panel('Current Storage IO') +
+ g.tablePanel(
+ storageIOColumns,
+ storageIOTableStyles
+ ) +
+ {
+ sort: {
+ col: 4,
+ desc: true,
+ },
+ },
+ )
+ ) + {
+ templating+: {
+ list+: [clusterTemplate, namespaceTemplate],
+ },
+ },
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/node.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/node.libsonnet
new file mode 100644
index 0000000..0152f58
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/node.libsonnet
@@ -0,0 +1,152 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local g = import 'github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet';
+local template = grafana.template;
+
+{
+ grafanaDashboards+:: {
+ local clusterTemplate =
+ template.new(
+ name='cluster',
+ datasource='$datasource',
+ query='label_values(up{%(kubeStateMetricsSelector)s}, %(clusterLabel)s)' % $._config,
+ current='',
+ hide=if $._config.showMultiCluster then '' else '2',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ),
+
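+    // Node picker allows multi-select so panels can aggregate several nodes at once.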
+ local nodeTemplate =
+ template.new(
+ name='node',
+ datasource='$datasource',
+ query='label_values(kube_node_info{%(clusterLabel)s="$cluster"}, node)' % $._config,
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ multi=true,
+ sort=1
+ ),
+
+ 'k8s-resources-node.json':
+ local tableStyles = {
+ pod: {
+ alias: 'Pod',
+ },
+ };
+
+ g.dashboard(
+ '%(dashboardNamePrefix)sCompute Resources / Node (Pods)' % $._config.grafanaK8s,
+ uid=($._config.grafanaDashboardIDs['k8s-resources-node.json']),
+ )
+ .addRow(
+ g.row('CPU Usage')
+ .addPanel(
+ g.panel('CPU Usage') +
+ g.queryPanel([
+ 'sum(kube_node_status_capacity{%(clusterLabel)s="$cluster", node=~"$node", resource="cpu"})' % $._config,
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", node=~"$node"}) by (pod)' % $._config,
+ ], [
+ 'max capacity',
+ '{{pod}}',
+ ]) +
+ g.stack +
+ {
+ seriesOverrides: [
+ {
+ alias: 'max capacity',
+ color: '#F2495C',
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ hiddenSeries: true,
+ dashes: true,
+ },
+ ],
+ },
+ )
+ )
+ .addRow(
+ g.row('CPU Quota')
+ .addPanel(
+ g.panel('CPU Quota') +
+ g.tablePanel([
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", node=~"$node"}) by (pod)' % $._config,
+ 'sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests{%(clusterLabel)s="$cluster", node=~"$node"}) by (pod)' % $._config,
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", node=~"$node"}) by (pod) / sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests{%(clusterLabel)s="$cluster", node=~"$node"}) by (pod)' % $._config,
+ 'sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits{%(clusterLabel)s="$cluster", node=~"$node"}) by (pod)' % $._config,
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", node=~"$node"}) by (pod) / sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits{%(clusterLabel)s="$cluster", node=~"$node"}) by (pod)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'CPU Usage' },
+ 'Value #B': { alias: 'CPU Requests' },
+ 'Value #C': { alias: 'CPU Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'CPU Limits' },
+ 'Value #E': { alias: 'CPU Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Memory Usage')
+ .addPanel(
+ g.panel('Memory Usage (w/o cache)') +
+ // Like above, without page cache
+ g.queryPanel([
+ 'sum(kube_node_status_capacity{%(clusterLabel)s="$cluster", node=~"$node", resource="memory"})' % $._config,
+ 'sum(node_namespace_pod_container:container_memory_working_set_bytes{%(clusterLabel)s="$cluster", node=~"$node", container!=""}) by (pod)' % $._config,
+ ], [
+ 'max capacity',
+ '{{pod}}',
+ ]) +
+ g.stack +
+ { yaxes: g.yaxes('bytes') } +
+ {
+ seriesOverrides: [
+ {
+ alias: 'max capacity',
+ color: '#F2495C',
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ hiddenSeries: true,
+ dashes: true,
+ },
+ ],
+ },
+ )
+ )
+ .addRow(
+ g.row('Memory Quota')
+ .addPanel(
+ g.panel('Memory Quota') +
+ g.tablePanel([
+ 'sum(node_namespace_pod_container:container_memory_working_set_bytes{%(clusterLabel)s="$cluster", node=~"$node",container!=""}) by (pod)' % $._config,
+ 'sum(cluster:namespace:pod_memory:active:kube_pod_container_resource_requests{%(clusterLabel)s="$cluster", node=~"$node"}) by (pod)' % $._config,
+ 'sum(node_namespace_pod_container:container_memory_working_set_bytes{%(clusterLabel)s="$cluster", node=~"$node",container!=""}) by (pod) / sum(cluster:namespace:pod_memory:active:kube_pod_container_resource_requests{%(clusterLabel)s="$cluster", node=~"$node"}) by (pod)' % $._config,
+ 'sum(cluster:namespace:pod_memory:active:kube_pod_container_resource_limits{%(clusterLabel)s="$cluster", node=~"$node"}) by (pod)' % $._config,
+ 'sum(node_namespace_pod_container:container_memory_working_set_bytes{%(clusterLabel)s="$cluster", node=~"$node",container!=""}) by (pod) / sum(cluster:namespace:pod_memory:active:kube_pod_container_resource_limits{%(clusterLabel)s="$cluster", node=~"$node"}) by (pod)' % $._config,
+ 'sum(node_namespace_pod_container:container_memory_rss{%(clusterLabel)s="$cluster", node=~"$node",container!=""}) by (pod)' % $._config,
+ 'sum(node_namespace_pod_container:container_memory_cache{%(clusterLabel)s="$cluster", node=~"$node",container!=""}) by (pod)' % $._config,
+ 'sum(node_namespace_pod_container:container_memory_swap{%(clusterLabel)s="$cluster", node=~"$node",container!=""}) by (pod)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'Memory Usage', unit: 'bytes' },
+ 'Value #B': { alias: 'Memory Requests', unit: 'bytes' },
+ 'Value #C': { alias: 'Memory Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'Memory Limits', unit: 'bytes' },
+ 'Value #E': { alias: 'Memory Limits %', unit: 'percentunit' },
+ 'Value #F': { alias: 'Memory Usage (RSS)', unit: 'bytes' },
+ 'Value #G': { alias: 'Memory Usage (Cache)', unit: 'bytes' },
+ 'Value #H': { alias: 'Memory Usage (Swap)', unit: 'bytes' },
+ })
+ )
+ ) + {
+ templating+: {
+ list+: [clusterTemplate, nodeTemplate],
+ },
+ },
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/pod.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/pod.libsonnet
new file mode 100644
index 0000000..e21348d
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/pod.libsonnet
@@ -0,0 +1,352 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local g = import 'github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet';
+local template = grafana.template;
+
+{
+ grafanaDashboards+:: {
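+ // The cluster variable is hidden (hide='2') unless showMultiCluster is set, so single-cluster installs never see it.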
+ local clusterTemplate =
+ template.new(
+ name='cluster',
+ datasource='$datasource',
+ query='label_values(up{%(kubeStateMetricsSelector)s}, %(clusterLabel)s)' % $._config,
+ current='',
+ hide=if $._config.showMultiCluster then '' else '2',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ),
+
+ local namespaceTemplate =
+ template.new(
+ name='namespace',
+ datasource='$datasource',
+ query='label_values(kube_namespace_status_phase{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster"}, namespace)' % $._config,
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ multi=false,
+ sort=1
+ ),
+
+ local podTemplate =
+ template.new(
+ name='pod',
+ datasource='$datasource',
+ query='label_values(kube_pod_info{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}, pod)' % $._config,
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ),
+
+ 'k8s-resources-pod.json':
+ local tableStyles = {
+ container: {
+ alias: 'Container',
+ },
+ };
+
+ local cpuRequestsQuery = |||
+ sum(
+ kube_pod_container_resource_requests{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod", resource="cpu"}
+ )
+ ||| % $._config;
+
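+ // The limits and memory variants are derived from the requests query by plain string substitution, keeping all four selectors in sync by construction.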
+ local cpuLimitsQuery = std.strReplace(cpuRequestsQuery, 'requests', 'limits');
+ local memRequestsQuery = std.strReplace(cpuRequestsQuery, 'cpu', 'memory');
+ local memLimitsQuery = std.strReplace(cpuLimitsQuery, 'cpu', 'memory');
+
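+ // Six per-container storage-IO queries: read, write, and combined IOPS, followed by read, write, and combined throughput; they map onto table columns A-F below.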
+ local storageIOColumns = [
+ 'sum by(container) (rate(container_fs_reads_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(container) (rate(container_fs_writes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(container) (rate(container_fs_reads_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[%(grafanaIntervalVar)s]) + rate(container_fs_writes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(container) (rate(container_fs_reads_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(container) (rate(container_fs_writes_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[%(grafanaIntervalVar)s]))' % $._config,
+ 'sum by(container) (rate(container_fs_reads_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[%(grafanaIntervalVar)s]) + rate(container_fs_writes_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[%(grafanaIntervalVar)s]))' % $._config,
+ ];
+
+ local storageIOTableStyles = {
+ container: {
+ alias: 'Container',
+ },
+ 'Value #A': {
+ alias: 'IOPS(Reads)',
+ unit: 'short',
+ decimals: -1,
+ },
+ 'Value #B': {
+ alias: 'IOPS(Writes)',
+ unit: 'short',
+ decimals: -1,
+ },
+ 'Value #C': {
+ alias: 'IOPS(Reads + Writes)',
+ unit: 'short',
+ decimals: -1,
+ },
+ 'Value #D': {
+ alias: 'Throughput(Read)',
+ unit: 'Bps',
+ },
+ 'Value #E': {
+ alias: 'Throughput(Write)',
+ unit: 'Bps',
+ },
+ 'Value #F': {
+ alias: 'Throughput(Read + Write)',
+ unit: 'Bps',
+ },
+ };
+
+ g.dashboard(
+ '%(dashboardNamePrefix)sCompute Resources / Pod' % $._config.grafanaK8s,
+ uid=($._config.grafanaDashboardIDs['k8s-resources-pod.json']),
+ )
+ .addRow(
+ g.row('CPU Usage')
+ .addPanel(
+ g.panel('CPU Usage') +
+ g.queryPanel(
+ [
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{namespace="$namespace", pod="$pod", %(clusterLabel)s="$cluster"}) by (container)' % $._config,
+ cpuRequestsQuery,
+ cpuLimitsQuery,
+ ], [
+ '{{container}}',
+ 'requests',
+ 'limits',
+ ],
+ ) +
+ g.stack + {
+ seriesOverrides: [
+ {
+ alias: 'requests',
+ color: '#F2495C',
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ },
+ {
+ alias: 'limits',
+ color: '#FF9830',
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ },
+ ],
+ },
+ )
+ )
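+ // Throttling ratio: CFS periods in which the container was throttled divided by all CFS periods, with a critical threshold drawn at cpuThrottlingPercent / 100 on the percent axis.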
+ .addRow(
+ g.row('CPU Throttling')
+ .addPanel(
+ g.panel('CPU Throttling') +
+ g.queryPanel('sum(increase(container_cpu_cfs_throttled_periods_total{%(cadvisorSelector)s, namespace="$namespace", pod="$pod", container!="", %(clusterLabel)s="$cluster"}[%(grafanaIntervalVar)s])) by (container) / sum(increase(container_cpu_cfs_periods_total{%(cadvisorSelector)s, namespace="$namespace", pod="$pod", container!="", %(clusterLabel)s="$cluster"}[%(grafanaIntervalVar)s])) by (container)' % $._config, '{{container}}') +
+ g.stack
+ + {
+ yaxes: g.yaxes({ format: 'percentunit', max: 1 }),
+ legend+: {
+ current: true,
+ max: true,
+ },
+ thresholds: [
+ {
+ value: $._config.cpuThrottlingPercent / 100,
+ colorMode: 'critical',
+ op: 'gt',
+ fill: true,
+ line: true,
+ yaxis: 'left',
+ },
+ ],
+ },
+ )
+ )
+ .addRow(
+ g.row('CPU Quota')
+ .addPanel(
+ g.panel('CPU Quota') +
+ g.tablePanel([
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container) / sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container) / sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'CPU Usage' },
+ 'Value #B': { alias: 'CPU Requests' },
+ 'Value #C': { alias: 'CPU Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'CPU Limits' },
+ 'Value #E': { alias: 'CPU Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Memory Usage')
+ .addPanel(
+ g.panel('Memory Usage (WSS)') +
+ g.queryPanel([
+ 'sum(container_memory_working_set_bytes{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod", container!="", image!=""}) by (container)' % $._config,
+ memRequestsQuery,
+ memLimitsQuery,
+ ], [
+ '{{container}}',
+ 'requests',
+ 'limits',
+ ]) +
+ g.stack +
+ {
+ yaxes: g.yaxes('bytes'),
+ seriesOverrides: [
+ {
+ alias: 'requests',
+ color: '#F2495C',
+ dashes: true,
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ },
+ {
+ alias: 'limits',
+ color: '#FF9830',
+ dashes: true,
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ },
+ ],
+ }
+ )
+ )
+ .addRow(
+ g.row('Memory Quota')
+ .addPanel(
+ g.panel('Memory Quota') +
+ g.tablePanel([
+ 'sum(container_memory_working_set_bytes{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod", container!="", image!=""}) by (container)' % $._config,
+ 'sum(cluster:namespace:pod_memory:active:kube_pod_container_resource_requests{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(container_memory_working_set_bytes{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod", container!="", image!=""}) by (container) / sum(cluster:namespace:pod_memory:active:kube_pod_container_resource_requests{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(cluster:namespace:pod_memory:active:kube_pod_container_resource_limits{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(container_memory_working_set_bytes{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod", container!="", image!=""}) by (container) / sum(cluster:namespace:pod_memory:active:kube_pod_container_resource_limits{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(container_memory_rss{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod", container != "", container != "POD"}) by (container)' % $._config,
+ 'sum(container_memory_cache{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod", container != "", container != "POD"}) by (container)' % $._config,
+ 'sum(container_memory_swap{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod", container != "", container != "POD"}) by (container)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'Memory Usage (WSS)', unit: 'bytes' },
+ 'Value #B': { alias: 'Memory Requests', unit: 'bytes' },
+ 'Value #C': { alias: 'Memory Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'Memory Limits', unit: 'bytes' },
+ 'Value #E': { alias: 'Memory Limits %', unit: 'percentunit' },
+ 'Value #F': { alias: 'Memory Usage (RSS)', unit: 'bytes' },
+ 'Value #G': { alias: 'Memory Usage (Cache)', unit: 'bytes' },
+ 'Value #H': { alias: 'Memory Usage (Swap)', unit: 'bytes' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Bandwidth')
+ .addPanel(
+ g.panel('Receive Bandwidth') +
+ g.queryPanel('sum(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod=~"$pod"}[%(grafanaIntervalVar)s])) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ .addPanel(
+ g.panel('Transmit Bandwidth') +
+ g.queryPanel('sum(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod=~"$pod"}[%(grafanaIntervalVar)s])) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Rate of Packets')
+ .addPanel(
+ g.panel('Rate of Received Packets') +
+ g.queryPanel('sum(irate(container_network_receive_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod=~"$pod"}[%(grafanaIntervalVar)s])) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ .addPanel(
+ g.panel('Rate of Transmitted Packets') +
+ g.queryPanel('sum(irate(container_network_transmit_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod=~"$pod"}[%(grafanaIntervalVar)s])) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ )
+ .addRow(
+ g.row('Rate of Packets Dropped')
+ .addPanel(
+ g.panel('Rate of Received Packets Dropped') +
+ g.queryPanel('sum(irate(container_network_receive_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod=~"$pod"}[%(grafanaIntervalVar)s])) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ .addPanel(
+ g.panel('Rate of Transmitted Packets Dropped') +
+ g.queryPanel('sum(irate(container_network_transmit_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod=~"$pod"}[%(grafanaIntervalVar)s])) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ )
+ .addRow(
+ g.row('Storage IO - Distribution(Pod - Read & Writes)')
+ .addPanel(
+ g.panel('IOPS') +
+ g.queryPanel(['ceil(sum by(pod) (rate(container_fs_reads_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod=~"$pod"}[%(grafanaIntervalVar)s])))' % $._config, 'ceil(sum by(pod) (rate(container_fs_writes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod=~"$pod"}[%(grafanaIntervalVar)s])))' % $._config], ['Reads', 'Writes']) +
+ g.stack +
+ { yaxes: g.yaxes('short'), decimals: -1 },
+ )
+ .addPanel(
+ g.panel('Throughput') +
+ g.queryPanel(['sum by(pod) (rate(container_fs_reads_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod=~"$pod"}[%(grafanaIntervalVar)s]))' % $._config, 'sum by(pod) (rate(container_fs_writes_bytes_total{%(cadvisorSelector)s, %(diskDeviceSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod=~"$pod"}[%(grafanaIntervalVar)s]))' % $._config], ['Reads', 'Writes']) +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Storage IO - Distribution(Containers)')
+ .addPanel(
+ g.panel('IOPS(Reads+Writes)') +
+ g.queryPanel('ceil(sum by(container) (rate(container_fs_reads_total{%(cadvisorSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[%(grafanaIntervalVar)s]) + rate(container_fs_writes_total{%(cadvisorSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[%(grafanaIntervalVar)s])))' % $._config, '{{container}}') +
+ g.stack +
+ { yaxes: g.yaxes('short'), decimals: -1 },
+ )
+ .addPanel(
+ g.panel('Throughput(Read+Write)') +
+ g.queryPanel('sum by(container) (rate(container_fs_reads_bytes_total{%(cadvisorSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[%(grafanaIntervalVar)s]) + rate(container_fs_writes_bytes_total{%(cadvisorSelector)s, %(containerfsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[%(grafanaIntervalVar)s]))' % $._config, '{{container}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Storage IO - Distribution')
+ .addPanel(
+ g.panel('Current Storage IO') +
+ g.tablePanel(
+ storageIOColumns,
+ storageIOTableStyles
+ ) +
+ {
+ sort: {
+ col: 4,
+ desc: true,
+ },
+ },
+ )
+ ) + {
+ templating+: {
+ list+: [clusterTemplate, namespaceTemplate, podTemplate],
+ },
+ },
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/workload-namespace.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/workload-namespace.libsonnet
new file mode 100644
index 0000000..5c95a48
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/workload-namespace.libsonnet
@@ -0,0 +1,386 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local g = import 'github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet';
+local template = grafana.template;
+
+{
+ grafanaDashboards+:: {
+
+ local clusterTemplate =
+ template.new(
+ name='cluster',
+ datasource='$datasource',
+ query='label_values(up{%(kubeStateMetricsSelector)s}, %(clusterLabel)s)' % $._config,
+ current='',
+ hide=if $._config.showMultiCluster then '' else '2',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ),
+
+ local typeTemplate =
+ template.new(
+ name='type',
+ datasource='$datasource',
+ query='label_values(namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", namespace="$namespace", workload=~".+"}, workload_type)' % $._config,
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=true,
+ sort=0
+ ) + {
+ auto: false,
+ auto_count: 30,
+ auto_min: '10s',
+ definition: 'label_values(namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", namespace="$namespace", workload=~".+"}, workload_type)' % $._config,
+ skipUrlSync: false,
+ },
+
+ local namespaceTemplate =
+ template.new(
+ name='namespace',
+ datasource='$datasource',
+ query='label_values(kube_pod_info{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster"}, namespace)' % $._config,
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ multi=false,
+ sort=1
+ ),
+
+ 'k8s-resources-workloads-namespace.json':
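+ // Drill-down links use std.md5(<filename>) as the target uid; the grafanaDashboardIDs entries are expected to follow the same convention so the links below resolve to the generated dashboards.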
+ local tableStyles = {
+ workload: {
+ alias: 'Workload',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-workload?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-workload=$__cell&var-type=$__cell_2' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-workload.json') },
+ },
+ workload_type: {
+ alias: 'Workload Type',
+ },
+ };
+
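+ // Each network column joins the per-pod cadvisor series onto the namespace_workload_pod:kube_pod_owner:relabel recording rule via group_left, pulling in the workload and workload_type labels so traffic can be summed per workload.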
+ local networkColumns = [
+ |||
+ (sum(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ |||
+ (sum(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ |||
+ (sum(irate(container_network_receive_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ |||
+ (sum(irate(container_network_transmit_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ |||
+ (sum(irate(container_network_receive_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ |||
+ (sum(irate(container_network_transmit_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload_type=~"$type"}) by (workload))
+ ||| % $._config,
+ ];
+
+ local networkTableStyles = {
+ workload: {
+ alias: 'Workload',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-workload?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-workload=$__cell&var-type=$type' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-workload.json') },
+ linkTooltip: 'Drill down to pods',
+ },
+ workload_type: {
+ alias: 'Workload Type',
+ },
+ 'Value #A': {
+ alias: 'Current Receive Bandwidth',
+ unit: 'Bps',
+ },
+ 'Value #B': {
+ alias: 'Current Transmit Bandwidth',
+ unit: 'Bps',
+ },
+ 'Value #C': {
+ alias: 'Rate of Received Packets',
+ unit: 'pps',
+ },
+ 'Value #D': {
+ alias: 'Rate of Transmitted Packets',
+ unit: 'pps',
+ },
+ 'Value #E': {
+ alias: 'Rate of Received Packets Dropped',
+ unit: 'pps',
+ },
+ 'Value #F': {
+ alias: 'Rate of Transmitted Packets Dropped',
+ unit: 'pps',
+ },
+ };
+
+ local cpuUsageQuery = |||
+ sum(
+ node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", namespace="$namespace"}
+ * on(namespace,pod)
+ group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", namespace="$namespace", workload_type=~"$type"}
+ ) by (workload, workload_type)
+ ||| % $._config;
+
+ local cpuRequestsQuery = |||
+ sum(
+ kube_pod_container_resource_requests{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", resource="cpu"}
+ * on(namespace,pod)
+ group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", namespace="$namespace", workload_type=~"$type"}
+ ) by (workload, workload_type)
+ ||| % $._config;
+
+ local podCountQuery = 'count(namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", namespace="$namespace", workload_type=~"$type"}) by (workload, workload_type)' % $._config;
+ local cpuLimitsQuery = std.strReplace(cpuRequestsQuery, 'requests', 'limits');
+
+ local memUsageQuery = |||
+ sum(
+ container_memory_working_set_bytes{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", container!="", image!=""}
+ * on(namespace,pod)
+ group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", namespace="$namespace", workload_type=~"$type"}
+ ) by (workload, workload_type)
+ ||| % $._config;
+ local memRequestsQuery = std.strReplace(cpuRequestsQuery, 'cpu', 'memory');
+ local memLimitsQuery = std.strReplace(cpuLimitsQuery, 'cpu', 'memory');
+
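+ // Hard ResourceQuota values are wrapped in scalar() so they render as flat reference lines; the limits and memory variants are again derived by string substitution.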
+ local cpuQuotaRequestsQuery = 'scalar(kube_resourcequota{%(clusterLabel)s="$cluster", namespace="$namespace", type="hard",resource="requests.cpu"})' % $._config;
+ local cpuQuotaLimitsQuery = std.strReplace(cpuQuotaRequestsQuery, 'requests.cpu', 'limits.cpu');
+ local memoryQuotaRequestsQuery = std.strReplace(cpuQuotaRequestsQuery, 'requests.cpu', 'requests.memory');
+ local memoryQuotaLimitsQuery = std.strReplace(cpuQuotaRequestsQuery, 'requests.cpu', 'limits.memory');
+
+ g.dashboard(
+ '%(dashboardNamePrefix)sCompute Resources / Namespace (Workloads)' % $._config.grafanaK8s,
+ uid=($._config.grafanaDashboardIDs['k8s-resources-workloads-namespace.json']),
+ )
+ .addRow(
+ g.row('CPU Usage')
+ .addPanel(
+ g.panel('CPU Usage') +
+ g.queryPanel([cpuUsageQuery, cpuQuotaRequestsQuery, cpuQuotaLimitsQuery], ['{{workload}} - {{workload_type}}', 'quota - requests', 'quota - limits']) +
+ g.stack + {
+ seriesOverrides: [
+ {
+ alias: 'quota - requests',
+ color: '#F2495C',
+ dashes: true,
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ hiddenSeries: true,
+ },
+ {
+ alias: 'quota - limits',
+ color: '#FF9830',
+ dashes: true,
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ hiddenSeries: true,
+ },
+ ],
+ },
+ )
+ )
+ .addRow(
+ g.row('CPU Quota')
+ .addPanel(
+ g.panel('CPU Quota') +
+ g.tablePanel([
+ podCountQuery,
+ cpuUsageQuery,
+ cpuRequestsQuery,
+ cpuUsageQuery + '/' + cpuRequestsQuery,
+ cpuLimitsQuery,
+ cpuUsageQuery + '/' + cpuLimitsQuery,
+ ], tableStyles {
+ 'Value #A': { alias: 'Running Pods', decimals: 0 },
+ 'Value #B': { alias: 'CPU Usage' },
+ 'Value #C': { alias: 'CPU Requests' },
+ 'Value #D': { alias: 'CPU Requests %', unit: 'percentunit' },
+ 'Value #E': { alias: 'CPU Limits' },
+ 'Value #F': { alias: 'CPU Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Memory Usage')
+ .addPanel(
+ g.panel('Memory Usage') +
+ g.queryPanel([memUsageQuery, memoryQuotaRequestsQuery, memoryQuotaLimitsQuery], ['{{workload}} - {{workload_type}}', 'quota - requests', 'quota - limits']) +
+ g.stack +
+ {
+ yaxes: g.yaxes('bytes'),
+ seriesOverrides: [
+ {
+ alias: 'quota - requests',
+ color: '#F2495C',
+ dashes: true,
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ hiddenSeries: true,
+ },
+ {
+ alias: 'quota - limits',
+ color: '#FF9830',
+ dashes: true,
+ fill: 0,
+ hideTooltip: true,
+ legend: true,
+ linewidth: 2,
+ stack: false,
+ hiddenSeries: true,
+ },
+ ],
+ },
+ )
+ )
+ .addRow(
+ g.row('Memory Quota')
+ .addPanel(
+ g.panel('Memory Quota') +
+ g.tablePanel([
+ podCountQuery,
+ memUsageQuery,
+ memRequestsQuery,
+ memUsageQuery + '/' + memRequestsQuery,
+ memLimitsQuery,
+ memUsageQuery + '/' + memLimitsQuery,
+ ], tableStyles {
+ 'Value #A': { alias: 'Running Pods', decimals: 0 },
+ 'Value #B': { alias: 'Memory Usage', unit: 'bytes' },
+ 'Value #C': { alias: 'Memory Requests', unit: 'bytes' },
+ 'Value #D': { alias: 'Memory Requests %', unit: 'percentunit' },
+ 'Value #E': { alias: 'Memory Limits', unit: 'bytes' },
+ 'Value #F': { alias: 'Memory Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Current Network Usage')
+ .addPanel(
+ g.panel('Current Network Usage') +
+ g.tablePanel(
+ networkColumns,
+ networkTableStyles
+ ),
+ )
+ )
+ .addRow(
+ g.row('Bandwidth')
+ .addPanel(
+ g.panel('Receive Bandwidth') +
+ g.queryPanel(|||
+ (sum(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config, '{{workload}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ .addPanel(
+ g.panel('Transmit Bandwidth') +
+ g.queryPanel(|||
+ (sum(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config, '{{workload}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Average Container Bandwidth by Workload')
+ .addPanel(
+ g.panel('Average Container Bandwidth by Workload: Received') +
+ g.queryPanel(|||
+ (avg(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config, '{{workload}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ .addPanel(
+ g.panel('Average Container Bandwidth by Workload: Transmitted') +
+ g.queryPanel(|||
+ (avg(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config, '{{workload}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Rate of Packets')
+ .addPanel(
+ g.panel('Rate of Received Packets') +
+ g.queryPanel(|||
+ (sum(irate(container_network_receive_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config, '{{workload}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ .addPanel(
+ g.panel('Rate of Transmitted Packets') +
+ g.queryPanel(|||
+ (sum(irate(container_network_transmit_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config, '{{workload}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ )
+ .addRow(
+ g.row('Rate of Packets Dropped')
+ .addPanel(
+ g.panel('Rate of Received Packets Dropped') +
+ g.queryPanel(|||
+ (sum(irate(container_network_receive_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config, '{{workload}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ .addPanel(
+ g.panel('Rate of Transmitted Packets Dropped') +
+ g.queryPanel(|||
+ (sum(irate(container_network_transmit_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~".+", workload_type=~"$type"}) by (workload))
+ ||| % $._config, '{{workload}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ ) + {
+ templating+: {
+ list+: [clusterTemplate, namespaceTemplate, typeTemplate],
+ },
+ },
+
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/workload.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/workload.libsonnet
new file mode 100644
index 0000000..666a639
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/resources/workload.libsonnet
@@ -0,0 +1,322 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local g = import 'github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet';
+local template = grafana.template;
+
+{
+ grafanaDashboards+:: {
+ local clusterTemplate =
+ template.new(
+ name='cluster',
+ datasource='$datasource',
+ query='label_values(up{%(kubeStateMetricsSelector)s}, %(clusterLabel)s)' % $._config,
+ current='',
+ hide=if $._config.showMultiCluster then '' else '2',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ),
+
+ local namespaceTemplate =
+ template.new(
+ name='namespace',
+ datasource='$datasource',
+ query='label_values(kube_namespace_status_phase{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster"}, namespace)' % $._config,
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ multi=false,
+ sort=1
+ ),
+
+ local workloadTypeTemplate =
+ template.new(
+ name='type',
+ datasource='$datasource',
+ query='label_values(namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", namespace="$namespace"}, workload_type)' % $._config,
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=true,
+ sort=0
+ ),
+
+ local workloadTemplate =
+ template.new(
+ name='workload',
+ datasource='$datasource',
+ query='label_values(namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", namespace="$namespace", workload_type=~"$type"}, workload)' % $._config,
+ current='',
+ hide='',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ),
+ 'k8s-resources-workload.json':
+ local tableStyles = {
+ pod: {
+ alias: 'Pod',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-pod.json') },
+ },
+ };
+
+ local networkColumns = [
+ |||
+ (sum(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ |||
+ (sum(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ |||
+ (sum(irate(container_network_receive_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ |||
+ (sum(irate(container_network_transmit_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ |||
+ (sum(irate(container_network_receive_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ |||
+ (sum(irate(container_network_transmit_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config,
+ ];
+
+ local networkTableStyles = {
+ pod: {
+ alias: 'Pod',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-pod.json') },
+ },
+ 'Value #A': {
+ alias: 'Current Receive Bandwidth',
+ unit: 'Bps',
+ },
+ 'Value #B': {
+ alias: 'Current Transmit Bandwidth',
+ unit: 'Bps',
+ },
+ 'Value #C': {
+ alias: 'Rate of Received Packets',
+ unit: 'pps',
+ },
+ 'Value #D': {
+ alias: 'Rate of Transmitted Packets',
+ unit: 'pps',
+ },
+ 'Value #E': {
+ alias: 'Rate of Received Packets Dropped',
+ unit: 'pps',
+ },
+ 'Value #F': {
+ alias: 'Rate of Transmitted Packets Dropped',
+ unit: 'pps',
+ },
+ };
+
+
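+ // Same owner-join as the workloads-namespace dashboard, but pinned to a single $workload and grouped by pod for the per-pod breakdown.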
+ local cpuUsageQuery = |||
+ sum(
+ node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{%(clusterLabel)s="$cluster", namespace="$namespace"}
+ * on(namespace,pod)
+ group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", namespace="$namespace", workload="$workload", workload_type=~"$type"}
+ ) by (pod)
+ ||| % $._config;
+
+ local cpuRequestsQuery = |||
+ sum(
+ kube_pod_container_resource_requests{%(kubeStateMetricsSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", resource="cpu"}
+ * on(namespace,pod)
+ group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", namespace="$namespace", workload="$workload", workload_type=~"$type"}
+ ) by (pod)
+ ||| % $._config;
+
+ local cpuLimitsQuery = std.strReplace(cpuRequestsQuery, 'requests', 'limits');
+
+ local memUsageQuery = |||
+ sum(
+ container_memory_working_set_bytes{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace", container!="", image!=""}
+ * on(namespace,pod)
+ group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", namespace="$namespace", workload="$workload", workload_type=~"$type"}
+ ) by (pod)
+ ||| % $._config;
+ local memRequestsQuery = std.strReplace(cpuRequestsQuery, 'cpu', 'memory');
+ local memLimitsQuery = std.strReplace(cpuLimitsQuery, 'cpu', 'memory');
+
+ g.dashboard(
+ '%(dashboardNamePrefix)sCompute Resources / Workload' % $._config.grafanaK8s,
+ uid=($._config.grafanaDashboardIDs['k8s-resources-workload.json']),
+ )
+ .addRow(
+ g.row('CPU Usage')
+ .addPanel(
+ g.panel('CPU Usage') +
+ g.queryPanel(cpuUsageQuery, '{{pod}}') +
+ g.stack,
+ )
+ )
+ .addRow(
+ g.row('CPU Quota')
+ .addPanel(
+ g.panel('CPU Quota') +
+ g.tablePanel([
+ cpuUsageQuery,
+ cpuRequestsQuery,
+ cpuUsageQuery + '/' + cpuRequestsQuery,
+ cpuLimitsQuery,
+ cpuUsageQuery + '/' + cpuLimitsQuery,
+ ], tableStyles {
+ 'Value #A': { alias: 'CPU Usage' },
+ 'Value #B': { alias: 'CPU Requests' },
+ 'Value #C': { alias: 'CPU Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'CPU Limits' },
+ 'Value #E': { alias: 'CPU Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Memory Usage')
+ .addPanel(
+ g.panel('Memory Usage') +
+ g.queryPanel(memUsageQuery, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('bytes') },
+ )
+ )
+ .addRow(
+ g.row('Memory Quota')
+ .addPanel(
+ g.panel('Memory Quota') +
+ g.tablePanel([
+ memUsageQuery,
+ memRequestsQuery,
+ memUsageQuery + '/' + memRequestsQuery,
+ memLimitsQuery,
+ memUsageQuery + '/' + memLimitsQuery,
+ ], tableStyles {
+ 'Value #A': { alias: 'Memory Usage', unit: 'bytes' },
+ 'Value #B': { alias: 'Memory Requests', unit: 'bytes' },
+ 'Value #C': { alias: 'Memory Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'Memory Limits', unit: 'bytes' },
+ 'Value #E': { alias: 'Memory Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Current Network Usage')
+ .addPanel(
+ g.panel('Current Network Usage') +
+ g.tablePanel(
+ networkColumns,
+ networkTableStyles
+ ),
+ )
+ )
+ .addRow(
+ g.row('Bandwidth')
+ .addPanel(
+ g.panel('Receive Bandwidth') +
+ g.queryPanel(|||
+ (sum(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ .addPanel(
+ g.panel('Transmit Bandwidth') +
+ g.queryPanel(|||
+ (sum(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Average Container Bandwidth by Pod')
+ .addPanel(
+ g.panel('Average Container Bandwidth by Pod: Received') +
+ g.queryPanel(|||
+ (avg(irate(container_network_receive_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ .addPanel(
+ g.panel('Average Container Bandwidth by Pod: Transmitted') +
+ g.queryPanel(|||
+ (avg(irate(container_network_transmit_bytes_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Rate of Packets')
+ .addPanel(
+ g.panel('Rate of Received Packets') +
+ g.queryPanel(|||
+ (sum(irate(container_network_receive_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ .addPanel(
+ g.panel('Rate of Transmitted Packets') +
+ g.queryPanel(|||
+ (sum(irate(container_network_transmit_packets_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ )
+ .addRow(
+ g.row('Rate of Packets Dropped')
+ .addPanel(
+ g.panel('Rate of Received Packets Dropped') +
+ g.queryPanel(|||
+ (sum(irate(container_network_receive_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ .addPanel(
+ g.panel('Rate of Transmitted Packets Dropped') +
+ g.queryPanel(|||
+ (sum(irate(container_network_transmit_packets_dropped_total{%(cadvisorSelector)s, %(clusterLabel)s="$cluster", namespace="$namespace"}[%(grafanaIntervalVar)s])
+ * on (namespace,pod)
+ group_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{%(clusterLabel)s="$cluster", %(namespaceLabel)s="$namespace", workload=~"$workload", workload_type=~"$type"}) by (pod))
+ ||| % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('pps') },
+ )
+ ) + {
+ templating+: {
+ list+: [clusterTemplate, namespaceTemplate, workloadTypeTemplate, workloadTemplate],
+ },
+ },
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/scheduler.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/scheduler.libsonnet
new file mode 100644
index 0000000..63ad000
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/scheduler.libsonnet
@@ -0,0 +1,186 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local singlestat = grafana.singlestat;
+
+{
+ grafanaDashboards+:: {
+ 'scheduler.json':
+ local upCount =
+ singlestat.new(
+ 'Up',
+ datasource='$datasource',
+ span=2,
+ valueName='min',
+ )
+ .addTarget(prometheus.target('sum(up{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s})' % $._config));
+
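+ // Scheduling Rate charts rate() over the *_duration_seconds_count series, i.e. operations per second for the e2e, binding, scheduling-algorithm, and volume phases; the latency panel below applies histogram_quantile(0.99, ...) to the matching _bucket series.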
+ local schedulingRate =
+ graphPanel.new(
+ 'Scheduling Rate',
+ datasource='$datasource',
+ span=5,
+ format='ops',
+ min=0,
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('sum(rate(scheduler_e2e_scheduling_duration_seconds_count{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (%(clusterLabel)s, instance)' % $._config, legendFormat='{{%(clusterLabel)s}} {{instance}} e2e' % $._config))
+ .addTarget(prometheus.target('sum(rate(scheduler_binding_duration_seconds_count{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (%(clusterLabel)s, instance)' % $._config, legendFormat='{{%(clusterLabel)s}} {{instance}} binding' % $._config))
+ .addTarget(prometheus.target('sum(rate(scheduler_scheduling_algorithm_duration_seconds_count{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (%(clusterLabel)s, instance)' % $._config, legendFormat='{{%(clusterLabel)s}} {{instance}} scheduling algorithm' % $._config))
+ .addTarget(prometheus.target('sum(rate(scheduler_volume_scheduling_duration_seconds_count{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])) by (%(clusterLabel)s, instance)' % $._config, legendFormat='{{%(clusterLabel)s}} {{instance}} volume' % $._config));
+
+
+ local schedulingLatency =
+ graphPanel.new(
+ 'Scheduling Latency 99th Quantile',
+ datasource='$datasource',
+ span=5,
+ min=0,
+ format='s',
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (%(clusterLabel)s, instance, le))' % $._config, legendFormat='{{%(clusterLabel)s}} {{instance}} e2e' % $._config))
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(scheduler_binding_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (%(clusterLabel)s, instance, le))' % $._config, legendFormat='{{%(clusterLabel)s}} {{instance}} binding' % $._config))
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (%(clusterLabel)s, instance, le))' % $._config, legendFormat='{{%(clusterLabel)s}} {{instance}} scheduling algorithm' % $._config))
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(scheduler_volume_scheduling_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s,instance=~"$instance"}[%(grafanaIntervalVar)s])) by (%(clusterLabel)s, instance, le))' % $._config, legendFormat='{{%(clusterLabel)s}} {{instance}} volume' % $._config));
+
+ local rpcRate =
+ graphPanel.new(
+ 'Kube API Request Rate',
+ datasource='$datasource',
+ span=4,
+ format='ops',
+ min=0,
+ )
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s, instance=~"$instance",code=~"2.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='2xx'))
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s, instance=~"$instance",code=~"3.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='3xx'))
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s, instance=~"$instance",code=~"4.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='4xx'))
+ .addTarget(prometheus.target('sum(rate(rest_client_requests_total{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s, instance=~"$instance",code=~"5.."}[%(grafanaIntervalVar)s]))' % $._config, legendFormat='5xx'));
+
+ local postRequestLatency =
+ graphPanel.new(
+ 'Post Request Latency 99th Quantile',
+ datasource='$datasource',
+ span=8,
+ format='s',
+ min=0,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s, instance=~"$instance", verb="POST"}[%(grafanaIntervalVar)s])) by (verb, url, le))' % $._config, legendFormat='{{verb}} {{url}}'));
+
+ local getRequestLatency =
+ graphPanel.new(
+ 'Get Request Latency 99th Quantile',
+ datasource='$datasource',
+ span=12,
+ format='s',
+ min=0,
+ legend_show=true,
+ legend_values=true,
+ legend_current=true,
+ legend_alignAsTable=true,
+ legend_rightSide=true,
+ )
+ .addTarget(prometheus.target('histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s, instance=~"$instance", verb="GET"}[%(grafanaIntervalVar)s])) by (verb, url, le))' % $._config, legendFormat='{{verb}} {{url}}'));
+
+ local memory =
+ graphPanel.new(
+ 'Memory',
+ datasource='$datasource',
+ span=4,
+ format='bytes',
+ )
+ .addTarget(prometheus.target('process_resident_memory_bytes{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s, instance=~"$instance"}' % $._config, legendFormat='{{instance}}'));
+
+ local cpu =
+ graphPanel.new(
+ 'CPU usage',
+ datasource='$datasource',
+ span=4,
+ format='short',  // CPU usage is measured in cores, not bytes
+ min=0,
+ )
+ .addTarget(prometheus.target('rate(process_cpu_seconds_total{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s, instance=~"$instance"}[%(grafanaIntervalVar)s])' % $._config, legendFormat='{{instance}}'));
+
+ local goroutines =
+ graphPanel.new(
+ 'Goroutines',
+ datasource='$datasource',
+ span=4,
+ format='short',
+ )
+ .addTarget(prometheus.target('go_goroutines{%(clusterLabel)s="$cluster", %(kubeSchedulerSelector)s,instance=~"$instance"}' % $._config, legendFormat='{{instance}}'));
+
+
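+ // Assemble the dashboard: a datasource picker, a cluster variable (hidden unless showMultiCluster), and an instance variable with includeAll.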
+ dashboard.new(
+ '%(dashboardNamePrefix)sScheduler' % $._config.grafanaK8s,
+ time_from='now-1h',
+ uid=($._config.grafanaDashboardIDs['scheduler.json']),
+ tags=($._config.grafanaK8s.dashboardTags),
+ ).addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: $._config.datasourceName,
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(
+ template.new(
+ 'cluster',
+ '$datasource',
+ 'label_values(up{%(kubeSchedulerSelector)s}, %(clusterLabel)s)' % $._config,
+ label='cluster',
+ refresh='time',
+ hide=if $._config.showMultiCluster then '' else 'variable',
+ sort=1,
+ )
+ )
+ .addTemplate(
+ template.new(
+ 'instance',
+ '$datasource',
+ 'label_values(up{%(kubeSchedulerSelector)s, %(clusterLabel)s="$cluster"}, instance)' % $._config,
+ refresh='time',
+ includeAll=true,
+ sort=1,
+ )
+ )
+ .addRow(
+ row.new()
+ .addPanel(upCount)
+ .addPanel(schedulingRate)
+ .addPanel(schedulingLatency)
+ ).addRow(
+ row.new()
+ .addPanel(rpcRate)
+ .addPanel(postRequestLatency)
+ ).addRow(
+ row.new()
+ .addPanel(getRequestLatency)
+ ).addRow(
+ row.new()
+ .addPanel(memory)
+ .addPanel(cpu)
+ .addPanel(goroutines)
+ ),
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/windows.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/windows.libsonnet
new file mode 100644
index 0000000..c4097f6
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/dashboards/windows.libsonnet
@@ -0,0 +1,612 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local g = import 'github.com/grafana/jsonnet-libs/grafana-builder/grafana.libsonnet';
+
+{
+ grafanaDashboards+:: {
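+ // Windows counterparts of the compute-resources dashboards, built from windows_exporter metrics and the Windows-specific kube-state and recording-rule series (e.g. node:windows_node_num_cpu:sum).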
+ 'k8s-resources-windows-cluster.json':
+ local tableStyles = {
+ namespace: {
+ alias: 'Namespace',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-windows-namespace?var-datasource=$datasource&var-namespace=$__cell' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-windows-namespace.json') },
+ },
+ };
+
+ dashboard.new(
+ '%(dashboardNamePrefix)sCompute Resources / Cluster(Windows)' % $._config.grafanaK8s,
+ uid=($._config.grafanaDashboardIDs['k8s-resources-windows-cluster.json']),
+ tags=($._config.grafanaK8s.dashboardTags),
+ ).addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: 'default',
+ },
+ hide: 0,
+ label: null,
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ ).addTemplate(
+ template.new(
+ 'cluster',
+ '$datasource',
+ 'label_values(up{%(windowsExporterSelector)s}, %(clusterLabel)s)' % $._config,
+ label='cluster',
+ refresh='time',
+ hide=if $._config.showMultiCluster then '' else 'variable',
+ sort=1,
+ )
+ )
+ .addRow(
+ (g.row('Headlines') +
+ {
+ height: '100px',
+ showTitle: false,
+ })
+ .addPanel(
+ g.panel('CPU Utilisation') +
+ g.statPanel('1 - avg(rate(windows_cpu_time_total{%(clusterLabel)s="$cluster", %(windowsExporterSelector)s, mode="idle"}[1m]))' % $._config)
+ )
+ .addPanel(
+ g.panel('CPU Requests Commitment') +
+ g.statPanel('sum(kube_pod_windows_container_resource_cpu_cores_request{%(clusterLabel)s="$cluster"}) / sum(node:windows_node_num_cpu:sum{%(clusterLabel)s="$cluster"})' % $._config)
+ )
+ .addPanel(
+ g.panel('CPU Limits Commitment') +
+ g.statPanel('sum(kube_pod_windows_container_resource_cpu_cores_limit{%(clusterLabel)s="$cluster"}) / sum(node:windows_node_num_cpu:sum{%(clusterLabel)s="$cluster"})' % $._config)
+ )
+ .addPanel(
+ g.panel('Memory Utilisation') +
+ g.statPanel('1 - sum(:windows_node_memory_MemFreeCached_bytes:sum{%(clusterLabel)s="$cluster"}) / sum(:windows_node_memory_MemTotal_bytes:sum{%(clusterLabel)s="$cluster"})' % $._config)
+ )
+ .addPanel(
+ g.panel('Memory Requests Commitment') +
+ g.statPanel('sum(kube_pod_windows_container_resource_memory_request{%(clusterLabel)s="$cluster"}) / sum(:windows_node_memory_MemTotal_bytes:sum{%(clusterLabel)s="$cluster"})' % $._config)
+ )
+ .addPanel(
+ g.panel('Memory Limits Commitment') +
+ g.statPanel('sum(kube_pod_windows_container_resource_memory_limit{%(clusterLabel)s="$cluster"}) / sum(:windows_node_memory_MemTotal_bytes:sum{%(clusterLabel)s="$cluster"})' % $._config)
+ )
+ )
+ .addRow(
+ g.row('CPU')
+ .addPanel(
+ g.panel('CPU Usage') +
+ g.queryPanel('sum(namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config, '{{namespace}}') +
+ g.stack
+ )
+ )
+ .addRow(
+ g.row('CPU Quota')
+ .addPanel(
+ g.panel('CPU Quota') +
+ g.tablePanel([
+ 'sum(namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(kube_pod_windows_container_resource_cpu_cores_request{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate{%(clusterLabel)s="$cluster"}) by (namespace) / sum(kube_pod_windows_container_resource_cpu_cores_request{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(kube_pod_windows_container_resource_cpu_cores_limit{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate{%(clusterLabel)s="$cluster"}) by (namespace) / sum(kube_pod_windows_container_resource_cpu_cores_limit{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'CPU Usage' },
+ 'Value #B': { alias: 'CPU Requests' },
+ 'Value #C': { alias: 'CPU Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'CPU Limits' },
+ 'Value #E': { alias: 'CPU Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Memory')
+ .addPanel(
+ g.panel('Memory Usage (Private Working Set)') +
+ // Not using container_memory_usage_bytes here because that includes page cache
+ g.queryPanel('sum(windows_container_private_working_set_usage{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config, '{{namespace}}') +
+ g.stack +
+ { yaxes: g.yaxes('decbytes') },
+ )
+ )
+ .addRow(
+ g.row('Memory Requests')
+ .addPanel(
+ g.panel('Requests by Namespace') +
+ g.tablePanel([
+ // Not using container_memory_usage_bytes here because that includes page cache
+ 'sum(windows_container_private_working_set_usage{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(kube_pod_windows_container_resource_memory_request{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(windows_container_private_working_set_usage{%(clusterLabel)s="$cluster"}) by (namespace) / sum(kube_pod_windows_container_resource_memory_request{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(kube_pod_windows_container_resource_memory_limit{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ 'sum(windows_container_private_working_set_usage{%(clusterLabel)s="$cluster"}) by (namespace) / sum(kube_pod_windows_container_resource_memory_limit{%(clusterLabel)s="$cluster"}) by (namespace)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'Memory Usage', unit: 'decbytes' },
+ 'Value #B': { alias: 'Memory Requests', unit: 'decbytes' },
+ 'Value #C': { alias: 'Memory Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'Memory Limits', unit: 'decbytes' },
+ 'Value #E': { alias: 'Memory Limits %', unit: 'percentunit' },
+ })
+ )
+ ),
+
+ 'k8s-resources-windows-namespace.json':
+ local tableStyles = {
+ pod: {
+ alias: 'Pod',
+ link: '%(prefix)s/d/%(uid)s/k8s-resources-windows-pod?var-datasource=$datasource&var-namespace=$namespace&var-pod=$__cell' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-resources-windows-pod.json') },
+ },
+ };
+
+ dashboard.new(
+ '%(dashboardNamePrefix)sCompute Resources / Namespace(Windows)' % $._config.grafanaK8s,
+ uid=($._config.grafanaDashboardIDs['k8s-resources-windows-namespace.json']),
+ tags=($._config.grafanaK8s.dashboardTags),
+ ).addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: $._config.datasourceName,
+ },
+ hide: 0,
+ label: null,
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(
+ template.new(
+ 'namespace',
+ '$datasource',
+ 'label_values(windows_pod_container_available, namespace)',
+ label='Namespace',
+ refresh='time',
+ sort=1,
+ )
+ ).addTemplate(
+ template.new(
+ 'cluster',
+ '$datasource',
+ 'label_values(up{%(windowsExporterSelector)s}, %(clusterLabel)s)' % $._config,
+ label='cluster',
+ refresh='time',
+ hide=if $._config.showMultiCluster then '' else 'variable',
+ sort=1,
+ )
+ )
+ .addRow(
+ g.row('CPU Usage')
+ .addPanel(
+ g.panel('CPU Usage') +
+ g.queryPanel('sum(namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config, '{{pod}}') +
+ g.stack,
+ )
+ )
+ .addRow(
+ g.row('CPU Quota')
+ .addPanel(
+ g.panel('CPU Quota') +
+ g.tablePanel([
+ 'sum(namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(kube_pod_windows_container_resource_cpu_cores_request{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod) / sum(kube_pod_windows_container_resource_cpu_cores_request{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(kube_pod_windows_container_resource_cpu_cores_limit{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod) / sum(kube_pod_windows_container_resource_cpu_cores_limit{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'CPU Usage' },
+ 'Value #B': { alias: 'CPU Requests' },
+ 'Value #C': { alias: 'CPU Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'CPU Limits' },
+ 'Value #E': { alias: 'CPU Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Memory Usage')
+ .addPanel(
+ g.panel('Memory Usage') +
+ g.queryPanel('sum(windows_container_private_working_set_usage{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config, '{{pod}}') +
+ g.stack +
+ { yaxes: g.yaxes('decbytes') },
+ )
+ )
+ .addRow(
+ g.row('Memory Quota')
+ .addPanel(
+ g.panel('Memory Quota') +
+ g.tablePanel([
+ 'sum(windows_container_private_working_set_usage{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(kube_pod_windows_container_resource_memory_request{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(windows_container_private_working_set_usage{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod) / sum(kube_pod_windows_container_resource_memory_request{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(kube_pod_windows_container_resource_memory_limit{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ 'sum(windows_container_private_working_set_usage{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod) / sum(kube_pod_windows_container_resource_memory_limit{%(clusterLabel)s="$cluster", namespace="$namespace"}) by (pod)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'Memory Usage', unit: 'decbytes' },
+ 'Value #B': { alias: 'Memory Requests', unit: 'decbytes' },
+ 'Value #C': { alias: 'Memory Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'Memory Limits', unit: 'decbytes' },
+ 'Value #E': { alias: 'Memory Limits %', unit: 'percentunit' },
+ })
+ )
+ ),
+
+ 'k8s-resources-windows-pod.json':
+ local tableStyles = {
+ container: {
+ alias: 'Container',
+ },
+ };
+
+ dashboard.new(
+ '%(dashboardNamePrefix)sCompute Resources / Pod(Windows)' % $._config.grafanaK8s,
+ uid=($._config.grafanaDashboardIDs['k8s-resources-windows-pod.json']),
+ tags=($._config.grafanaK8s.dashboardTags),
+ ).addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: 'default',
+ },
+ hide: 0,
+ label: null,
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(
+ template.new(
+ 'namespace',
+ '$datasource',
+ 'label_values(windows_pod_container_available, namespace)',
+ label='Namespace',
+ refresh='time',
+ sort=1,
+ )
+ )
+ .addTemplate(
+ template.new(
+ 'pod',
+ '$datasource',
+ 'label_values(windows_pod_container_available{namespace="$namespace"}, pod)',
+ label='Pod',
+ refresh='time',
+ sort=1,
+ )
+ ).addTemplate(
+ template.new(
+ 'cluster',
+ '$datasource',
+ 'label_values(up{%(windowsExporterSelector)s}, %(clusterLabel)s)' % $._config,
+ label='cluster',
+ refresh='time',
+ hide=if $._config.showMultiCluster then '' else 'variable',
+ sort=1,
+ )
+ )
+ .addRow(
+ g.row('CPU Usage')
+ .addPanel(
+ g.panel('CPU Usage') +
+ g.queryPanel('sum(namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config, '{{container}}') +
+ g.stack,
+ )
+ )
+ .addRow(
+ g.row('CPU Quota')
+ .addPanel(
+ g.panel('CPU Quota') +
+ g.tablePanel([
+ 'sum(namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(kube_pod_windows_container_resource_cpu_cores_request{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container) / sum(kube_pod_windows_container_resource_cpu_cores_request{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(kube_pod_windows_container_resource_cpu_cores_limit{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container) / sum(kube_pod_windows_container_resource_cpu_cores_limit{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'CPU Usage' },
+ 'Value #B': { alias: 'CPU Requests' },
+ 'Value #C': { alias: 'CPU Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'CPU Limits' },
+ 'Value #E': { alias: 'CPU Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Memory Usage')
+ .addPanel(
+ g.panel('Memory Usage') +
+ g.queryPanel('sum(windows_container_private_working_set_usage{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config, '{{container}}') +
+ g.stack,
+ )
+ )
+ .addRow(
+ g.row('Memory Quota')
+ .addPanel(
+ g.panel('Memory Quota') +
+ g.tablePanel([
+ 'sum(windows_container_private_working_set_usage{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(kube_pod_windows_container_resource_memory_request{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(windows_container_private_working_set_usage{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container) / sum(kube_pod_windows_container_resource_memory_request{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(kube_pod_windows_container_resource_memory_limit{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ 'sum(windows_container_private_working_set_usage{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container) / sum(kube_pod_windows_container_resource_memory_limit{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}) by (container)' % $._config,
+ ], tableStyles {
+ 'Value #A': { alias: 'Memory Usage', unit: 'decbytes' },
+ 'Value #B': { alias: 'Memory Requests', unit: 'decbytes' },
+ 'Value #C': { alias: 'Memory Requests %', unit: 'percentunit' },
+ 'Value #D': { alias: 'Memory Limits', unit: 'decbytes' },
+ 'Value #E': { alias: 'Memory Limits %', unit: 'percentunit' },
+ })
+ )
+ )
+ .addRow(
+ g.row('Network I/O')
+ .addPanel(
+ graphPanel.new(
+ 'Network I/O',
+ datasource='$datasource',
+ format='bytes',
+ min=0,
+ legend_rightSide=true,
+ legend_alignAsTable=true,
+ legend_current=true,
+ legend_avg=true,
+ )
+ .addTarget(prometheus.target(
+ 'sort_desc(sum by (container) (rate(windows_container_network_received_bytes_total{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[1m])))' % $._config,
+ legendFormat='Received : {{ container }}',
+ ))
+ .addTarget(prometheus.target(
+ 'sort_desc(sum by (container) (rate(windows_container_network_transmitted_bytes_total{%(clusterLabel)s="$cluster", namespace="$namespace", pod="$pod"}[1m])))' % $._config,
+ legendFormat='Transmitted : {{ container }}',
+ ))
+ )
+ ),
+
+ 'k8s-windows-cluster-rsrc-use.json':
+ local legendLink = '%(prefix)s/d/%(uid)s/k8s-windows-node-rsrc-use' % { prefix: $._config.grafanaK8s.linkPrefix, uid: std.md5('k8s-windows-node-rsrc-use.json') };
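+ // Every per-instance series on this dashboard links through to the
+ // node-level USE dashboard via this legend link.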
+
+ dashboard.new(
+ '%(dashboardNamePrefix)sUSE Method / Cluster(Windows)' % $._config.grafanaK8s,
+ uid=($._config.grafanaDashboardIDs['k8s-windows-cluster-rsrc-use.json']),
+ tags=($._config.grafanaK8s.dashboardTags),
+ ).addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: 'default',
+ },
+ hide: 0,
+ label: null,
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ ).addTemplate(
+ template.new(
+ 'cluster',
+ '$datasource',
+ 'label_values(up{%(windowsExporterSelector)s}, %(clusterLabel)s)' % $._config,
+ label='cluster',
+ refresh='time',
+ hide=if $._config.showMultiCluster then '' else 'variable',
+ sort=1,
+ )
+ )
+ .addRow(
+ g.row('CPU')
+ .addPanel(
+ g.panel('CPU Utilisation') +
+ g.queryPanel('node:windows_node_cpu_utilisation:avg1m{%(clusterLabel)s="$cluster"} * node:windows_node_num_cpu:sum{%(clusterLabel)s="$cluster"} / scalar(sum(node:windows_node_num_cpu:sum{%(clusterLabel)s="$cluster"}))' % $._config, '{{instance}}', legendLink) +
+ g.stack +
+ { yaxes: g.yaxes({ format: 'percentunit', max: 1 }) },
+ )
+ )
+ .addRow(
+ g.row('Memory')
+ .addPanel(
+ g.panel('Memory Utilisation') +
+ g.queryPanel('node:windows_node_memory_utilisation:ratio{%(clusterLabel)s="$cluster"}' % $._config, '{{instance}}', legendLink) +
+ g.stack +
+ { yaxes: g.yaxes({ format: 'percentunit', max: 1 }) },
+ )
+ .addPanel(
+ g.panel('Memory Saturation (Swap I/O Pages)') +
+ g.queryPanel('node:windows_node_memory_swap_io_pages:irate{%(clusterLabel)s="$cluster"}' % $._config, '{{instance}}', legendLink) +
+ g.stack +
+ { yaxes: g.yaxes('short') },
+ )
+ )
+ .addRow(
+ g.row('Disk')
+ .addPanel(
+ g.panel('Disk IO Utilisation') +
+ // Full utilisation would be all disks on each node spending an average of
+ // 1 second per second doing I/O, normalized by node count so the stacked
+ // chart sums to cluster-wide utilisation.
+ g.queryPanel('node:windows_node_disk_utilisation:avg_irate{%(clusterLabel)s="$cluster"} / scalar(node:windows_node:sum{%(clusterLabel)s="$cluster"})' % $._config, '{{instance}}', legendLink) +
+ g.stack +
+ { yaxes: g.yaxes({ format: 'percentunit', max: 1 }) },
+ )
+ )
+ .addRow(
+ g.row('Network')
+ .addPanel(
+ g.panel('Net Utilisation (Transmitted)') +
+ g.queryPanel('node:windows_node_net_utilisation:sum_irate{%(clusterLabel)s="$cluster"}' % $._config, '{{instance}}', legendLink) +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ .addPanel(
+ g.panel('Net Saturation (Dropped)') +
+ g.queryPanel('node:windows_node_net_saturation:sum_irate{%(clusterLabel)s="$cluster"}' % $._config, '{{instance}}', legendLink) +
+ g.stack +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Storage')
+ .addPanel(
+ g.panel('Disk Capacity') +
+ g.queryPanel(
+ |||
+ sum by (instance)(node:windows_node_filesystem_usage:{%(clusterLabel)s="$cluster"})
+ ||| % $._config, '{{instance}}', legendLink
+ ) +
+ g.stack +
+ { yaxes: g.yaxes({ format: 'percentunit', max: 1 }) },
+ ),
+ ),
+
+ 'k8s-windows-node-rsrc-use.json':
+ dashboard.new(
+ '%(dashboardNamePrefix)sUSE Method / Node(Windows)' % $._config.grafanaK8s,
+ uid=($._config.grafanaDashboardIDs['k8s-windows-node-rsrc-use.json']),
+ tags=($._config.grafanaK8s.dashboardTags),
+ ).addTemplate(
+ {
+ current: {
+ text: 'default',
+ value: 'default',
+ },
+ hide: 0,
+ label: null,
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: $._config.datasourceFilterRegex,
+ type: 'datasource',
+ },
+ )
+ .addTemplate(
+ template.new(
+ 'instance',
+ '$datasource',
+ 'label_values(windows_system_system_up_time, instance)',
+ label='Instance',
+ refresh='time',
+ sort=1,
+ )
+ ).addTemplate(
+ template.new(
+ 'cluster',
+ '$datasource',
+ 'label_values(up{%(windowsExporterSelector)s}, %(clusterLabel)s)' % $._config,
+ label='cluster',
+ refresh='time',
+ hide=if $._config.showMultiCluster then '' else 'variable',
+ sort=1,
+ )
+ )
+ .addRow(
+ g.row('CPU')
+ .addPanel(
+ g.panel('CPU Utilisation') +
+ g.queryPanel('node:windows_node_cpu_utilisation:avg1m{%(clusterLabel)s="$cluster", instance="$instance"}' % $._config, 'Utilisation') +
+ { yaxes: g.yaxes('percentunit') },
+ )
+ .addPanel(
+ g.panel('CPU Usage Per Core') +
+ g.queryPanel('sum by (core) (irate(windows_cpu_time_total{%(clusterLabel)s="$cluster", %(windowsExporterSelector)s, mode!="idle", instance="$instance"}[%(grafanaIntervalVar)s]))' % $._config, '{{core}}') +
+ { yaxes: g.yaxes('percentunit') },
+ )
+ )
+ .addRow(
+ g.row('Memory')
+ .addPanel(
+ g.panel('Memory Utilisation %') +
+ g.queryPanel('node:windows_node_memory_utilisation:{%(clusterLabel)s="$cluster", instance="$instance"}' % $._config, 'Memory') +
+ { yaxes: g.yaxes('percentunit') },
+ )
+ .addPanel(
+ graphPanel.new(
+ 'Memory Usage',
+ datasource='$datasource',
+ format='bytes',
+ )
+ .addTarget(prometheus.target(
+ |||
+ max(
+ windows_os_visible_memory_bytes{%(clusterLabel)s="$cluster", %(windowsExporterSelector)s, instance="$instance"}
+ - windows_memory_available_bytes{%(clusterLabel)s="$cluster", %(windowsExporterSelector)s, instance="$instance"}
+ )
+ ||| % $._config, legendFormat='memory used'
+ ))
+ .addTarget(prometheus.target('max(node:windows_node_memory_totalCached_bytes:sum{%(clusterLabel)s="$cluster", instance="$instance"})' % $._config, legendFormat='memory cached'))
+ .addTarget(prometheus.target('max(windows_memory_available_bytes{%(clusterLabel)s="$cluster", %(windowsExporterSelector)s, instance="$instance"})' % $._config, legendFormat='memory free'))
+ )
+ .addPanel(
+ g.panel('Memory Saturation (Swap I/O Pages)') +
+ g.queryPanel('node:windows_node_memory_swap_io_pages:irate{%(clusterLabel)s="$cluster", instance="$instance"}' % $._config, 'Swap IO') +
+ { yaxes: g.yaxes('short') },
+ )
+ )
+ .addRow(
+ g.row('Disk')
+ .addPanel(
+ g.panel('Disk IO Utilisation') +
+ g.queryPanel('node:windows_node_disk_utilisation:avg_irate{%(clusterLabel)s="$cluster", instance="$instance"}' % $._config, 'Utilisation') +
+ { yaxes: g.yaxes('percentunit') },
+ )
+ .addPanel(
+ graphPanel.new('Disk I/O', datasource='$datasource')
+ .addTarget(prometheus.target('max(rate(windows_logical_disk_read_bytes_total{%(clusterLabel)s="$cluster", %(windowsExporterSelector)s, instance="$instance"}[2m]))' % $._config, legendFormat='read'))
+ .addTarget(prometheus.target('max(rate(windows_logical_disk_write_bytes_total{%(clusterLabel)s="$cluster", %(windowsExporterSelector)s, instance="$instance"}[2m]))' % $._config, legendFormat='written'))
+ .addTarget(prometheus.target('max(rate(windows_logical_disk_read_seconds_total{%(clusterLabel)s="$cluster", %(windowsExporterSelector)s, instance="$instance"}[2m]) + rate(windows_logical_disk_write_seconds_total{%(clusterLabel)s="$cluster", %(windowsExporterSelector)s, instance="$instance"}[2m]))' % $._config, legendFormat='io time')) +
+ {
+ seriesOverrides: [
+ {
+ alias: 'read',
+ yaxis: 1,
+ },
+ {
+ alias: 'io time',
+ yaxis: 2,
+ },
+ ],
+ yaxes: [
+ self.yaxe(format='bytes'),
+ self.yaxe(format='ms'),
+ ],
+ }
+ )
+ )
+ .addRow(
+ g.row('Net')
+ .addPanel(
+ g.panel('Net Utilisation (Transmitted)') +
+ g.queryPanel('node:windows_node_net_utilisation:sum_irate{%(clusterLabel)s="$cluster", instance="$instance"}' % $._config, 'Utilisation') +
+ { yaxes: g.yaxes('Bps') },
+ )
+ .addPanel(
+ g.panel('Net Saturation (Dropped)') +
+ g.queryPanel('node:windows_node_net_saturation:sum_irate{%(clusterLabel)s="$cluster", instance="$instance"}' % $._config, 'Saturation') +
+ { yaxes: g.yaxes('Bps') },
+ )
+ )
+ .addRow(
+ g.row('Disk')
+ .addPanel(
+ g.panel('Disk Utilisation') +
+ g.queryPanel(
+ |||
+ node:windows_node_filesystem_usage:{%(clusterLabel)s="$cluster", instance="$instance"}
+ ||| % $._config,
+ '{{volume}}',
+ ) +
+ { yaxes: g.yaxes('percentunit') },
+ ),
+ ),
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/jsonnetfile.json b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/jsonnetfile.json
new file mode 100644
index 0000000..1c64fd0
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/jsonnetfile.json
@@ -0,0 +1,24 @@
+{
+ "version": 1,
+ "dependencies": [
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/grafonnet-lib.git",
+ "subdir": "grafonnet"
+ }
+ },
+ "version": "master"
+ },
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/jsonnet-libs.git",
+ "subdir": "grafana-builder"
+ }
+ },
+ "version": "master"
+ }
+ ],
+ "legacyImports": false
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/absent_alert.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/absent_alert.libsonnet
new file mode 100644
index 0000000..93b326e
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/absent_alert.libsonnet
@@ -0,0 +1,18 @@
+{
+ local absentAlert = self,
+ componentName:: error 'must provide component name',
+ selector:: error 'must provide selector for component',
+
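+ // Usage sketch (hypothetical instantiation): absentAlert {
+ // componentName:: 'KubeAPI', selector:: 'job="kube-apiserver"' } yields a
+ // critical 'KubeAPIDown' alert that fires after 15m without any matching
+ // target reporting up == 1.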
+ alert: '%sDown' % absentAlert.componentName,
+ expr: |||
+ absent(up{%s} == 1)
+ ||| % absentAlert.selector,
+ 'for': '15m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ description: '%s has disappeared from Prometheus target discovery.' % absentAlert.componentName,
+ summary: 'Target disappeared from Prometheus target discovery.',
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/add-runbook-links.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/add-runbook-links.libsonnet
new file mode 100644
index 0000000..dae01a8
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/add-runbook-links.libsonnet
@@ -0,0 +1,23 @@
+local utils = import 'utils.libsonnet';
+
+local lower(x) =
+ local cp(c) = std.codepoint(c);
+ local lowerLetter(c) =
+ if cp(c) >= 65 && cp(c) < 91
+ then std.char(cp(c) + 32)
+ else c;
+ std.join('', std.map(lowerLetter, std.stringChars(x)));
+
+{
+ _config+:: {
+ runbookURLPattern: 'https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-%s',
+ },
+
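+ // For every alerting rule that lacks a runbook_url annotation, derive one
+ // from the lowercased alert name, e.g. an alert named KubePodCrashLooping
+ // (illustrative, not defined here) gets
+ // .../runbook.md#alert-name-kubepodcrashlooping.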
+ prometheusAlerts+::
+ local addRunbookURL(rule) = rule {
+ [if 'alert' in rule && !('runbook_url' in rule.annotations) then 'annotations']+: {
+ runbook_url: $._config.runbookURLPattern % lower(rule.alert),
+ },
+ };
+ utils.mapRuleGroups(addRunbookURL),
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/alerts.jsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/alerts.jsonnet
new file mode 100644
index 0000000..d396a38
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/alerts.jsonnet
@@ -0,0 +1 @@
+std.manifestYamlDoc((import '../mixin.libsonnet').prometheusAlerts)
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/dashboards.jsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/dashboards.jsonnet
new file mode 100644
index 0000000..dadaebe
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/dashboards.jsonnet
@@ -0,0 +1,6 @@
+local dashboards = (import '../mixin.libsonnet').grafanaDashboards;
+
+{
+ [name]: dashboards[name]
+ for name in std.objectFields(dashboards)
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/promgrafonnet/gauge.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/promgrafonnet/gauge.libsonnet
new file mode 100644
index 0000000..ee76985
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/promgrafonnet/gauge.libsonnet
@@ -0,0 +1,60 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local singlestat = grafana.singlestat;
+local prometheus = grafana.prometheus;
+
+{
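+ // Rough usage sketch: promgrafonnet.gauge.new('CPU', 'avg(up)') builds a
+ // 0-100% singlestat gauge ('avg(up)' is a placeholder query);
+ // withLowerBeingBetter() flips the thresholds so high values turn red.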
+ new(title, query)::
+ singlestat.new(
+ title,
+ datasource='$datasource',
+ span=3,
+ format='percent',
+ valueName='current',
+ colors=[
+ 'rgba(245, 54, 54, 0.9)',
+ 'rgba(237, 129, 40, 0.89)',
+ 'rgba(50, 172, 45, 0.97)',
+ ],
+ thresholds='50, 80',
+ valueMaps=[
+ {
+ op: '=',
+ text: 'N/A',
+ value: 'null',
+ },
+ ],
+ )
+ .addTarget(
+ prometheus.target(
+ query
+ )
+ ) + {
+ gauge: {
+ maxValue: 100,
+ minValue: 0,
+ show: true,
+ thresholdLabels: false,
+ thresholdMarkers: true,
+ },
+ withTextNullValue(text):: self {
+ valueMaps: [
+ {
+ op: '=',
+ text: text,
+ value: 'null',
+ },
+ ],
+ },
+ withSpanSize(size):: self {
+ span: size,
+ },
+ withLowerBeingBetter():: self {
+ colors: [
+ 'rgba(50, 172, 45, 0.97)',
+ 'rgba(237, 129, 40, 0.89)',
+ 'rgba(245, 54, 54, 0.9)',
+ ],
+ thresholds: '80, 90',
+ },
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/promgrafonnet/numbersinglestat.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/promgrafonnet/numbersinglestat.libsonnet
new file mode 100644
index 0000000..71a2525
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/promgrafonnet/numbersinglestat.libsonnet
@@ -0,0 +1,48 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local singlestat = grafana.singlestat;
+local prometheus = grafana.prometheus;
+
+{
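+ // Builds a plain single-number stat panel; null query results render as '0'
+ // unless overridden with withTextNullValue().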
+ new(title, query)::
+ singlestat.new(
+ title,
+ datasource='$datasource',
+ span=3,
+ valueName='current',
+ valueMaps=[
+ {
+ op: '=',
+ text: '0',
+ value: 'null',
+ },
+ ],
+ )
+ .addTarget(
+ prometheus.target(
+ query
+ )
+ ) + {
+ withTextNullValue(text):: self {
+ valueMaps: [
+ {
+ op: '=',
+ text: text,
+ value: 'null',
+ },
+ ],
+ },
+ withSpanSize(size):: self {
+ span: size,
+ },
+ withPostfix(postfix):: self {
+ postfix: postfix,
+ },
+ withSparkline():: self {
+ sparkline: {
+ show: true,
+ lineColor: 'rgb(31, 120, 193)',
+ fillColor: 'rgba(31, 118, 189, 0.18)',
+ },
+ },
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/promgrafonnet/promgrafonnet.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/promgrafonnet/promgrafonnet.libsonnet
new file mode 100644
index 0000000..8b74e36
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/promgrafonnet/promgrafonnet.libsonnet
@@ -0,0 +1,4 @@
+{
+ numbersinglestat:: import 'numbersinglestat.libsonnet',
+ gauge:: import 'gauge.libsonnet',
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/rules.jsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/rules.jsonnet
new file mode 100644
index 0000000..2d7fa91
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/rules.jsonnet
@@ -0,0 +1 @@
+std.manifestYamlDoc((import '../mixin.libsonnet').prometheusRules)
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/utils.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/utils.libsonnet
new file mode 100644
index 0000000..a8c4df4
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/lib/utils.libsonnet
@@ -0,0 +1,18 @@
+{
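+ // Applies f to every rule of every group in a prometheusRules/prometheusAlerts
+ // object, preserving the surrounding group structure.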
+ mapRuleGroups(f): {
+ groups: [
+ group {
+ rules: [
+ f(rule)
+ for rule in super.rules
+ ],
+ }
+ for group in super.groups
+ ],
+ },
+
+ humanizeSeconds(s)::
+ if s > 60 * 60 * 24
+ then '%.1f days' % (s / 60 / 60 / 24)
+ else '%.1f hours' % (s / 60 / 60),
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/mixin.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/mixin.libsonnet
new file mode 100644
index 0000000..152721d
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/mixin.libsonnet
@@ -0,0 +1,4 @@
+(import 'alerts/alerts.libsonnet') +
+(import 'dashboards/dashboards.libsonnet') +
+(import 'rules/rules.libsonnet') +
+(import 'config.libsonnet')
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/apps.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/apps.libsonnet
new file mode 100644
index 0000000..5399fae
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/apps.libsonnet
@@ -0,0 +1,221 @@
+{
+ _config+:: {
+ cadvisorSelector: 'job="cadvisor"',
+ kubeStateMetricsSelector: 'job="kube-state-metrics"',
+ },
+
+ prometheusRules+:: {
+ groups+: [
+ {
+ name: 'k8s.rules',
+ rules: [
+ {
+ // Reduces the cardinality of this timeseries by a factor of #cores, which
+ // makes it more usable in dashboards. It also allows us to do things like
+ // quantile_over_time(...) which would otherwise not be possible.
+ record: 'node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate',
+ expr: |||
+ sum by (%(clusterLabel)s, namespace, pod, container) (
+ irate(container_cpu_usage_seconds_total{%(cadvisorSelector)s, image!=""}[5m])
+ ) * on (%(clusterLabel)s, namespace, pod) group_left(node) topk by (%(clusterLabel)s, namespace, pod) (
+ 1, max by(%(clusterLabel)s, namespace, pod, node) (kube_pod_info{node!=""})
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'node_namespace_pod_container:container_memory_working_set_bytes',
+ expr: |||
+ container_memory_working_set_bytes{%(cadvisorSelector)s, image!=""}
+ * on (%(clusterLabel)s, namespace, pod) group_left(node) topk by(%(clusterLabel)s, namespace, pod) (1,
+ max by(%(clusterLabel)s, namespace, pod, node) (kube_pod_info{node!=""})
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'node_namespace_pod_container:container_memory_rss',
+ expr: |||
+ container_memory_rss{%(cadvisorSelector)s, image!=""}
+ * on (%(clusterLabel)s, namespace, pod) group_left(node) topk by(%(clusterLabel)s, namespace, pod) (1,
+ max by(%(clusterLabel)s, namespace, pod, node) (kube_pod_info{node!=""})
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'node_namespace_pod_container:container_memory_cache',
+ expr: |||
+ container_memory_cache{%(cadvisorSelector)s, image!=""}
+ * on (%(clusterLabel)s, namespace, pod) group_left(node) topk by(%(clusterLabel)s, namespace, pod) (1,
+ max by(%(clusterLabel)s, namespace, pod, node) (kube_pod_info{node!=""})
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'node_namespace_pod_container:container_memory_swap',
+ expr: |||
+ container_memory_swap{%(cadvisorSelector)s, image!=""}
+ * on (%(clusterLabel)s, namespace, pod) group_left(node) topk by(%(clusterLabel)s, namespace, pod) (1,
+ max by(%(clusterLabel)s, namespace, pod, node) (kube_pod_info{node!=""})
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'cluster:namespace:pod_memory:active:kube_pod_container_resource_requests',
+ expr: |||
+ kube_pod_container_resource_requests{resource="memory",%(kubeStateMetricsSelector)s} * on (namespace, pod, %(clusterLabel)s)
+ group_left() max by (namespace, pod, %(clusterLabel)s) (
+ (kube_pod_status_phase{phase=~"Pending|Running"} == 1)
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'namespace_memory:kube_pod_container_resource_requests:sum',
+ expr: |||
+ sum by (namespace, %(clusterLabel)s) (
+ sum by (namespace, pod, %(clusterLabel)s) (
+ max by (namespace, pod, container, %(clusterLabel)s) (
+ kube_pod_container_resource_requests{resource="memory",%(kubeStateMetricsSelector)s}
+ ) * on(namespace, pod, %(clusterLabel)s) group_left() max by (namespace, pod, %(clusterLabel)s) (
+ kube_pod_status_phase{phase=~"Pending|Running"} == 1
+ )
+ )
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'cluster:namespace:pod_cpu:active:kube_pod_container_resource_requests',
+ expr: |||
+ kube_pod_container_resource_requests{resource="cpu",%(kubeStateMetricsSelector)s} * on (namespace, pod, %(clusterLabel)s)
+ group_left() max by (namespace, pod, %(clusterLabel)s) (
+ (kube_pod_status_phase{phase=~"Pending|Running"} == 1)
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'namespace_cpu:kube_pod_container_resource_requests:sum',
+ expr: |||
+ sum by (namespace, %(clusterLabel)s) (
+ sum by (namespace, pod, %(clusterLabel)s) (
+ max by (namespace, pod, container, %(clusterLabel)s) (
+ kube_pod_container_resource_requests{resource="cpu",%(kubeStateMetricsSelector)s}
+ ) * on(namespace, pod, %(clusterLabel)s) group_left() max by (namespace, pod, %(clusterLabel)s) (
+ kube_pod_status_phase{phase=~"Pending|Running"} == 1
+ )
+ )
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'cluster:namespace:pod_memory:active:kube_pod_container_resource_limits',
+ expr: |||
+ kube_pod_container_resource_limits{resource="memory",%(kubeStateMetricsSelector)s} * on (namespace, pod, %(clusterLabel)s)
+ group_left() max by (namespace, pod, %(clusterLabel)s) (
+ (kube_pod_status_phase{phase=~"Pending|Running"} == 1)
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'namespace_memory:kube_pod_container_resource_limits:sum',
+ expr: |||
+ sum by (namespace, %(clusterLabel)s) (
+ sum by (namespace, pod, %(clusterLabel)s) (
+ max by (namespace, pod, container, %(clusterLabel)s) (
+ kube_pod_container_resource_limits{resource="memory",%(kubeStateMetricsSelector)s}
+ ) * on(namespace, pod, %(clusterLabel)s) group_left() max by (namespace, pod, %(clusterLabel)s) (
+ kube_pod_status_phase{phase=~"Pending|Running"} == 1
+ )
+ )
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits',
+ expr: |||
+ kube_pod_container_resource_limits{resource="cpu",%(kubeStateMetricsSelector)s} * on (namespace, pod, %(clusterLabel)s)
+ group_left() max by (namespace, pod, %(clusterLabel)s) (
+ (kube_pod_status_phase{phase=~"Pending|Running"} == 1)
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'namespace_cpu:kube_pod_container_resource_limits:sum',
+ expr: |||
+ sum by (namespace, %(clusterLabel)s) (
+ sum by (namespace, pod, %(clusterLabel)s) (
+ max by (namespace, pod, container, %(clusterLabel)s) (
+ kube_pod_container_resource_limits{resource="cpu",%(kubeStateMetricsSelector)s}
+ ) * on(namespace, pod, %(clusterLabel)s) group_left() max by (namespace, pod, %(clusterLabel)s) (
+ kube_pod_status_phase{phase=~"Pending|Running"} == 1
+ )
+ )
+ )
+ ||| % $._config,
+ },
+ // workload aggregation for deployments
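+ // Pods created by a Deployment are owned by a ReplicaSet, so resolve the
+ // pod -> ReplicaSet -> owner chain and relabel the ReplicaSet's owner_name
+ // as the pod's workload.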
+ {
+ record: 'namespace_workload_pod:kube_pod_owner:relabel',
+ expr: |||
+ max by (%(clusterLabel)s, namespace, workload, pod) (
+ label_replace(
+ label_replace(
+ kube_pod_owner{%(kubeStateMetricsSelector)s, owner_kind="ReplicaSet"},
+ "replicaset", "$1", "owner_name", "(.*)"
+ ) * on(replicaset, namespace) group_left(owner_name) topk by(replicaset, namespace) (
+ 1, max by (replicaset, namespace, owner_name) (
+ kube_replicaset_owner{%(kubeStateMetricsSelector)s}
+ )
+ ),
+ "workload", "$1", "owner_name", "(.*)"
+ )
+ )
+ ||| % $._config,
+ labels: {
+ workload_type: 'deployment',
+ },
+ },
+ {
+ record: 'namespace_workload_pod:kube_pod_owner:relabel',
+ expr: |||
+ max by (%(clusterLabel)s, namespace, workload, pod) (
+ label_replace(
+ kube_pod_owner{%(kubeStateMetricsSelector)s, owner_kind="DaemonSet"},
+ "workload", "$1", "owner_name", "(.*)"
+ )
+ )
+ ||| % $._config,
+ labels: {
+ workload_type: 'daemonset',
+ },
+ },
+ {
+ record: 'namespace_workload_pod:kube_pod_owner:relabel',
+ expr: |||
+ max by (%(clusterLabel)s, namespace, workload, pod) (
+ label_replace(
+ kube_pod_owner{%(kubeStateMetricsSelector)s, owner_kind="StatefulSet"},
+ "workload", "$1", "owner_name", "(.*)"
+ )
+ )
+ ||| % $._config,
+ labels: {
+ workload_type: 'statefulset',
+ },
+ },
+ {
+ record: 'namespace_workload_pod:kube_pod_owner:relabel',
+ expr: |||
+ max by (%(clusterLabel)s, namespace, workload, pod) (
+ label_replace(
+ kube_pod_owner{%(kubeStateMetricsSelector)s, owner_kind="Job"},
+ "workload", "$1", "owner_name", "(.*)"
+ )
+ )
+ ||| % $._config,
+ labels: {
+ workload_type: 'job',
+ },
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/kube_apiserver.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/kube_apiserver.libsonnet
new file mode 100644
index 0000000..be20c25
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/kube_apiserver.libsonnet
@@ -0,0 +1,284 @@
+{
+ _config+:: {
+ kubeApiserverSelector: 'job="kube-apiserver"',
+ podLabel: 'pod',
+ kubeApiserverReadSelector: 'verb=~"LIST|GET"',
+ kubeApiserverWriteSelector: 'verb=~"POST|PUT|PATCH|DELETE"',
+ kubeApiserverNonStreamingSelector: 'subresource!~"proxy|attach|log|exec|portforward"',
+ // These are buckets that exist on the apiserver_request_slo_duration_seconds_bucket histogram.
+ // They are what the Kubernetes SIG Scalability uses to measure the availability of Kubernetes clusters.
+ // If you want to change these, make sure the "le" buckets exist on the histogram!
+ kubeApiserverReadResourceLatency: '1',
+ kubeApiserverReadNamespaceLatency: '5',
+ kubeApiserverReadClusterLatency: '30',
+ kubeApiserverWriteLatency: '1',
+ },
+
+ prometheusRules+:: {
+ local SLODays = $._config.SLOs.apiserver.days + 'd',
+ local verbs = [
+ { type: 'read', selector: $._config.kubeApiserverReadSelector },
+ { type: 'write', selector: $._config.kubeApiserverWriteSelector },
+ ],
+
+ groups+: [
+ {
+ name: 'kube-apiserver-burnrate.rules',
+ rules: [
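+ // Error-budget burn rate for read requests over each SLO window: requests
+ // slower than the per-scope latency thresholds, plus 5xx responses, divided
+ // by all read requests.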
+ {
+ record: 'apiserver_request:burnrate%(window)s' % w,
+ expr: |||
+ (
+ (
+ # too slow
+ sum by (%(clusterLabel)s) (rate(apiserver_request_slo_duration_seconds_count{%(kubeApiserverSelector)s,%(kubeApiserverReadSelector)s,%(kubeApiserverNonStreamingSelector)s}[%(window)s]))
+ -
+ (
+ (
+ sum by (%(clusterLabel)s) (rate(apiserver_request_slo_duration_seconds_bucket{%(kubeApiserverSelector)s,%(kubeApiserverReadSelector)s,%(kubeApiserverNonStreamingSelector)s,scope=~"resource|",le="%(kubeApiserverReadResourceLatency)s"}[%(window)s]))
+ or
+ vector(0)
+ )
+ +
+ sum by (%(clusterLabel)s) (rate(apiserver_request_slo_duration_seconds_bucket{%(kubeApiserverSelector)s,%(kubeApiserverReadSelector)s,%(kubeApiserverNonStreamingSelector)s,scope="namespace",le="%(kubeApiserverReadNamespaceLatency)s"}[%(window)s]))
+ +
+ sum by (%(clusterLabel)s) (rate(apiserver_request_slo_duration_seconds_bucket{%(kubeApiserverSelector)s,%(kubeApiserverReadSelector)s,%(kubeApiserverNonStreamingSelector)s,scope="cluster",le="%(kubeApiserverReadClusterLatency)s"}[%(window)s]))
+ )
+ )
+ +
+ # errors
+ sum by (%(clusterLabel)s) (rate(apiserver_request_total{%(kubeApiserverSelector)s,%(kubeApiserverReadSelector)s,code=~"5.."}[%(window)s]))
+ )
+ /
+ sum by (%(clusterLabel)s) (rate(apiserver_request_total{%(kubeApiserverSelector)s,%(kubeApiserverReadSelector)s}[%(window)s]))
+ ||| % {
+ clusterLabel: $._config.clusterLabel,
+ window: w,
+ kubeApiserverSelector: $._config.kubeApiserverSelector,
+ kubeApiserverReadSelector: $._config.kubeApiserverReadSelector,
+ kubeApiserverNonStreamingSelector: $._config.kubeApiserverNonStreamingSelector,
+ kubeApiserverReadResourceLatency: $._config.kubeApiserverReadResourceLatency,
+ kubeApiserverReadNamespaceLatency: $._config.kubeApiserverReadNamespaceLatency,
+ kubeApiserverReadClusterLatency: $._config.kubeApiserverReadClusterLatency,
+ },
+ labels: {
+ verb: 'read',
+ },
+ }
+ for w in std.set([ // deduplicated union of the short and long SLO windows
+ w.short
+ for w in $._config.SLOs.apiserver.windows
+ ] + [
+ w.long
+ for w in $._config.SLOs.apiserver.windows
+ ])
+ ] + [
+ {
+ record: 'apiserver_request:burnrate%(window)s' % w,
+ expr: |||
+ (
+ (
+ # too slow
+ sum by (%(clusterLabel)s) (rate(apiserver_request_slo_duration_seconds_count{%(kubeApiserverSelector)s,%(kubeApiserverWriteSelector)s,%(kubeApiserverNonStreamingSelector)s}[%(window)s]))
+ -
+ sum by (%(clusterLabel)s) (rate(apiserver_request_slo_duration_seconds_bucket{%(kubeApiserverSelector)s,%(kubeApiserverWriteSelector)s,%(kubeApiserverNonStreamingSelector)s,le="%(kubeApiserverWriteLatency)s"}[%(window)s]))
+ )
+ +
+ sum by (%(clusterLabel)s) (rate(apiserver_request_total{%(kubeApiserverSelector)s,%(kubeApiserverWriteSelector)s,code=~"5.."}[%(window)s]))
+ )
+ /
+ sum by (%(clusterLabel)s) (rate(apiserver_request_total{%(kubeApiserverSelector)s,%(kubeApiserverWriteSelector)s}[%(window)s]))
+ ||| % {
+ clusterLabel: $._config.clusterLabel,
+ window: w,
+ kubeApiserverSelector: $._config.kubeApiserverSelector,
+ kubeApiserverWriteSelector: $._config.kubeApiserverWriteSelector,
+ kubeApiserverNonStreamingSelector: $._config.kubeApiserverNonStreamingSelector,
+ kubeApiserverWriteLatency: $._config.kubeApiserverWriteLatency,
+ },
+ labels: {
+ verb: 'write',
+ },
+ }
+ for w in std.set([ // deduplicated union of the short and long SLO windows
+ w.short
+ for w in $._config.SLOs.apiserver.windows
+ ] + [
+ w.long
+ for w in $._config.SLOs.apiserver.windows
+ ])
+ ],
+ },
+ {
+ name: 'kube-apiserver-histogram.rules',
+ rules:
+ [
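+ // 99th-percentile request latency per resource, recorded separately for
+ // read and write verbs; the trailing '> 0' drops zero-valued series.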
+ {
+ record: 'cluster_quantile:apiserver_request_slo_duration_seconds:histogram_quantile',
+ expr: |||
+ histogram_quantile(0.99, sum by (%s, le, resource) (rate(apiserver_request_slo_duration_seconds_bucket{%s}[5m]))) > 0
+ ||| % [$._config.clusterLabel, std.join(',', [$._config.kubeApiserverSelector, verb.selector, $._config.kubeApiserverNonStreamingSelector])],
+ labels: {
+ verb: verb.type,
+ quantile: '0.99',
+ },
+ }
+ for verb in verbs
+ ],
+ },
+ {
+ name: 'kube-apiserver-availability.rules',
+ interval: '3m',
+ rules: [
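+ // Availability over the SLO window is derived from the hourly increase
+ // recordings below: avg_over_time(<rule>:increase1h[window]) * 24 * days
+ // approximates the total increase over the window at low query cost.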
+ {
+ record: 'code_verb:apiserver_request_total:increase%s' % SLODays,
+ expr: |||
+ avg_over_time(code_verb:apiserver_request_total:increase1h[%s]) * 24 * %d
+ ||| % [SLODays, $._config.SLOs.apiserver.days],
+ },
+ ] + [
+ {
+ record: 'code:apiserver_request_total:increase%s' % SLODays,
+ expr: |||
+ sum by (%s, code) (code_verb:apiserver_request_total:increase%s{%s})
+ ||| % [$._config.clusterLabel, SLODays, verb.selector],
+ labels: {
+ verb: verb.type,
+ },
+ }
+ for verb in verbs
+ ] + [
+ {
+ record: 'cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase1h',
+ expr: |||
+ sum by (%(clusterLabel)s, verb, scope) (increase(apiserver_request_slo_duration_seconds_count{%(kubeApiserverSelector)s}[1h]))
+ ||| % $._config,
+ },
+ {
+ record: 'cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase%s' % SLODays,
+ expr: |||
+ sum by (%s, verb, scope) (avg_over_time(cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase1h[%s]) * 24 * %s)
+ ||| % [$._config.clusterLabel, SLODays, $._config.SLOs.apiserver.days],
+ },
+ {
+ record: 'cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase1h',
+ expr: |||
+ sum by (%(clusterLabel)s, verb, scope, le) (increase(apiserver_request_slo_duration_seconds_bucket[1h]))
+ ||| % $._config,
+ },
+ {
+ record: 'cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase%s' % SLODays,
+ expr: |||
+ sum by (%s, verb, scope, le) (avg_over_time(cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase1h[%s]) * 24 * %s)
+ ||| % [$._config.clusterLabel, SLODays, $._config.SLOs.apiserver.days],
+ },
+ {
+ record: 'apiserver_request:availability%s' % SLODays,
+ expr: |||
+ 1 - (
+ (
+ # write too slow
+ sum by (%(clusterLabel)s) (cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase%(SLODays)s{%(kubeApiserverWriteSelector)s})
+ -
+ sum by (%(clusterLabel)s) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase%(SLODays)s{%(kubeApiserverWriteSelector)s,le="%(kubeApiserverWriteLatency)s"})
+ ) +
+ (
+ # read too slow
+ sum by (%(clusterLabel)s) (cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase%(SLODays)s{%(kubeApiserverReadSelector)s})
+ -
+ (
+ (
+ sum by (%(clusterLabel)s) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase%(SLODays)s{%(kubeApiserverReadSelector)s,scope=~"resource|",le="%(kubeApiserverReadResourceLatency)s"})
+ or
+ vector(0)
+ )
+ +
+ sum by (%(clusterLabel)s) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase%(SLODays)s{%(kubeApiserverReadSelector)s,scope="namespace",le="%(kubeApiserverReadNamespaceLatency)s"})
+ +
+ sum by (%(clusterLabel)s) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase%(SLODays)s{%(kubeApiserverReadSelector)s,scope="cluster",le="%(kubeApiserverReadClusterLatency)s"})
+ )
+ ) +
+ # errors
+ sum by (%(clusterLabel)s) (code:apiserver_request_total:increase%(SLODays)s{code=~"5.."} or vector(0))
+ )
+ /
+ sum by (%(clusterLabel)s) (code:apiserver_request_total:increase%(SLODays)s)
+ ||| % ($._config { SLODays: SLODays }),
+ labels: {
+ verb: 'all',
+ },
+ },
+ {
+ record: 'apiserver_request:availability%s' % SLODays,
+ expr: |||
+ 1 - (
+ sum by (%(clusterLabel)s) (cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase%(SLODays)s{%(kubeApiserverReadSelector)s})
+ -
+ (
+ # too slow
+ (
+ sum by (%(clusterLabel)s) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase%(SLODays)s{%(kubeApiserverReadSelector)s,scope=~"resource|",le="%(kubeApiserverReadResourceLatency)s"})
+ or
+ vector(0)
+ )
+ +
+ sum by (%(clusterLabel)s) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase%(SLODays)s{%(kubeApiserverReadSelector)s,scope="namespace",le="%(kubeApiserverReadNamespaceLatency)s"})
+ +
+ sum by (%(clusterLabel)s) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase%(SLODays)s{%(kubeApiserverReadSelector)s,scope="cluster",le="%(kubeApiserverReadClusterLatency)s"})
+ )
+ +
+ # errors
+ sum by (%(clusterLabel)s) (code:apiserver_request_total:increase%(SLODays)s{verb="read",code=~"5.."} or vector(0))
+ )
+ /
+ sum by (%(clusterLabel)s) (code:apiserver_request_total:increase%(SLODays)s{verb="read"})
+ ||| % ($._config { SLODays: SLODays, days: $._config.SLOs.apiserver.days }),
+ labels: {
+ verb: 'read',
+ },
+ },
+ {
+ record: 'apiserver_request:availability%s' % SLODays,
+ expr: |||
+ 1 - (
+ (
+ # too slow
+ sum by (%(clusterLabel)s) (cluster_verb_scope:apiserver_request_slo_duration_seconds_count:increase%(SLODays)s{%(kubeApiserverWriteSelector)s})
+ -
+ sum by (%(clusterLabel)s) (cluster_verb_scope_le:apiserver_request_slo_duration_seconds_bucket:increase%(SLODays)s{%(kubeApiserverWriteSelector)s,le="%(kubeApiserverWriteLatency)s"})
+ )
+ +
+ # errors
+ sum by (%(clusterLabel)s) (code:apiserver_request_total:increase%(SLODays)s{verb="write",code=~"5.."} or vector(0))
+ )
+ /
+ sum by (%(clusterLabel)s) (code:apiserver_request_total:increase%(SLODays)s{verb="write"})
+ ||| % ($._config { SLODays: SLODays, days: $._config.SLOs.apiserver.days }),
+ labels: {
+ verb: 'write',
+ },
+ },
+ ] + [
+ {
+ record: 'code_resource:apiserver_request_total:rate5m',
+ expr: |||
+ sum by (%s,code,resource) (rate(apiserver_request_total{%s}[5m]))
+ ||| % [$._config.clusterLabel, std.join(',', [$._config.kubeApiserverSelector, verb.selector])],
+ labels: {
+ verb: verb.type,
+ },
+ }
+ for verb in verbs
+ ] + [
+ {
+ record: 'code_verb:apiserver_request_total:increase1h',
+ expr: |||
+ sum by (%s, code, verb) (increase(apiserver_request_total{%s,verb=~"LIST|GET|POST|PUT|PATCH|DELETE",code=~"%s"}[1h]))
+ ||| % [$._config.clusterLabel, $._config.kubeApiserverSelector, code],
+ }
+ for code in ['2..', '3..', '4..', '5..']
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/kube_scheduler.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/kube_scheduler.libsonnet
new file mode 100644
index 0000000..f319642
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/kube_scheduler.libsonnet
@@ -0,0 +1,31 @@
+{
+ _config+:: {
+ kubeSchedulerSelector: 'job="kube-scheduler"',
+ podLabel: 'pod',
+ },
+
+ prometheusRules+:: {
+ groups+: [
+ {
+ name: 'kube-scheduler.rules',
+ rules: [
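+ // Record p99/p90/p50 end-to-end, algorithm, and binding latency quantiles,
+ // aggregating away the instance and pod labels.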
+ {
+ record: 'cluster_quantile:%s:histogram_quantile' % metric,
+ expr: |||
+ histogram_quantile(%(quantile)s, sum(rate(%(metric)s_bucket{%(kubeSchedulerSelector)s}[5m])) without(instance, %(podLabel)s))
+ ||| % ({ quantile: quantile, metric: metric } + $._config),
+ labels: {
+ quantile: quantile,
+ },
+ }
+ for quantile in ['0.99', '0.9', '0.5']
+ for metric in [
+ 'scheduler_e2e_scheduling_duration_seconds',
+ 'scheduler_scheduling_algorithm_duration_seconds',
+ 'scheduler_binding_duration_seconds',
+ ]
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/kubelet.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/kubelet.libsonnet
new file mode 100644
index 0000000..e932a87
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/kubelet.libsonnet
@@ -0,0 +1,25 @@
+{
+ _config+:: {
+ kubeletSelector: 'job="kubelet"',
+ },
+
+ prometheusRules+:: {
+ groups+: [
+ {
+ name: 'kubelet.rules',
+ rules: [
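+ // PLEG (pod lifecycle event generator) relist latency quantiles, joined to
+ // kubelet_node_name so the result carries a node label.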
+ {
+ record: 'node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile',
+ expr: |||
+ histogram_quantile(%(quantile)s, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{%(kubeletSelector)s}[5m])) by (%(clusterLabel)s, instance, le) * on(%(clusterLabel)s, instance) group_left(node) kubelet_node_name{%(kubeletSelector)s})
+ ||| % ({ quantile: quantile } + $._config),
+ labels: {
+ quantile: quantile,
+ },
+ }
+ for quantile in ['0.99', '0.9', '0.5']
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/node.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/node.libsonnet
new file mode 100644
index 0000000..e4e949b
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/node.libsonnet
@@ -0,0 +1,79 @@
+{
+ _config+:: {
+ kubeStateMetricsSelector: 'job="kube-state-metrics"',
+ nodeExporterSelector: 'job="node-exporter"',
+ podLabel: 'pod',
+ },
+
+ prometheusRules+:: {
+ groups+: [
+ {
+ name: 'node.rules',
+ rules: [
+ {
+ // This rule results in the tuples (node, namespace, pod) => 1.
+ // It is used to calculate per-node metrics, given namespace & pod.
+ // We use the topk() aggregator to ensure that each (namespace,
+ // pod) tuple is only associated with one node, and thus avoid
+ // "many-to-many matching not allowed" errors when joining with
+ // other timeseries on (namespace, pod). See node:node_num_cpu:sum
+ // below for an example.
+ record: 'node_namespace_pod:kube_pod_info:',
+ expr: |||
+ topk by(%(clusterLabel)s, namespace, %(podLabel)s) (1,
+ max by (%(clusterLabel)s, node, namespace, %(podLabel)s) (
+ label_replace(kube_pod_info{%(kubeStateMetricsSelector)s,node!=""}, "%(podLabel)s", "$1", "pod", "(.*)")
+ ))
+ ||| % $._config,
+ },
+ {
+ // This rule gives the number of CPUs per node.
+ record: 'node:node_num_cpu:sum',
+ expr: |||
+ count by (%(clusterLabel)s, node) (
+ node_cpu_seconds_total{mode="idle",%(nodeExporterSelector)s}
+ * on (namespace, %(podLabel)s) group_left(node)
+ topk by(namespace, %(podLabel)s) (1, node_namespace_pod:kube_pod_info:)
+ )
+ ||| % $._config,
+ },
+ // Add separate rules for Available memory, so we can aggregate across clusters in dashboards.
+ {
+ record: ':node_memory_MemAvailable_bytes:sum',
+ expr: |||
+ sum(
+ node_memory_MemAvailable_bytes{%(nodeExporterSelector)s} or
+ (
+ node_memory_Buffers_bytes{%(nodeExporterSelector)s} +
+ node_memory_Cached_bytes{%(nodeExporterSelector)s} +
+ node_memory_MemFree_bytes{%(nodeExporterSelector)s} +
+ node_memory_Slab_bytes{%(nodeExporterSelector)s}
+ )
+ ) by (%(clusterLabel)s)
+ ||| % $._config,
+ },
+ {
+ // This rule gives cpu utilization per node.
+ record: 'node:node_cpu_utilization:ratio_rate5m',
+ expr: |||
+ avg by (%(clusterLabel)s, node) (
+ sum without (mode) (
+ rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal",%(nodeExporterSelector)s}[5m])
+ )
+ )
+ ||| % $._config,
+ },
+ {
+ // This rule gives cpu utilization per cluster
+ record: 'cluster:node_cpu:ratio_rate5m',
+ expr: |||
+ avg by (%(clusterLabel)s) (
+ node:node_cpu_utilization:ratio_rate5m
+ )
+ ||| % $._config,
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/rules.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/rules.libsonnet
new file mode 100644
index 0000000..4e116d4
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/rules.libsonnet
@@ -0,0 +1,5 @@
+(import 'kube_apiserver.libsonnet') +
+(import 'apps.libsonnet') +
+(import 'kube_scheduler.libsonnet') +
+(import 'node.libsonnet') +
+(import 'kubelet.libsonnet')
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/windows.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/windows.libsonnet
new file mode 100644
index 0000000..3532ffa
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/rules/windows.libsonnet
@@ -0,0 +1,256 @@
+{
+ prometheusRules+:: {
+ groups+: [
+ {
+ name: 'windows.node.rules',
+ rules: [
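+ // Naming convention: records prefixed with ':' aggregate across the whole
+ // cluster, while most 'node:'-prefixed records are keyed by instance.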
+ {
+ // This rule gives the number of Windows nodes.
+ record: 'node:windows_node:sum',
+ expr: |||
+ count (
+ windows_system_system_up_time{%(windowsExporterSelector)s}
+ )
+ ||| % $._config,
+ },
+ {
+ // This rule gives the number of CPUs per node.
+ record: 'node:windows_node_num_cpu:sum',
+ expr: |||
+ count by (instance) (sum by (instance, core) (
+ windows_cpu_time_total{%(windowsExporterSelector)s}
+ ))
+ ||| % $._config,
+ },
+ {
+ // CPU utilisation is the percentage of CPU time that is not idle.
+ record: ':windows_node_cpu_utilisation:avg1m',
+ expr: |||
+ 1 - avg(rate(windows_cpu_time_total{%(windowsExporterSelector)s,mode="idle"}[1m]))
+ ||| % $._config,
+ },
+ {
+ // CPU utilisation is the percentage of CPU time that is not idle.
+ record: 'node:windows_node_cpu_utilisation:avg1m',
+ expr: |||
+ 1 - avg by (instance) (
+ rate(windows_cpu_time_total{%(windowsExporterSelector)s,mode="idle"}[1m])
+ )
+ ||| % $._config,
+ },
+ {
+ record: ':windows_node_memory_utilisation:',
+ expr: |||
+ 1 -
+ sum(windows_memory_available_bytes{%(windowsExporterSelector)s})
+ /
+ sum(windows_os_visible_memory_bytes{%(windowsExporterSelector)s})
+ ||| % $._config,
+ },
+ // Add separate rules for Free & Total, so we can aggregate across clusters
+ // in dashboards.
+ {
+ record: ':windows_node_memory_MemFreeCached_bytes:sum',
+ expr: |||
+ sum(windows_memory_available_bytes{%(windowsExporterSelector)s} + windows_memory_cache_bytes{%(windowsExporterSelector)s})
+ ||| % $._config,
+ },
+ {
+ record: 'node:windows_node_memory_totalCached_bytes:sum',
+ expr: |||
+ (windows_memory_cache_bytes{%(windowsExporterSelector)s} + windows_memory_modified_page_list_bytes{%(windowsExporterSelector)s} + windows_memory_standby_cache_core_bytes{%(windowsExporterSelector)s} + windows_memory_standby_cache_normal_priority_bytes{%(windowsExporterSelector)s} + windows_memory_standby_cache_reserve_bytes{%(windowsExporterSelector)s})
+ ||| % $._config,
+ },
+ {
+ record: ':windows_node_memory_MemTotal_bytes:sum',
+ expr: |||
+ sum(windows_os_visible_memory_bytes{%(windowsExporterSelector)s})
+ ||| % $._config,
+ },
+ {
+ // Available memory per node
+ // SINCE 2018-02-08
+ record: 'node:windows_node_memory_bytes_available:sum',
+ expr: |||
+ sum by (instance) (
+ (windows_memory_available_bytes{%(windowsExporterSelector)s})
+ )
+ ||| % $._config,
+ },
+ {
+ // Total memory per node
+ record: 'node:windows_node_memory_bytes_total:sum',
+ expr: |||
+ sum by (instance) (
+ windows_os_visible_memory_bytes{%(windowsExporterSelector)s}
+ )
+ ||| % $._config,
+ },
+ {
+ // Memory utilisation per node, normalized by per-node memory
+ record: 'node:windows_node_memory_utilisation:ratio',
+ expr: |||
+ (node:windows_node_memory_bytes_total:sum - node:windows_node_memory_bytes_available:sum)
+ /
+ scalar(sum(node:windows_node_memory_bytes_total:sum))
+ |||,
+ },
+ {
+ record: 'node:windows_node_memory_utilisation:',
+ expr: |||
+ 1 - (node:windows_node_memory_bytes_available:sum / node:windows_node_memory_bytes_total:sum)
+ ||| % $._config,
+ },
+ {
+ record: 'node:windows_node_memory_swap_io_pages:irate',
+ expr: |||
+ irate(windows_memory_swap_page_operations_total{%(windowsExporterSelector)s}[5m])
+ ||| % $._config,
+ },
+ {
+          // Disk utilisation (time spent doing I/O; expressed as a per-second rate, so bounded by 1)
+ record: ':windows_node_disk_utilisation:avg_irate',
+ expr: |||
+ avg(irate(windows_logical_disk_read_seconds_total{%(windowsExporterSelector)s}[1m]) +
+ irate(windows_logical_disk_write_seconds_total{%(windowsExporterSelector)s}[1m])
+ )
+ ||| % $._config,
+ },
+ {
+          // Disk utilisation (time spent doing I/O; expressed as a per-second rate, so bounded by 1)
+ record: 'node:windows_node_disk_utilisation:avg_irate',
+ expr: |||
+ avg by (instance) (
+ (irate(windows_logical_disk_read_seconds_total{%(windowsExporterSelector)s}[1m]) +
+ irate(windows_logical_disk_write_seconds_total{%(windowsExporterSelector)s}[1m]))
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'node:windows_node_filesystem_usage:',
+ expr: |||
+ max by (instance,volume)(
+ (windows_logical_disk_size_bytes{%(windowsExporterSelector)s}
+ - windows_logical_disk_free_bytes{%(windowsExporterSelector)s})
+ / windows_logical_disk_size_bytes{%(windowsExporterSelector)s}
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'node:windows_node_filesystem_avail:',
+ expr: |||
+ max by (instance, volume) (windows_logical_disk_free_bytes{%(windowsExporterSelector)s} / windows_logical_disk_size_bytes{%(windowsExporterSelector)s})
+ ||| % $._config,
+ },
+ {
+ record: ':windows_node_net_utilisation:sum_irate',
+ expr: |||
+ sum(irate(windows_net_bytes_total{%(windowsExporterSelector)s}[1m]))
+ ||| % $._config,
+ },
+ {
+ record: 'node:windows_node_net_utilisation:sum_irate',
+ expr: |||
+ sum by (instance) (
+ (irate(windows_net_bytes_total{%(windowsExporterSelector)s}[1m]))
+ )
+ ||| % $._config,
+ },
+ {
+ record: ':windows_node_net_saturation:sum_irate',
+ expr: |||
+ sum(irate(windows_net_packets_received_discarded_total{%(windowsExporterSelector)s}[1m])) +
+ sum(irate(windows_net_packets_outbound_discarded_total{%(windowsExporterSelector)s}[1m]))
+ ||| % $._config,
+ },
+ {
+ record: 'node:windows_node_net_saturation:sum_irate',
+ expr: |||
+ sum by (instance) (
+ (irate(windows_net_packets_received_discarded_total{%(windowsExporterSelector)s}[1m]) +
+ irate(windows_net_packets_outbound_discarded_total{%(windowsExporterSelector)s}[1m]))
+ )
+ ||| % $._config,
+ },
+ ],
+ },
+ {
+ name: 'windows.pod.rules',
+ rules: [
+ {
+ record: 'windows_pod_container_available',
+ expr: |||
+ windows_container_available{%(windowsExporterSelector)s, container_id != ""} * on(container_id) group_left(container, pod, namespace) max(kube_pod_container_info{%(kubeStateMetricsSelector)s, container_id != ""}) by(container, container_id, pod, namespace)
+ ||| % $._config,
+ },
+ {
+ record: 'windows_container_total_runtime',
+ expr: |||
+ windows_container_cpu_usage_seconds_total{%(windowsExporterSelector)s, container_id != ""} * on(container_id) group_left(container, pod, namespace) max(kube_pod_container_info{%(kubeStateMetricsSelector)s, container_id != ""}) by(container, container_id, pod, namespace)
+ ||| % $._config,
+ },
+ {
+ record: 'windows_container_memory_usage',
+ expr: |||
+ windows_container_memory_usage_commit_bytes{%(windowsExporterSelector)s, container_id != ""} * on(container_id) group_left(container, pod, namespace) max(kube_pod_container_info{%(kubeStateMetricsSelector)s, container_id != ""}) by(container, container_id, pod, namespace)
+ ||| % $._config,
+ },
+ {
+ record: 'windows_container_private_working_set_usage',
+ expr: |||
+ windows_container_memory_usage_private_working_set_bytes{%(windowsExporterSelector)s, container_id != ""} * on(container_id) group_left(container, pod, namespace) max(kube_pod_container_info{%(kubeStateMetricsSelector)s, container_id != ""}) by(container, container_id, pod, namespace)
+ ||| % $._config,
+ },
+ {
+ record: 'windows_container_network_received_bytes_total',
+ expr: |||
+ windows_container_network_receive_bytes_total{%(windowsExporterSelector)s, container_id != ""} * on(container_id) group_left(container, pod, namespace) max(kube_pod_container_info{%(kubeStateMetricsSelector)s, container_id != ""}) by(container, container_id, pod, namespace)
+ ||| % $._config,
+ },
+ {
+ record: 'windows_container_network_transmitted_bytes_total',
+ expr: |||
+ windows_container_network_transmit_bytes_total{%(windowsExporterSelector)s, container_id != ""} * on(container_id) group_left(container, pod, namespace) max(kube_pod_container_info{%(kubeStateMetricsSelector)s, container_id != ""}) by(container, container_id, pod, namespace)
+ ||| % $._config,
+ },
+ {
+ record: 'kube_pod_windows_container_resource_memory_request',
+ expr: |||
+ max by (namespace, pod, container) (
+ kube_pod_container_resource_requests{resource="memory",%(kubeStateMetricsSelector)s}
+ ) * on(container,pod,namespace) (windows_pod_container_available)
+ ||| % $._config,
+ },
+ {
+ record: 'kube_pod_windows_container_resource_memory_limit',
+ expr: |||
+ kube_pod_container_resource_limits{resource="memory",%(kubeStateMetricsSelector)s} * on(container,pod,namespace) (windows_pod_container_available)
+ ||| % $._config,
+ },
+ {
+ record: 'kube_pod_windows_container_resource_cpu_cores_request',
+ expr: |||
+ max by (namespace, pod, container) (
+ kube_pod_container_resource_requests{resource="cpu",%(kubeStateMetricsSelector)s}
+ ) * on(container,pod,namespace) (windows_pod_container_available)
+ ||| % $._config,
+ },
+ {
+ record: 'kube_pod_windows_container_resource_cpu_cores_limit',
+ expr: |||
+ kube_pod_container_resource_limits{resource="cpu",%(kubeStateMetricsSelector)s} * on(container,pod,namespace) (windows_pod_container_available)
+ ||| % $._config,
+ },
+ {
+ record: 'namespace_pod_container:windows_container_cpu_usage_seconds_total:sum_rate',
+ expr: |||
+ sum by (namespace, pod, container) (
+ rate(windows_container_total_runtime{}[5m])
+ )
+ ||| % $._config,
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/runbook.md b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/runbook.md
new file mode 100644
index 0000000..e136d1e
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/runbook.md
@@ -0,0 +1,211 @@
+# Kubernetes Alert Runbooks
+
+As Rob Ewaschuk [puts it](https://docs.google.com/document/d/199PqyG3UsyXlwieHaqbGiWVa8eMWi8zzAn0YfcApr8Q/edit#):
+> Playbooks (or runbooks) are an important part of an alerting system; it's best to have an entry for each alert or family of alerts that catch a symptom, which can further explain what the alert means and how it might be addressed.
+
+It is a recommended practice that you add a "runbook" annotation to every Prometheus alert with a link to a clear description of its meaning and suggested remediation or mitigation. While some problems will require private and custom solutions, most common problems have common solutions. In practice, you'll want to automate many of the procedures (rather than leaving them in a wiki), but even a self-correcting problem should provide an explanation as to what happened and why to observers.
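+
+For example, here is a minimal sketch of an alert carrying such an annotation, in the jsonnet style used by this mixin (the alert name, expression, and URL are purely illustrative; `runbook_url` is a commonly used annotation key):
+
+```jsonnet
+{
+  alert: 'ExampleTargetDown',  // hypothetical alert name
+  expr: 'up{job="example"} == 0',
+  'for': '15m',
+  labels: { severity: 'warning' },
+  annotations: {
+    summary: 'Target {{ $labels.instance }} is down.',
+    // Hypothetical runbook location; point this at your own runbook entry.
+    runbook_url: 'https://example.com/runbooks/exampletargetdown',
+  },
+}
+```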
+
+Matthew Skelton & Rob Thatcher have an excellent [run book template](https://github.com/SkeltonThatcher/run-book-template). This template will help teams fully consider most aspects of reliably operating any non-trivial software system, if only to confirm that "this section definitely does not apply here" - a valuable realization.
+
+This page collects this repository's alerts and begins the process of describing what they mean and how they might be addressed. Links from alerts to this page are added [automatically](https://github.com/kubernetes-monitoring/kubernetes-mixin/blob/master/lib/add-runbook-links.libsonnet).
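+
+A rough sketch of how such a transform can be written in jsonnet (an illustrative simplification, not the actual `add-runbook-links.libsonnet`):
+
+```jsonnet
+{
+  prometheusAlerts+:: {
+    groups: [
+      group {
+        rules: [
+          // Only alerting rules get a link; recording rules pass through.
+          if std.objectHas(rule, 'alert') then rule {
+            annotations+: {
+              // Hypothetical URL scheme, for illustration only.
+              runbook_url: 'https://example.com/runbooks/#' + std.asciiLower(rule.alert),
+            },
+          } else rule
+          for rule in group.rules
+        ],
+      }
+      for group in super.groups
+    ],
+  },
+}
+```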
+
+### Group Name: "kubernetes-absent"
+##### Alert Name: "KubeAPIDown"
++ *Message*: `KubeAPI has disappeared from Prometheus target discovery.`
++ *Severity*: critical
+##### Alert Name: "KubeControllerManagerDown"
++ *Message*: `KubeControllerManager has disappeared from Prometheus target discovery.`
++ *Severity*: critical
++ *Runbook*: [Link](https://coreos.com/tectonic/docs/latest/troubleshooting/controller-recovery.html#recovering-a-controller-manager)
+##### Alert Name: "KubeSchedulerDown"
++ *Message*: `KubeScheduler has disappeared from Prometheus target discovery`
++ *Severity*: critical
++ *Runbook*: [Link](https://coreos.com/tectonic/docs/latest/troubleshooting/controller-recovery.html#recovering-a-scheduler)
+##### Alert Name: "KubeletDown"
++ *Message*: `Kubelet has disappeared from Prometheus target discovery.`
++ *Severity*: critical
+##### Alert Name: "KubeProxyDown"
++ *Message*: `KubeProxy has disappeared from Prometheus target discovery`
++ *Severity*: critical
++ *Runbook*: [Link](https://runbooks.prometheus-operator.dev/runbooks/kubernetes/kubeproxydown/)
+### Group Name: "kubernetes-apps"
+##### Alert Name: "KubePodCrashLooping"
++ *Message*: `{{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container }}) is restarting {{ printf \"%.2f\" $value }} / second`
++ *Severity*: warning
+##### Alert Name: "KubePodNotReady"
++ *Message*: `{{ $labels.namespace }}/{{ $labels.pod }} is not ready.`
++ *Severity*: warning
+##### Alert Name: "KubeDeploymentGenerationMismatch"
++ *Message*: `Deployment {{ $labels.namespace }}/{{ $labels.deployment }} generation mismatch`
++ *Severity*: warning
+##### Alert Name: "KubeDeploymentReplicasMismatch"
++ *Message*: `Deployment {{ $labels.namespace }}/{{ $labels.deployment }} replica mismatch`
++ *Severity*: warning
+##### Alert Name: "KubeDeploymentRolloutStuck"
++ *Message*: `Rollout of deployment {{ $labels.namespace }}/{{ $labels.deployment }} is not progressing`
++ *Severity*: warning
+##### Alert Name: "KubeStatefulSetReplicasMismatch"
++ *Message*: `StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} replica mismatch`
++ *Severity*: warning
+##### Alert Name: "KubeStatefulSetGenerationMismatch"
++ *Message*: `StatefulSet {{ $labels.namespace }}/{{ $labels.statefulset }} generation mismatch`
++ *Severity*: warning
+##### Alert Name: "KubeDaemonSetRolloutStuck"
++ *Message*: `Only {{$value | humanizePercentage }} of desired pods scheduled and ready for daemon set {{$labels.namespace}}/{{$labels.daemonset}}`
++ *Severity*: warning
+##### Alert Name: "KubeContainerWaiting"
++ *Message*: `{{ $labels.namespace }}/{{ $labels.pod }} ({{ $labels.container }}) is in waiting state.`
++ *Severity*: warning
+##### Alert Name: "KubeDaemonSetNotScheduled"
++ *Message*: `A number of pods of daemonset {{$labels.namespace}}/{{$labels.daemonset}} are not scheduled.`
++ *Severity*: warning
+
+##### Alert Name: "KubeDaemonSetMisScheduled"
++ *Message*: `A number of pods of daemonset {{$labels.namespace}}/{{$labels.daemonset}} are running where they are not supposed to run.`
++ *Severity*: warning
+
+##### Alert Name: "KubeJobNotCompleted"
++ *Message*: `Job {{ $labels.namespace }}/{{ $labels.job_name }} is taking more than {{ "%(kubeJobTimeoutDuration)s" | humanizeDuration }} to complete.`
++ *Severity*: warning
++ *Action*: Check the job using `kubectl describe job <job>` and look at the pod logs using `kubectl logs <pod>` for further information.
+
+##### Alert Name: "KubeJobFailed"
++ *Message*: `Job {{ $labels.namespace }}/{{ $labels.job_name }} failed to complete.`
++ *Severity*: warning
++ *Action*: Check the job using `kubectl describe job <job>` and look at the pod logs using `kubectl logs <pod>` for further information.
+
+### Group Name: "kubernetes-resources"
+##### Alert Name: "KubeCPUOvercommit"
++ *Message*: `Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure.`
++ *Severity*: warning
+##### Alert Name: "KubeMemOvercommit"
++ *Message*: `Cluster has overcommitted memory resource requests for Pods and cannot tolerate node failure.`
++ *Severity*: warning
+##### Alert Name: "KubeCPUQuotaOvercommit"
++ *Message*: `Cluster has overcommitted CPU resource requests for Namespaces.`
++ *Severity*: warning
+##### Alert Name: "KubeMemQuotaOvercommit"
++ *Message*: `Cluster has overcommitted memory resource requests for Namespaces.`
++ *Severity*: warning
+##### Alert Name: "KubeQuotaAlmostFull"
++ *Message*: `{{ $value | humanizePercentage }} usage of {{ $labels.resource }} in namespace {{ $labels.namespace }}.`
++ *Severity*: info
+##### Alert Name: "KubeQuotaFullyUsed"
++ *Message*: `{{ $value | humanizePercentage }} usage of {{ $labels.resource }} in namespace {{ $labels.namespace }}.`
++ *Severity*: info
+##### Alert Name: "KubeQuotaExceeded"
++ *Message*: `{{ $value | humanizePercentage }} usage of {{ $labels.resource }} in namespace {{ $labels.namespace }}.`
++ *Severity*: warning
+### Group Name: "kubernetes-storage"
+##### Alert Name: "KubePersistentVolumeFillingUp"
++ *Message*: `The persistent volume claimed by {{ $labels.persistentvolumeclaim }} in namespace {{ $labels.namespace }} has {{ $value | humanizePercentage }} free.`
++ *Severity*: critical
+##### Alert Name: "KubePersistentVolumeFillingUp"
++ *Message*: `Based on recent sampling, the persistent volume claimed by {{ $labels.persistentvolumeclaim }} in namespace {{ $labels.namespace }} is expected to fill up within four days.`
++ *Severity*: warning
+### Group Name: "kubernetes-system"
+##### Alert Name: "KubeNodeNotReady"
++ *Message*: `{{ $labels.node }} has been unready for more than 15 minutes.`
++ *Severity*: warning
+##### Alert Name: "KubeNodeUnreachable"
++ *Message*: `{{ $labels.node }} is unreachable and some workloads may be rescheduled.`
++ *Severity*: warning
+##### Alert Name: "KubeletTooManyPods"
++ *Message*: `Kubelet '{{ $labels.node }}' is running at {{ $value | humanizePercentage }} of its Pod capacity.`
++ *Severity*: info
+##### Alert Name: "KubeNodeReadinessFlapping"
++ *Message*: `The readiness status of node {{ $labels.node }} has changed {{ $value }} times in the last 15 minutes.`
++ *Severity*: warning
+##### Alert Name: "KubeletPlegDurationHigh"
++ *Message*: `The Kubelet Pod Lifecycle Event Generator has a 99th percentile duration of {{ $value }} seconds on node {{ $labels.node }}.`
++ *Severity*: warning
+##### Alert Name: "KubeletPodStartUpLatencyHigh"
++ *Message*: `Kubelet Pod startup 99th percentile latency is {{ $value }} seconds on node {{ $labels.node }}.`
++ *Severity*: warning
+##### Alert Name: "KubeletClientCertificateExpiration"
++ *Message*: `Client certificate for Kubelet on node {{ $labels.node }} expires in 7 days.`
++ *Severity*: warning
+##### Alert Name: "KubeletClientCertificateExpiration"
++ *Message*: `Client certificate for Kubelet on node {{ $labels.node }} expires in 1 day.`
++ *Severity*: critical
+##### Alert Name: "KubeletServerCertificateExpiration"
++ *Message*: `Server certificate for Kubelet on node {{ $labels.node }} expires in 7 days.`
++ *Severity*: warning
+##### Alert Name: "KubeletServerCertificateExpiration"
++ *Message*: `Server certificate for Kubelet on node {{ $labels.node }} expires in 1 day.`
++ *Severity*: critical
+##### Alert Name: "KubeletClientCertificateRenewalErrors"
++ *Message*: `Kubelet on node {{ $labels.node }} has failed to renew its client certificate ({{ $value | humanize }} errors in the last 15 minutes).`
++ *Severity*: warning
+##### Alert Name: "KubeletServerCertificateRenewalErrors"
++ *Message*: `Kubelet on node {{ $labels.node }} has failed to renew its server certificate ({{ $value | humanize }} errors in the last 5 minutes).`
++ *Severity*: warning
+##### Alert Name: "KubeVersionMismatch"
++ *Message*: `There are {{ $value }} different versions of Kubernetes components running.`
++ *Severity*: warning
+##### Alert Name: "KubeClientErrors"
++ *Message*: `Kubernetes API server client '{{ $labels.job }}/{{ $labels.instance }}' is experiencing {{ $value | humanizePercentage }} errors.`
++ *Severity*: warning
+##### Alert Name: "KubeClientCertificateExpiration"
++ *Message*: `A client certificate used to authenticate to the apiserver is expiring in less than 7 days.`
++ *Severity*: warning
+##### Alert Name: "KubeClientCertificateExpiration"
++ *Message*: `A client certificate used to authenticate to the apiserver is expiring in less than 1 day.`
++ *Severity*: critical
+##### Alert Name: "KubeAPITerminatedRequests"
++ *Message*: `The apiserver has terminated {{ $value | humanizePercentage }} of its incoming requests.`
++ *Severity*: warning
++ *Action*: Use the `apiserver_flowcontrol_rejected_requests_total` metric to determine which flow schema is throttling the traffic to the API Server. The flow schema also provides information on the affected resources and subjects.
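+
+  A query along these lines (a sketch; adjust the range and grouping to your environment) shows which flow schemas are rejecting requests and why:
+
+  ```
+  sum by (flow_schema, reason) (
+    rate(apiserver_flowcontrol_rejected_requests_total[5m])
+  )
+  ```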
+
+## Other Kubernetes Runbooks and troubleshooting
++ [Troubleshoot Clusters](https://kubernetes.io/docs/tasks/debug-application-cluster/debug-cluster/)
++ [Cloud.gov Kubernetes Runbook](https://landing.app.cloud.gov/docs/ops/runbook/troubleshooting-kubernetes/)
++ [Recover a Broken Cluster](https://codefresh.io/Kubernetes-Tutorial/recover-broken-kubernetes-cluster/)
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/scripts/check-selectors-ksm.sh b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/scripts/check-selectors-ksm.sh
new file mode 100755
index 0000000..b6b6a9e
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/scripts/check-selectors-ksm.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+# Set -u to error out if we use an unset variable.
+# Set -o pipefail to propagate errors in a pipeline.
+set -uo pipefail
+
+# Remove kube-state-metrics directory if it exists.
+rm -rf kube-state-metrics
+
+# Clone kube-state-metrics repository.
+git clone https://github.com/kubernetes/kube-state-metrics --depth 1
+
+# Set the repository root.
+repository_root=$(git rev-parse --show-toplevel)
+
+# Change directory to kube-state-metrics.
+cd kube-state-metrics || exit
+
+# Extract all kube_ metric names from the codebase.
+find internal/store -type f -not -name '*_test.go' -exec sed -nE 's/.*"(kube_[^"]+)".*/\1/p' {} \; | sort -u > metrics.txt
+
+# Set the KSM selector specifier.
+ksm_selector="kubeStateMetricsSelector"
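+
+# For illustration: a usage such as `kube_pod_info{%(kubeStateMetricsSelector)s}`
+# passes this check, while a bare `kube_pod_info{namespace="default"}` would be
+# reported as a miss below.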
+
+# Set the paths to the alerts, lib and rules directories.
+alerts_path="$repository_root/alerts"
+lib_path="$repository_root/lib"
+rules_path="$repository_root/rules"
+
+# Read metrics.txt line by line.
+while IFS= read -r metric; do
+ selector_misses=$(\
+ grep --only-matching --color=always --line-number "$metric{[^}]*}" --directories=recurse "$alerts_path" "$lib_path" "$rules_path" |\
+ grep --invert-match "$ksm_selector" \
+ )
+ if [ -n "$selector_misses" ]; then
+ echo "The following $metric metrics are missing the $ksm_selector specifier:"
+ echo "$selector_misses"
+ fi
+done < metrics.txt
+
+# Clean artefacts.
+rm metrics.txt
+cd .. || exit
+rm -rf kube-state-metrics
+
+# TODO: Currently, there are only two possible states the workflow can report: success or failure.
+# We could benefit from a third "warning" state, for cases where we observe an overlap of selectors for the same metric.
+# Ref: https://docs.github.com/en/actions/creating-actions/setting-exit-codes-for-actions#about-exit-codes
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/scripts/go.mod b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/scripts/go.mod
new file mode 100644
index 0000000..dccb0af
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/scripts/go.mod
@@ -0,0 +1,138 @@
+module _
+
+go 1.17
+
+require (
+ github.com/google/go-jsonnet v0.17.1-0.20210520122306-7373f5b60678
+ github.com/grafana/dashboard-linter v0.0.0-20220121193616-222f7f5cfe30
+ github.com/jsonnet-bundler/jsonnet-bundler v0.4.0
+ github.com/prometheus/prometheus v1.8.2-0.20211011171444-354d8d2ecfac
+)
+
+require (
+ cloud.google.com/go v0.93.3 // indirect
+ github.com/Azure/azure-sdk-for-go v57.1.0+incompatible // indirect
+ github.com/Azure/go-autorest/autorest v0.11.20 // indirect
+ github.com/Azure/go-autorest/autorest/adal v0.9.15 // indirect
+ github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
+ github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
+ github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
+ github.com/Azure/go-autorest/logger v0.2.1 // indirect
+ github.com/Azure/go-autorest/tracing v0.6.0 // indirect
+ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
+ github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922 // indirect
+ github.com/armon/go-metrics v0.3.3 // indirect
+ github.com/aws/aws-sdk-go v1.40.37 // indirect
+ github.com/beorn7/perks v1.0.1 // indirect
+ github.com/cespare/xxhash/v2 v2.1.2 // indirect
+ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed // indirect
+ github.com/containerd/containerd v1.5.4 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/dennwc/varint v1.0.0 // indirect
+ github.com/digitalocean/godo v1.65.0 // indirect
+ github.com/docker/distribution v2.7.1+incompatible // indirect
+ github.com/docker/docker v20.10.8+incompatible // indirect
+ github.com/docker/go-connections v0.4.0 // indirect
+ github.com/docker/go-units v0.4.0 // indirect
+ github.com/edsrzf/mmap-go v1.0.0 // indirect
+ github.com/envoyproxy/go-control-plane v0.9.9 // indirect
+ github.com/envoyproxy/protoc-gen-validate v0.6.1 // indirect
+ github.com/fatih/color v1.10.0 // indirect
+ github.com/fsnotify/fsnotify v1.5.1 // indirect
+ github.com/go-kit/log v0.1.0 // indirect
+ github.com/go-logfmt/logfmt v0.5.1 // indirect
+ github.com/go-logr/logr v0.4.0 // indirect
+ github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 // indirect
+ github.com/go-zookeeper/zk v1.0.2 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang-jwt/jwt/v4 v4.0.0 // indirect
+ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang/protobuf v1.5.2 // indirect
+ github.com/golang/snappy v0.0.4 // indirect
+ github.com/google/go-cmp v0.5.6 // indirect
+ github.com/google/go-querystring v1.0.0 // indirect
+ github.com/google/gofuzz v1.1.0 // indirect
+ github.com/google/pprof v0.0.0-20210827144239-02619b876842 // indirect
+ github.com/googleapis/gax-go/v2 v2.1.0 // indirect
+ github.com/googleapis/gnostic v0.5.5 // indirect
+ github.com/gophercloud/gophercloud v0.20.0 // indirect
+ github.com/hashicorp/consul/api v1.10.1 // indirect
+ github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
+ github.com/hashicorp/go-hclog v0.12.2 // indirect
+ github.com/hashicorp/go-immutable-radix v1.2.0 // indirect
+ github.com/hashicorp/go-rootcerts v1.0.2 // indirect
+ github.com/hashicorp/golang-lru v0.5.4 // indirect
+ github.com/hashicorp/hcl v1.0.0 // indirect
+ github.com/hashicorp/serf v0.9.5 // indirect
+ github.com/hetznercloud/hcloud-go v1.32.0 // indirect
+ github.com/imdario/mergo v0.3.11 // indirect
+ github.com/jmespath/go-jmespath v0.4.0 // indirect
+ github.com/jpillora/backoff v1.0.0 // indirect
+ github.com/json-iterator/go v1.1.11 // indirect
+ github.com/linode/linodego v0.32.0 // indirect
+ github.com/magiconair/properties v1.8.5 // indirect
+ github.com/mattn/go-colorable v0.1.8 // indirect
+ github.com/mattn/go-isatty v0.0.12 // indirect
+ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+ github.com/miekg/dns v1.1.43 // indirect
+ github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/mitchellh/mapstructure v1.4.2 // indirect
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+ github.com/modern-go/reflect2 v1.0.1 // indirect
+ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
+ github.com/oklog/ulid v1.3.1 // indirect
+ github.com/opencontainers/go-digest v1.0.0 // indirect
+ github.com/opencontainers/image-spec v1.0.1 // indirect
+ github.com/opentracing/opentracing-go v1.2.0 // indirect
+ github.com/pelletier/go-toml v1.9.4 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/prometheus/client_golang v1.11.0 // indirect
+ github.com/prometheus/client_model v0.2.0 // indirect
+ github.com/prometheus/common v0.31.1 // indirect
+ github.com/prometheus/common/sigv4 v0.1.0 // indirect
+ github.com/prometheus/exporter-toolkit v0.6.1 // indirect
+ github.com/prometheus/procfs v0.6.0 // indirect
+ github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44 // indirect
+ github.com/sirupsen/logrus v1.7.0 // indirect
+ github.com/spf13/afero v1.6.0 // indirect
+ github.com/spf13/cast v1.4.1 // indirect
+ github.com/spf13/cobra v1.2.1 // indirect
+ github.com/spf13/jwalterweatherman v1.1.0 // indirect
+ github.com/spf13/pflag v1.0.5 // indirect
+ github.com/spf13/viper v1.9.0 // indirect
+ github.com/stretchr/testify v1.7.0 // indirect
+ github.com/subosito/gotenv v1.2.0 // indirect
+ github.com/uber/jaeger-client-go v2.29.1+incompatible // indirect
+ github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
+ go.opencensus.io v0.23.0 // indirect
+ go.uber.org/atomic v1.9.0 // indirect
+ go.uber.org/goleak v1.1.10 // indirect
+ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
+ golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
+ golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f // indirect
+ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
+ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
+ golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 // indirect
+ golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
+ golang.org/x/text v0.3.6 // indirect
+ golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
+ golang.org/x/tools v0.1.5 // indirect
+ google.golang.org/api v0.56.0 // indirect
+ google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 // indirect
+ google.golang.org/grpc v1.40.0 // indirect
+ google.golang.org/protobuf v1.27.1 // indirect
+ gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect
+ gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect
+ gopkg.in/inf.v0 v0.9.1 // indirect
+ gopkg.in/ini.v1 v1.63.2 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
+ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+ k8s.io/api v0.22.1 // indirect
+ k8s.io/apimachinery v0.22.1 // indirect
+ k8s.io/client-go v0.22.1 // indirect
+ k8s.io/klog/v2 v2.10.0 // indirect
+ k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 // indirect
+ sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
+ sigs.k8s.io/yaml v1.2.0 // indirect
+)
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/scripts/go.sum b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/scripts/go.sum
new file mode 100644
index 0000000..52a7d67
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/scripts/go.sum
@@ -0,0 +1,2063 @@
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3 h1:wPBktZFzYBcCZVARvwVKqH1uEj+aLXofJEtrb4oOsio=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
+cloud.google.com/go/bigtable v1.3.0/go.mod h1:z5EyKrPE8OQmeg4h5MNdKvuSnI9CCT49Ki3f23aBzio=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v57.1.0+incompatible h1:TKQ3ieyB0vVKkF6t9dsWbMjq56O1xU3eh3Ec09v6ajM=
+github.com/Azure/azure-sdk-for-go v57.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
+github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
+github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.20 h1:s8H1PbCZSqg/DH7JMlOz6YMig6htWLNPsjDdlLqCx3M=
+github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
+github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk=
+github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
+github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
+github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
+github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
+github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
+github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI=
+github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
+github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/HdrHistogram/hdrhistogram-go v1.1.0 h1:6dpdDPTRoo78HxAJ6T1HfMiKSnqhgRRqzCuPshRkQ7I=
+github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
+github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/sprig v2.16.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
+github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922 h1:8ypNbf5sd3Sm3cKJ9waOGoQv6dKAFiFty9L6NP1AqJ4=
+github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
+github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
+github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro=
+github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
+github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
+github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
+github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
+github.com/aws/aws-sdk-go v1.40.37 h1:I+Q6cLctkFyMMrKukcDnj+i2kjrQ37LGiOM6xmsxC48=
+github.com/aws/aws-sdk-go v1.40.37/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
+github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
+github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
+github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI=
+github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
+github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
+github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.4 h1:uPF0og3ByFzDnaStfiQj3fVGTEtaSNyU+bW7GR/nqGA=
+github.com/containerd/containerd v1.5.4/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
+github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
+github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4=
+github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/digitalocean/godo v1.65.0 h1:3SywGJBC18HaYtPQF+T36jYzXBi+a6eIMonSjDll7TA=
+github.com/digitalocean/godo v1.65.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM=
+github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
+github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
+github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.9 h1:vQLjymTobffN2R0F8eTqw6q7iozfRO5Z0m+/4Vw+/uA=
+github.com/envoyproxy/go-control-plane v0.9.9/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.1 h1:4CF52PCseTFt4bE+Yk3dIpdVi7XWuPVMhPtm4FaIJPM=
+github.com/envoyproxy/protoc-gen-validate v0.6.1/go.mod h1:txg5va2Qkip90uYoSKH+nkAAmXrb2j3iq4FLwdrCbXQ=
+github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.10.0 h1:s36xzo75JdqLaaWoiEHk767eHiwo0598uUxyfiPkDsg=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp4nseejPd+UKxtCVQ2hUxNTZ7qQZJa7CLriIeo=
+github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
+github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
+github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
+github.com/go-chi/chi v4.1.0+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
+github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
+github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
+github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
+github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ=
+github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk=
+github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og=
+github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
+github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI=
+github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
+github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY=
+github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc=
+github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc=
+github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4=
+github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o=
+github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
+github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
+github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
+github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo=
+github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98=
+github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk=
+github.com/go-openapi/runtime v0.19.29/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
+github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
+github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ=
+github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg=
+github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
+github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
+github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
+github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
+github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
+github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
+github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M=
+github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
+github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
+github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
+github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8=
+github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4=
+github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI=
+github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0=
+github.com/go-openapi/validate v0.20.2/go.mod h1:e7OJoKNgd0twXZwIn0A43tHbvIcr/rZIVCbJBpTUoY0=
+github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
+github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
+github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY=
+github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM=
+github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-jsonnet v0.17.1-0.20210520122306-7373f5b60678 h1:vA3dFpscjPP1UmmGAgdycq7mW6zaCwiJ0byqfXuPc/A=
+github.com/google/go-jsonnet v0.17.1-0.20210520122306-7373f5b60678/go.mod h1:C3fTzyVJDslXdiTqw/bTFk7vSGyCtH3MGRbDfvEwGd0=
+github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200417002340-c6e0a841f49a/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210827144239-02619b876842 h1:JCrt5MIE1fHQtdy1825HwJ45oVQaqHE6lgssRhjcg/o=
+github.com/google/pprof v0.0.0-20210827144239-02619b876842/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
+github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss=
+github.com/gophercloud/gophercloud v0.20.0 h1:1+4jrsjVhdX5omlAo4jkmFc6ftLbuXLzgFo4i6lH+Gk=
+github.com/gophercloud/gophercloud v0.20.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
+github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/grafana/dashboard-linter v0.0.0-20220121193616-222f7f5cfe30 h1:kN2+FJURJMvIEkiL7NVnvVvgGHjarSAIDUpQ6z8WHqc=
+github.com/grafana/dashboard-linter v0.0.0-20220121193616-222f7f5cfe30/go.mod h1://CkibdjQDn6tp3o6QEso8n75KsHI/14BnRRDbx2hN0=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.14.4/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
+github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU=
+github.com/hashicorp/consul/api v1.10.1 h1:MwZJp86nlnL+6+W1Zly4JUuVn9YHhMggBirMpHGD7kw=
+github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
+github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.12.2 h1:F1fdYblUEsxKiailtkhCCG2g4bipEgaHiDc8vffNpD4=
+github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8=
+github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.2.4 h1:OOhYzSvFnkFQXm1ysE8RjXTHsqSRDyP4emusC9K7DYg=
+github.com/hashicorp/memberlist v0.2.4/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU=
+github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM=
+github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/hetznercloud/hcloud-go v1.32.0 h1:7zyN2V7hMlhm3HZdxOarmOtvzKvkcYKjM0hcwYMQZz0=
+github.com/hetznercloud/hcloud-go v1.32.0/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
+github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY=
+github.com/influxdata/flux v0.120.1/go.mod h1:pGSAvyAA5d3et7SSzajaYShWYXmnRnJJq2qWi+WWZ2I=
+github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA=
+github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ=
+github.com/influxdata/influxdb v1.9.3/go.mod h1:xD4ZjAgEJQO9/bX3NhFrssKtdNPi+ki1kjrttJRDhGc=
+github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
+github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo=
+github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
+github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
+github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
+github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk=
+github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
+github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
+github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
+github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y=
+github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jsonnet-bundler/jsonnet-bundler v0.4.0 h1:4BKZ6LDqPc2wJDmaKnmYD/vDjUptJtnUpai802MibFc=
+github.com/jsonnet-bundler/jsonnet-bundler v0.4.0/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
+github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
+github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
+github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
+github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
+github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/linode/linodego v0.32.0 h1:IK04cx2b/IwAAd6XLruf1Dl/n3dRXj87Uw/5qo6afVU=
+github.com/linode/linodego v0.32.0/go.mod h1:BR0gVkCJffEdIGJSl6bHR80Ty+Uvg/2jkjmrWaFectM=
+github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU=
+github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
+github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
+github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
+github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
+github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
+github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
+github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
+github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
+github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
+github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
+github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
+github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
+github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
+github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg=
+github.com/prometheus/alertmanager v0.23.0/go.mod h1:0MLTrjQI8EuVmvykEhcfr/7X0xmaDAZrqMgxIq3OXHk=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U=
+github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.31.1 h1:d18hG4PkHnNAKNMOmFuXFaiY8Us0nird/2m60uS1AMs=
+github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
+github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
+github.com/prometheus/exporter-toolkit v0.6.1 h1:Aqk75wQD92N9CqmTlZwjKwq6272nOGrWIbc8Z7+xQO0=
+github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/prometheus v0.0.0-20200609090129-a6600f564e3c/go.mod h1:S5n0C6tSgdnwWshBUceRx5G1OsjLv/EeZ9t3wIfEtsY=
+github.com/prometheus/prometheus v1.8.2-0.20211011171444-354d8d2ecfac h1:emphJoDK6yZ1GxyFbyYa7ByoWkl3dXfOmPvHAy0L0XI=
+github.com/prometheus/prometheus v1.8.2-0.20211011171444-354d8d2ecfac/go.mod h1:wP6L5BiOZ1JZYadRh17u5RujSS19zNSGZfGUi/MZUpM=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
+github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
+github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
+github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44 h1:3egqo0Vut6daANFm7tOXdNAa8v5/uLU+sgCJrc88Meo=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
+github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
+github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk=
+github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
+github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
+github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU=
+github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E=
+github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.29.1+incompatible h1:R9ec3zO3sGpzs0abd43Y+fBZRJ9uiH6lXyR/+u6brW4=
+github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
+github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
+github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
+github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
+github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.mongodb.org/mongo-driver v1.3.2/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE=
+go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
+go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
+go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
+go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
+go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f h1:w6wWR0H+nyVpbSAQbzVEIACVyr/h8l/BEkY6Sokc7Eg=
+golang.org/x/net v0.0.0-20210903162142-ad29c8ab022f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34 h1:GkvMjFtXUmahfDtashnc1mnrCtuBVcwse5QV2lUk/tI=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304024140-c4206d458c3f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
+gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200420144010-e5e8543f8aeb/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83 h1:3V2dxSZpz4zozWWUq36vUxXEKnSYitEH2LdsAx+RUmg=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo=
+gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE=
+gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
+gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c=
+gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
+gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.17.5/go.mod h1:0zV5/ungglgy2Rlm3QK8fbxkXVs+BSJWpJP/+8gUVLY=
+k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
+k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
+k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
+k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
+k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0=
+k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
+k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
+k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
+k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
+k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo=
+k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
+k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
+k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
+k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
+k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
+k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
+k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
+k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
+k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
+k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.10.0 h1:R2HDMDJsHVTHA2n4RjwbeYXdOcBymXdX/JRb1v0VGhE=
+k8s.io/klog/v2 v2.10.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU=
+k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
+k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
+k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 h1:imL9YgXQ9p7xmPzHFm/vVd/cF78jad+n4wK1ABwYtMM=
+k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/scripts/tools.go b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/scripts/tools.go
new file mode 100644
index 0000000..ccb3054
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/scripts/tools.go
@@ -0,0 +1,15 @@
+//go:build tools
+// +build tools
+
+// Package tools tracks dependencies for tools that are used in the build process.
+// See https://github.com/golang/go/issues/25922
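+//
+// None of these imports end up in a shipped binary: the "tools" build tag
+// above keeps this file out of ordinary builds, while the go tooling still
+// records the pinned tool versions in go.mod.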
+package tools
+
+import (
+ _ "github.com/google/go-jsonnet/cmd/jsonnet"
+ _ "github.com/google/go-jsonnet/cmd/jsonnet-lint"
+ _ "github.com/google/go-jsonnet/cmd/jsonnetfmt"
+ _ "github.com/grafana/dashboard-linter"
+ _ "github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb"
+ _ "github.com/prometheus/prometheus/cmd/promtool"
+)
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/tests.yaml b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/tests.yaml
new file mode 100644
index 0000000..a2fb0bb
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/kubernetes-monitoring/kubernetes-mixin/tests.yaml
@@ -0,0 +1,1240 @@
+rule_files:
+ - prometheus_alerts.yaml
+ - prometheus_rules.yaml
+
+evaluation_interval: 1m
+
+tests:
+# PersistentVolume disk space
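+# Free space drops from 100% to 16/1024 ~= 1.56% of capacity, below the
+# mixin's critical threshold (3% available in the upstream defaults), so the
+# critical alert is expected by the final evaluation.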
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_available_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 512 64 16'
+ - series: 'kubelet_volume_stats_capacity_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 1024 1024 1024'
+ - series: 'kubelet_volume_stats_used_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '16 64 512 1024'
+ - series: 'kube_persistentvolumeclaim_access_mode{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc", access_mode="ReadWriteOnce", service="kube-state-metrics"}'
+ values: '1 1 1 1'
+ alert_rule_test:
+ - eval_time: 1m
+ alertname: KubePersistentVolumeFillingUp
+ - eval_time: 2m
+ alertname: KubePersistentVolumeFillingUp
+ - eval_time: 3m
+ alertname: KubePersistentVolumeFillingUp
+ - eval_time: 4m
+ alertname: KubePersistentVolumeFillingUp
+ exp_alerts:
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ persistentvolumeclaim: somepvc
+ severity: critical
+ exp_annotations:
+ summary: "PersistentVolume is filling up."
+ description: 'The PersistentVolume claimed by somepvc in Namespace monitoring is only 1.562% free.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumefillingup
+
+# Don't alert when PVC access_mode is ReadOnlyMany
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_available_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 512 64 16'
+ - series: 'kubelet_volume_stats_capacity_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 1024 1024 1024'
+ - series: 'kubelet_volume_stats_used_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '16 64 512 1024'
+ - series: 'kube_persistentvolumeclaim_access_mode{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc", access_mode="ReadOnlyMany", service="kube-state-metrics"}'
+ values: '1 1 1 1'
+ alert_rule_test:
+ - eval_time: 1m
+ alertname: KubePersistentVolumeFillingUp
+ - eval_time: 2m
+ alertname: KubePersistentVolumeFillingUp
+ - eval_time: 3m
+ alertname: KubePersistentVolumeFillingUp
+ - eval_time: 4m
+ alertname: KubePersistentVolumeFillingUp
+
+# Block volume mounts can report 0 for the kubelet_volume_stats_used_bytes metric, but this shouldn't trigger the KubePersistentVolumeFillingUp alert.
+# See https://github.com/kubernetes/kubernetes/commit/b997e0e4d6ccbead435a47d6ac75b0db3d17252f for details.
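+# (The alert expression also requires kubelet_volume_stats_used_bytes > 0, so
+# an all-zero usage series never matches.)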
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_available_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 512 64 16'
+ - series: 'kubelet_volume_stats_capacity_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 1024 1024 1024'
+ - series: 'kubelet_volume_stats_used_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '0 0 0 0'
+ alert_rule_test:
+ - eval_time: 1m
+ alertname: KubePersistentVolumeFillingUp
+ - eval_time: 2m
+ alertname: KubePersistentVolumeFillingUp
+ - eval_time: 3m
+ alertname: KubePersistentVolumeFillingUp
+ - eval_time: 4m
+ alertname: KubePersistentVolumeFillingUp
+
+# Don't alert when PVC has been labelled as fully utilised
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_available_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 512 64 16'
+ - series: 'kubelet_volume_stats_capacity_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 1024 1024 1024'
+ - series: 'kubelet_volume_stats_used_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '16 64 512 1024'
+ - series: 'kube_persistentvolumeclaim_access_mode{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc", access_mode="ReadWriteOnce", service="kube-state-metrics"}'
+ values: '1 1 1 1'
+ - series: 'kube_persistentvolumeclaim_labels{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc",label_excluded_from_alerts="true"}'
+ values: '1 1 1 1'
+ alert_rule_test:
+ - eval_time: 1m
+ alertname: KubePersistentVolumeFillingUp
+ - eval_time: 2m
+ alertname: KubePersistentVolumeFillingUp
+ - eval_time: 3m
+ alertname: KubePersistentVolumeFillingUp
+ - eval_time: 4m
+ alertname: KubePersistentVolumeFillingUp
+
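+# The expanding notation 'a+bxN' / 'a-bxN' used below yields N+1 samples
+# starting at a and stepping by +/-b per interval; '1024-10x61' expands to
+# 1024, 1014, ... 414 across 62 samples.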
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_available_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024-10x61'
+ - series: 'kubelet_volume_stats_capacity_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '32768+0x61'
+ - series: 'kubelet_volume_stats_used_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024+10x61'
+ alert_rule_test:
+ - eval_time: 1h
+ alertname: KubePersistentVolumeFillingUp
+ exp_alerts:
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ persistentvolumeclaim: somepvc
+ severity: critical
+ exp_annotations:
+ summary: "PersistentVolume is filling up."
+ description: 'The PersistentVolume claimed by somepvc in Namespace monitoring is only 1.294% free.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumefillingup
+
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_available_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024-10x61'
+ - series: 'kubelet_volume_stats_capacity_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '32768+0x61'
+ - series: 'kubelet_volume_stats_used_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024+10x61'
+ - series: 'kube_persistentvolumeclaim_access_mode{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc", access_mode="ReadWriteOnce", service="kube-state-metrics"}'
+ values: '1x61'
+ alert_rule_test:
+ - eval_time: 61m
+ alertname: KubePersistentVolumeFillingUp
+ exp_alerts:
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ persistentvolumeclaim: somepvc
+ severity: warning
+ exp_annotations:
+ summary: "PersistentVolume is filling up."
+ description: 'Based on recent sampling, the PersistentVolume claimed by somepvc in Namespace monitoring is expected to fill up within four days. Currently 1.263% is available.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumefillingup
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ persistentvolumeclaim: somepvc
+ severity: critical
+ exp_annotations:
+ summary: "PersistentVolume is filling up."
+ description: 'The PersistentVolume claimed by somepvc in Namespace monitoring is only 1.263% free.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumefillingup
+
+# Block volume mounts can report 0 for the kubelet_volume_stats_used_bytes metric, but this shouldn't trigger the KubePersistentVolumeFillingUp alert.
+# See https://github.com/kubernetes/kubernetes/commit/b997e0e4d6ccbead435a47d6ac75b0db3d17252f for details.
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_available_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024-10x61'
+ - series: 'kubelet_volume_stats_capacity_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '32768+0x61'
+ - series: 'kubelet_volume_stats_used_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '0x61'
+ alert_rule_test:
+ - eval_time: 61m
+ alertname: KubePersistentVolumeFillingUp
+
+# Don't alert when PVC access_mode is ReadOnlyMany
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_available_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024-10x61'
+ - series: 'kubelet_volume_stats_capacity_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '32768+0x61'
+ - series: 'kubelet_volume_stats_used_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1x61'
+ - series: 'kube_persistentvolumeclaim_access_mode{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc", access_mode="ReadOnlyMany", service="kube-state-metrics"}'
+ values: '1x61'
+ alert_rule_test:
+ - eval_time: 61m
+ alertname: KubePersistentVolumeFillingUp
+
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_available_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024-10x61'
+ - series: 'kubelet_volume_stats_capacity_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '32768+0x61'
+ - series: 'kubelet_volume_stats_used_bytes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024+10x61'
+ - series: 'kube_persistentvolumeclaim_access_mode{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc", access_mode="ReadWriteOnce", service="kube-state-metrics"}'
+ values: '1x61'
+ - series: 'kube_persistentvolumeclaim_labels{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc",label_excluded_from_alerts="true"}'
+ values: '1x61'
+ alert_rule_test:
+ - eval_time: 61m
+ alertname: KubePersistentVolumeFillingUp
+
+# PersistentVolume inodes
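+# Same scenarios as the disk-space tests above, driven by the inode metrics:
+# 16 of 1024 free inodes is ~1.56%, again below the critical threshold.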
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_inodes_free{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 512 64 16'
+ - series: 'kubelet_volume_stats_inodes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 1024 1024 1024'
+ - series: 'kubelet_volume_stats_inodes_used{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '16 64 512 1024'
+ - series: 'kube_persistentvolumeclaim_access_mode{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc", access_mode="ReadWriteOnce", service="kube-state-metrics"}'
+ values: '1 1 1 1'
+ alert_rule_test:
+ - eval_time: 1m
+ alertname: KubePersistentVolumeInodesFillingUp
+ - eval_time: 2m
+ alertname: KubePersistentVolumeInodesFillingUp
+ - eval_time: 3m
+ alertname: KubePersistentVolumeInodesFillingUp
+ - eval_time: 4m
+ alertname: KubePersistentVolumeInodesFillingUp
+ exp_alerts:
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ persistentvolumeclaim: somepvc
+ severity: critical
+ exp_annotations:
+ summary: "PersistentVolumeInodes are filling up."
+ description: 'The PersistentVolume claimed by somepvc in Namespace monitoring only has 1.562% free inodes.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumeinodesfillingup
+
+# Don't alert when PVC access_mode is ReadOnlyMany
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_inodes_free{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 512 64 16'
+ - series: 'kubelet_volume_stats_inodes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 1024 1024 1024'
+ - series: 'kubelet_volume_stats_inodes_used{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '16 64 512 1024'
+ - series: 'kube_persistentvolumeclaim_access_mode{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc", access_mode="ReadOnlyMany", service="kube-state-metrics"}'
+ values: '1 1 1 1'
+ alert_rule_test:
+ - eval_time: 1m
+ alertname: KubePersistentVolumeInodesFillingUp
+ - eval_time: 2m
+ alertname: KubePersistentVolumeInodesFillingUp
+ - eval_time: 3m
+ alertname: KubePersistentVolumeInodesFillingUp
+ - eval_time: 4m
+ alertname: KubePersistentVolumeInodesFillingUp
+
+# Block volume mounts can report 0 for the kubelet_volume_stats_inodes_used metric, but this shouldn't trigger the KubePersistentVolumeInodesFillingUp alert.
+# See https://github.com/kubernetes/kubernetes/commit/b997e0e4d6ccbead435a47d6ac75b0db3d17252f for details.
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_inodes_free{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 512 64 16'
+ - series: 'kubelet_volume_stats_inodes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 1024 1024 1024'
+ - series: 'kubelet_volume_stats_inodes_used{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '0 0 0 0'
+ alert_rule_test:
+ - eval_time: 1m
+ alertname: KubePersistentVolumeInodesFillingUp
+ - eval_time: 2m
+ alertname: KubePersistentVolumeInodesFillingUp
+ - eval_time: 3m
+ alertname: KubePersistentVolumeInodesFillingUp
+ - eval_time: 4m
+ alertname: KubePersistentVolumeInodesFillingUp
+
+# Don't alert when PVC has been labelled as fully utilised
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_inodes_free{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 512 64 16'
+ - series: 'kubelet_volume_stats_inodes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024 1024 1024 1024'
+ - series: 'kubelet_volume_stats_inodes_used{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '16 64 512 1024'
+ - series: 'kube_persistentvolumeclaim_access_mode{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc", access_mode="ReadWriteOnce", service="kube-state-metrics"}'
+ values: '1 1 1 1'
+ - series: 'kube_persistentvolumeclaim_labels{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc",label_excluded_from_alerts="true"}'
+ values: '1 1 1 1'
+ alert_rule_test:
+ - eval_time: 1m
+ alertname: KubePersistentVolumeInodesFillingUp
+ - eval_time: 2m
+ alertname: KubePersistentVolumeInodesFillingUp
+ - eval_time: 3m
+ alertname: KubePersistentVolumeInodesFillingUp
+ - eval_time: 4m
+ alertname: KubePersistentVolumeInodesFillingUp
+
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_inodes_free{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024-10x61'
+ - series: 'kubelet_volume_stats_inodes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '32768+0x61'
+ - series: 'kubelet_volume_stats_inodes_used{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024+10x61'
+ alert_rule_test:
+ - eval_time: 1h
+ alertname: KubePersistentVolumeInodesFillingUp
+ exp_alerts:
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ persistentvolumeclaim: somepvc
+ severity: critical
+ exp_annotations:
+ summary: "PersistentVolumeInodes are filling up."
+ description: 'The PersistentVolume claimed by somepvc in Namespace monitoring only has 1.294% free inodes.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumeinodesfillingup
+
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_inodes_free{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024-10x61'
+ - series: 'kubelet_volume_stats_inodes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '32768+0x61'
+ - series: 'kubelet_volume_stats_inodes_used{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024+10x61'
+ - series: 'kube_persistentvolumeclaim_access_mode{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc", access_mode="ReadWriteOnce", service="kube-state-metrics"}'
+ values: '1x61'
+ alert_rule_test:
+ - eval_time: 61m
+ alertname: KubePersistentVolumeInodesFillingUp
+ exp_alerts:
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ persistentvolumeclaim: somepvc
+ severity: warning
+ exp_annotations:
+ summary: "PersistentVolumeInodes are filling up."
+ description: 'Based on recent sampling, the PersistentVolume claimed by somepvc in Namespace monitoring is expected to run out of inodes within four days. Currently 1.263% of its inodes are free.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumeinodesfillingup
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ persistentvolumeclaim: somepvc
+ severity: critical
+ exp_annotations:
+ summary: "PersistentVolumeInodes are filling up."
+ description: 'The PersistentVolume claimed by somepvc in Namespace monitoring only has 1.263% free inodes.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepersistentvolumeinodesfillingup
+
+# Block volume mounts can report 0 for the kubelet_volume_stats_inodes_used metric, but this shouldn't trigger the KubePersistentVolumeInodesFillingUp alert.
+# See https://github.com/kubernetes/kubernetes/commit/b997e0e4d6ccbead435a47d6ac75b0db3d17252f for details.
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_inodes_free{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024-10x61'
+ - series: 'kubelet_volume_stats_inodes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '32768+0x61'
+ - series: 'kubelet_volume_stats_inodes_used{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '0x61'
+ alert_rule_test:
+ - eval_time: 61m
+ alertname: KubePersistentVolumeInodesFillingUp
+
+# Don't alert when PVC access_mode is ReadOnlyMany
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_inodes_free{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024-10x61'
+ - series: 'kubelet_volume_stats_inodes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '32768+0x61'
+ - series: 'kubelet_volume_stats_inodes_used{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1x61'
+ - series: 'kube_persistentvolumeclaim_access_mode{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc", access_mode="ReadOnlyMany", service="kube-state-metrics"}'
+ values: '1x61'
+ alert_rule_test:
+ - eval_time: 61m
+ alertname: KubePersistentVolumeInodesFillingUp
+
+- interval: 1m
+ input_series:
+ - series: 'kubelet_volume_stats_inodes_free{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024-10x61'
+ - series: 'kubelet_volume_stats_inodes{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '32768+0x61'
+ - series: 'kubelet_volume_stats_inodes_used{job="kubelet",namespace="monitoring",persistentvolumeclaim="somepvc"}'
+ values: '1024+10x61'
+ - series: 'kube_persistentvolumeclaim_access_mode{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc", access_mode="ReadWriteOnce", service="kube-state-metrics"}'
+ values: '1x61'
+ - series: 'kube_persistentvolumeclaim_labels{job="ksm",namespace="monitoring",persistentvolumeclaim="somepvc",label_excluded_from_alerts="true"}'
+ values: '1x61'
+ alert_rule_test:
+ - eval_time: 61m
+ alertname: KubePersistentVolumeInodesFillingUp
+
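+# KubeletTooManyPods: three Running pods against a pod capacity of three puts
+# the kubelet at 100%, above the mixin's 95% threshold, so the info alert
+# fires once its 15 minute for-clause elapses.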
+- interval: 1m
+ input_series:
+ - series: 'kube_node_status_capacity{resource="pods",instance="172.17.0.5:8443",node="minikube",job="kube-state-metrics", namespace="kube-system"}'
+ values: '3+0x15'
+ - series: 'kube_pod_info{endpoint="https-main",instance="172.17.0.5:8443",job="kube-state-metrics",namespace="kube-system",node="minikube",pod="pod-1",service="kube-state-metrics"}'
+ values: '1+0x15'
+ - series: 'kube_pod_status_phase{endpoint="https-main",instance="172.17.0.5:8443",job="kube-state-metrics",namespace="kube-system",phase="Running",pod="pod-1",service="kube-state-metrics"}'
+ values: '1+0x15'
+ - series: 'kube_pod_info{endpoint="https-main",instance="172.17.0.5:8443",job="kube-state-metrics",namespace="kube-system",node="minikube",pod="pod-2",service="kube-state-metrics"}'
+ values: '1+0x15'
+ - series: 'kube_pod_status_phase{endpoint="https-main",instance="172.17.0.5:8443",job="kube-state-metrics",namespace="kube-system",phase="Running",pod="pod-2",service="kube-state-metrics"}'
+ values: '1+0x15'
+ - series: 'kube_pod_info{endpoint="https-main",instance="172.17.0.5:8443",job="kube-state-metrics",namespace="kube-system",node="minikube",pod="pod-3",service="kube-state-metrics"}'
+ values: '1+0x15'
+ - series: 'kube_pod_status_phase{endpoint="https-main",instance="172.17.0.5:8443",job="kube-state-metrics",namespace="kube-system",phase="Running",pod="pod-3",service="kube-state-metrics"}'
+ values: '1+0x15'
+ alert_rule_test:
+ - eval_time: 10m
+ alertname: KubeletTooManyPods
+ - eval_time: 15m
+ alertname: KubeletTooManyPods
+ exp_alerts:
+ - exp_labels:
+ node: minikube
+ severity: info
+ exp_annotations:
+ summary: "Kubelet is running at capacity."
+ description: "Kubelet 'minikube' is running at 100% of its Pod capacity."
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubelettoomanypods
+
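+# namespace_cpu/namespace_memory recording rules: duplicate requests from the
+# two kube-state-metrics instances collapse to the per-pod max, and only pods
+# in an active phase count, so pod-2 (Completed) is excluded and the sums are
+# pod-1's 0.15 CPU and 1e9 bytes.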
+- interval: 1m
+ input_series:
+ - series: 'kube_pod_container_resource_requests{resource="cpu",container="kube-apiserver-67",endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",node="node-1",pod="pod-1",service="ksm"}'
+ values: '0.15+0x10'
+ - series: 'kube_pod_container_resource_requests{resource="cpu",container="kube-apiserver-67",endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",node="node-1",pod="pod-2",service="ksm"}'
+ values: '0.15+0x10'
+ - series: 'kube_pod_container_resource_requests{resource="cpu",container="kube-apiserver-67",endpoint="https-main",instance="ksm-2",job="kube-state-metrics",namespace="kube-apiserver",node="node-1",pod="pod-1",service="ksm"}'
+ values: '0.1+0x10'
+ - series: 'kube_pod_container_resource_requests{resource="memory",container="kube-apiserver-67",endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",node="node-1",pod="pod-1",service="ksm"}'
+ values: '1E9+0x10'
+ - series: 'kube_pod_container_resource_requests{resource="memory",container="kube-apiserver-67",endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",node="node-1",pod="pod-2",service="ksm"}'
+ values: '1E9+0x10'
+ - series: 'kube_pod_container_resource_requests{resource="memory",container="kube-apiserver-67",endpoint="https-main",instance="ksm-2",job="kube-state-metrics",namespace="kube-apiserver",node="node-1",pod="pod-1",service="ksm"}'
+ values: '0.5E9+0x10'
+ # Duplicate kube_pod_status_phase timeseries for the same pod.
+ - series: 'kube_pod_status_phase{endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",phase="Running",pod="pod-1",service="ksm"}'
+ values: '1 stale'
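+ # ('stale' injects a staleness marker, hiding the series from instant
+ # queries from that sample onward.)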
+ - series: 'kube_pod_status_phase{endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",phase="Pending",pod="pod-1",service="ksm"}'
+ values: '1+0x10'
+ - series: 'kube_pod_status_phase{endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",phase="Completed",pod="pod-2",service="ksm"}'
+ values: '1+0x10'
+ - series: 'kube_pod_status_phase{endpoint="https-main",instance="ksm-2",job="kube-state-metrics",namespace="kube-apiserver",phase="Running",pod="pod-1",service="ksm"}'
+ values: '1+0x10'
+ promql_expr_test:
+ - eval_time: 0m
+ expr: namespace_cpu:kube_pod_container_resource_requests:sum
+ exp_samples:
+ - value: 0.15
+ labels: 'namespace_cpu:kube_pod_container_resource_requests:sum{namespace="kube-apiserver"}'
+ - eval_time: 0m
+ expr: namespace_memory:kube_pod_container_resource_requests:sum
+ exp_samples:
+ - value: 1.0e+9
+ labels: 'namespace_memory:kube_pod_container_resource_requests:sum{namespace="kube-apiserver"}'
+ - eval_time: 1m
+ expr: namespace_cpu:kube_pod_container_resource_requests:sum
+ exp_samples:
+ - value: 0.15
+ labels: 'namespace_cpu:kube_pod_container_resource_requests:sum{namespace="kube-apiserver"}'
+ - eval_time: 1m
+ expr: namespace_memory:kube_pod_container_resource_requests:sum
+ exp_samples:
+ - value: 1.0e+9
+ labels: 'namespace_memory:kube_pod_container_resource_requests:sum{namespace="kube-apiserver"}'
+
+- interval: 1m
+ input_series:
+ - series: 'kube_pod_container_resource_requests{resource="cpu",container="kube-apiserver-67",endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",node="node-1",pod="pod-1",service="ksm",cluster="test"}'
+ values: '0.15+0x10'
+ - series: 'kube_pod_container_resource_requests{resource="cpu",container="kube-apiserver-67",endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",node="node-1",pod="pod-2",service="ksm",cluster="test"}'
+ values: '0.15+0x10'
+ - series: 'kube_pod_container_resource_requests{resource="cpu",container="kube-apiserver-67",endpoint="https-main",instance="ksm-2",job="kube-state-metrics",namespace="kube-apiserver",node="node-1",pod="pod-1",service="ksm",cluster="test"}'
+ values: '0.1+0x10'
+ - series: 'kube_pod_container_resource_requests{resource="memory",container="kube-apiserver-67",endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",node="node-1",pod="pod-1",service="ksm",cluster="test"}'
+ values: '1E9+0x10'
+ - series: 'kube_pod_container_resource_requests{resource="memory",container="kube-apiserver-67",endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",node="node-1",pod="pod-2",service="ksm",cluster="test"}'
+ values: '1E9+0x10'
+ - series: 'kube_pod_container_resource_requests{resource="memory",container="kube-apiserver-67",endpoint="https-main",instance="ksm-2",job="kube-state-metrics",namespace="kube-apiserver",node="node-1",pod="pod-1",service="ksm",cluster="test"}'
+ values: '0.5E9+0x10'
+ # Duplicate kube_pod_status_phase timeseries for the same pod.
+ - series: 'kube_pod_status_phase{endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",phase="Running",pod="pod-1",service="ksm",cluster="test"}'
+ values: '1 stale'
+ - series: 'kube_pod_status_phase{endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",phase="Pending",pod="pod-1",service="ksm",cluster="test"}'
+ values: '1+0x10'
+ - series: 'kube_pod_status_phase{endpoint="https-main",instance="ksm-1",job="kube-state-metrics",namespace="kube-apiserver",phase="Completed",pod="pod-2",service="ksm",cluster="test"}'
+ values: '1+0x10'
+ - series: 'kube_pod_status_phase{endpoint="https-main",instance="ksm-2",job="kube-state-metrics",namespace="kube-apiserver",phase="Running",pod="pod-1",service="ksm",cluster="test"}'
+ values: '1+0x10'
+ promql_expr_test:
+ - eval_time: 0m
+ expr: namespace_cpu:kube_pod_container_resource_requests:sum
+ exp_samples:
+ - value: 0.15
+ labels: 'namespace_cpu:kube_pod_container_resource_requests:sum{namespace="kube-apiserver",cluster="test"}'
+ - eval_time: 0m
+ expr: namespace_memory:kube_pod_container_resource_requests:sum
+ exp_samples:
+ - value: 1.0e+9
+ labels: 'namespace_memory:kube_pod_container_resource_requests:sum{namespace="kube-apiserver",cluster="test"}'
+ - eval_time: 1m
+ expr: namespace_cpu:kube_pod_container_resource_requests:sum
+ exp_samples:
+ - value: 0.15
+ labels: 'namespace_cpu:kube_pod_container_resource_requests:sum{namespace="kube-apiserver",cluster="test"}'
+ - eval_time: 1m
+ expr: namespace_memory:kube_pod_container_resource_requests:sum
+ exp_samples:
+ - value: 1.0e+9
+ labels: 'namespace_memory:kube_pod_container_resource_requests:sum{namespace="kube-apiserver",cluster="test"}'
+
+- interval: 1m
+ input_series:
+ # Create a histogram where all of the last 10 samples are in the +Inf (> 10 seconds) bucket.
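+ # histogram_quantile() then returns the upper bound of the highest finite
+ # bucket (10 seconds), which is the 99th percentile duration the alert
+ # reports.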
+ - series: 'kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", le="0.005", instance="10.0.2.15:10250"}'
+ values: '1+0x10'
+ - series: 'kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", le="0.01", instance="10.0.2.15:10250"}'
+ values: '1+0x10'
+ - series: 'kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", le="0.025", instance="10.0.2.15:10250"}'
+ values: '1+0x10'
+ - series: 'kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", le="0.05", instance="10.0.2.15:10250"}'
+ values: '1+0x10'
+ - series: 'kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", le="0.1", instance="10.0.2.15:10250"}'
+ values: '1+0x10'
+ - series: 'kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", le="0.25", instance="10.0.2.15:10250"}'
+ values: '1+0x10'
+ - series: 'kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", le="0.5", instance="10.0.2.15:10250"}'
+ values: '1+0x10'
+ - series: 'kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", le="1", instance="10.0.2.15:10250"}'
+ values: '1+0x10'
+ - series: 'kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", le="2.5", instance="10.0.2.15:10250"}'
+ values: '1+0x10'
+ - series: 'kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", le="5", instance="10.0.2.15:10250"}'
+ values: '1+0x10'
+ - series: 'kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", le="10", instance="10.0.2.15:10250"}'
+ values: '1+0x10'
+ - series: 'kubelet_pleg_relist_duration_seconds_bucket{job="kubelet", le="+Inf", instance="10.0.2.15:10250"}'
+ values: '30+1x10'
+ - series: 'kubelet_node_name{endpoint="https-metrics",instance="10.0.2.15:10250",job="kubelet",namespace="kube-system",node="minikube",service="kubelet"}'
+ values: '1 1 1 1 1 1 1 1 1 1'
+ alert_rule_test:
+ - eval_time: 10m
+ alertname: KubeletPlegDurationHigh
+ exp_alerts:
+ - exp_labels:
+ instance: 10.0.2.15:10250
+ node: minikube
+ quantile: 0.99
+ severity: warning
+ exp_annotations:
+ summary: "Kubelet Pod Lifecycle Event Generator is taking too long to relist."
+ description: 'The Kubelet Pod Lifecycle Event Generator has a 99th percentile duration of 10 seconds on node minikube.'
+ runbook_url: 'https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletplegdurationhigh'
+
+- interval: 1m
+ input_series:
+ - series: 'kube_node_status_condition{condition="Ready",endpoint="https-main",instance="10.0.2.15:10250",job="kube-state-metrics",namespace="monitoring",node="minikube",pod="kube-state-metrics-b894d84cc-d6htw",service="kube-state-metrics",status="true"}'
+ values: '1 0 1 0 1 0 0 0 1 0 1 0 0 0 1 0 1 0 0 1'
+ alert_rule_test:
+ - eval_time: 18m
+ alertname: KubeNodeReadinessFlapping
+ exp_alerts:
+ - exp_labels:
+ node: minikube
+ severity: warning
+ exp_annotations:
+ summary: "Node readiness status is flapping."
+ description: 'The readiness status of node minikube has changed 10 times in the last 15 minutes.'
+ runbook_url: 'https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubenodereadinessflapping'
+
+# Verify that node:node_num_cpu:sum doesn't trigger many-to-many errors.
+- interval: 1m
+ input_series:
+ - series: 'node_cpu_seconds_total{cpu="0",endpoint="https",instance="instance1",job="node-exporter",mode="idle",namespace="openshift-monitoring",pod="node-exporter-1",service="node-exporter"}'
+ values: '1 1'
+ - series: 'node_cpu_seconds_total{cpu="1",endpoint="https",instance="instance1",job="node-exporter",mode="idle",namespace="openshift-monitoring",pod="node-exporter-1",service="node-exporter"}'
+ values: '1 1'
+ - series: 'kube_pod_info{namespace="openshift-monitoring",node="node-1",pod="node-exporter-1",job="kube-state-metrics",instance="10.129.2.7:8443"}'
+ values: '1 1'
+ - series: 'kube_pod_info{namespace="openshift-monitoring",node="node-1",pod="alertmanager-0",job="kube-state-metrics",instance="10.129.2.7:8443"}'
+ values: '1 stale'
+ - series: 'kube_pod_info{namespace="openshift-monitoring",node="node-2",pod="alertmanager-0",job="kube-state-metrics",instance="10.129.2.7:8443"}'
+ values: '1 1'
+ promql_expr_test:
+ - eval_time: 0m
+ expr: node:node_num_cpu:sum
+ exp_samples:
+ - value: 2
+ labels: 'node:node_num_cpu:sum{node="node-1"}'
+ - eval_time: 1m
+ expr: node:node_num_cpu:sum
+ exp_samples:
+ - value: 2
+ labels: 'node:node_num_cpu:sum{node="node-1"}'
+
+# Verify that node:node_num_cpu:sum doesn't trigger many-to-many errors when
+# node_namespace_pod:kube_pod_info: has duplicate entries for the same
+# (namespace,pod) tuple. This can happen when Prometheus is restarted because
+# it didn't add stale markers to the "old" series on shutdown.
+- interval: 1m
+ input_series:
+ - series: 'node_cpu_seconds_total{cpu="0",endpoint="https",instance="instance1",job="node-exporter",mode="idle",namespace="openshift-monitoring",pod="node-exporter-1",service="node-exporter"}'
+ values: '1 1'
+ - series: 'node_cpu_seconds_total{cpu="0",endpoint="https",instance="instance2",job="node-exporter",mode="idle",namespace="openshift-monitoring",pod="node-exporter-2",service="node-exporter"}'
+ values: '1 1'
+ - series: 'node_namespace_pod:kube_pod_info:{node="node-1",namespace="openshift-monitoring",pod="node-exporter-1"}'
+ values: '1 1'
+ - series: 'node_namespace_pod:kube_pod_info:{node="node-2",namespace="openshift-monitoring",pod="node-exporter-2"}'
+ values: '1 1'
+ # Series for the "old" Prometheus instance.
+ - series: 'node_namespace_pod:kube_pod_info:{node="node-1",namespace="openshift-monitoring",pod="prometheus-0"}'
+ values: '1'
+ # Series for the "new" Prometheus instance.
+ - series: 'node_namespace_pod:kube_pod_info:{node="node-2",namespace="openshift-monitoring",pod="prometheus-0"}'
+ values: 'stale 1'
+ promql_expr_test:
+ - eval_time: 0m
+ expr: node:node_num_cpu:sum
+ exp_samples:
+ - value: 1
+ labels: 'node:node_num_cpu:sum{node="node-1"}'
+ - value: 1
+ labels: 'node:node_num_cpu:sum{node="node-2"}'
+ - eval_time: 1m
+ expr: node:node_num_cpu:sum
+ exp_samples:
+ - value: 1
+ labels: 'node:node_num_cpu:sum{node="node-1"}'
+ - value: 1
+ labels: 'node:node_num_cpu:sum{node="node-2"}'
+
+- interval: 1m
+ input_series:
+ - series: 'kube_pod_owner{endpoint="https",instance="instance1",job="kube-state-metrics",namespace="ns1",owner_is_controller="true",owner_kind="ReplicaSet",owner_name="ds-7cc77d965f",pod="ds-7cc77d965f-cgsdv",service="ksm"}'
+ values: '1 1'
+ - series: 'kube_pod_owner{endpoint="https",instance="instance2",job="kube-state-metrics",namespace="ns1",owner_is_controller="true",owner_kind="ReplicaSet",owner_name="ds-7cc77d965f",pod="ds-7cc77d965f-cgsdv",service="ksm"}'
+ values: '1 stale'
+ - series: 'kube_replicaset_owner{endpoint="https",instance="instance1",job="kube-state-metrics",namespace="ns1",owner_is_controller="true",owner_kind="Deployment",owner_name="ds",pod="ds-777f6bf798-kq7tj",replicaset="ds-7cc77d965f",service="ksm"}'
+ values: '1 1'
+ - series: 'kube_replicaset_owner{endpoint="https",instance="instance2",job="kube-state-metrics",namespace="ns1",owner_is_controller="true",owner_kind="Deployment",owner_name="ds",pod="ds-777f6bf798-kq7tj",replicaset="ds-7cc77d965f",service="ksm"}'
+ values: '1 stale'
+ promql_expr_test:
+ - eval_time: 0m
+ expr: namespace_workload_pod:kube_pod_owner:relabel
+ exp_samples:
+ - value: 1
+ labels: 'namespace_workload_pod:kube_pod_owner:relabel{namespace="ns1", pod="ds-7cc77d965f-cgsdv", workload="ds", workload_type="deployment"}'
+ - eval_time: 1m
+ expr: namespace_workload_pod:kube_pod_owner:relabel
+ exp_samples:
+ - value: 1
+ labels: 'namespace_workload_pod:kube_pod_owner:relabel{namespace="ns1", pod="ds-7cc77d965f-cgsdv", workload="ds", workload_type="deployment"}'
+
+- interval: 1m
+ input_series:
+ - series: 'kube_pod_status_phase{endpoint="https",instance="instance1",job="kube-state-metrics",namespace="ns1",phase="Pending",pod="pod-ds-7cc77d965f-cgsdv",service="ksm"}'
+ values: '1+0x20'
+ - series: 'kube_pod_owner{endpoint="https",instance="instance1",job="kube-state-metrics",namespace="ns1",owner_is_controller="false",owner_kind="<None>",owner_name="ds-7cc77d965f",pod="pod-ds-7cc77d965f-cgsdv",service="ksm"}'
+ values: '1+0x20'
+ - series: 'kube_pod_owner{endpoint="https",instance="instance1",job="kube-state-metrics",namespace="ns1",owner_is_controller="true",owner_kind="ReplicaSet",owner_name="ds-7cc77d965f",pod="pod-ds-7cc77d965f-cgsdv",service="ksm"}'
+ values: '1+0x20'
+ alert_rule_test:
+ - eval_time: 15m
+ alertname: KubePodNotReady
+ exp_alerts:
+ - exp_labels:
+ namespace: ns1
+ pod: pod-ds-7cc77d965f-cgsdv
+ severity: warning
+ exp_annotations:
+ summary: "Pod has been in a non-ready state for more than 15 minutes."
+ description: "Pod ns1/pod-ds-7cc77d965f-cgsdv has been in a non-ready state for longer than 15 minutes."
+ runbook_url: "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodnotready"
+
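+# node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate: a
+# counter rising by 3 every minute gives irate = 3/60 = 0.05 cores, and
+# alertmanager-main-1 drops out because its kube_pod_info has no node label.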
+- interval: 1m
+ input_series:
+ - series: 'container_cpu_usage_seconds_total{container="alertmanager",cpu="total",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",pod="alertmanager-main-0",service="kubelet"}'
+ values: '0+3x5'
+ - series: 'container_cpu_usage_seconds_total{container="alertmanager",cpu="total",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",pod="alertmanager-main-1",service="kubelet"}'
+ values: '0+3x5'
+ # Duplicate timeseries from different instances.
+ - series: 'kube_pod_info{namespace="monitoring",node="node1",pod="alertmanager-main-0",job="kube-state-metrics",instance="instance1"}'
+ values: '1+0x5'
+ - series: 'kube_pod_info{namespace="monitoring",node="node1",pod="alertmanager-main-0",job="kube-state-metrics",instance="instance2"}'
+ values: '1+0x5'
+ # Missing node label.
+ - series: 'kube_pod_info{namespace="monitoring",pod="alertmanager-main-1",job="kube-state-metrics",instance="instance1"}'
+ values: '1+0x5'
+ promql_expr_test:
+ - eval_time: 5m
+ expr: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate
+ exp_samples:
+ - value: 5.0e-2
+ labels: 'node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{namespace="monitoring", pod="alertmanager-main-0", container="alertmanager", node="node1"}'
+
+- interval: 1m
+ input_series:
+ - series: 'container_memory_working_set_bytes{container="alertmanager",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",pod="alertmanager-main-0",service="kubelet"}'
+ values: '1000+0x5'
+ - series: 'container_memory_working_set_bytes{container="alertmanager",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",pod="alertmanager-main-1",service="kubelet"}'
+ values: '1000+0x5'
+ # Duplicate timeseries from different instances.
+ - series: 'kube_pod_info{namespace="monitoring",node="node1",pod="alertmanager-main-0",job="kube-state-metrics",instance="instance1"}'
+ values: '1+0x5'
+ - series: 'kube_pod_info{namespace="monitoring",node="node1",pod="alertmanager-main-0",job="kube-state-metrics",instance="instance2"}'
+ values: '1+0x5'
+ # Missing node label.
+ - series: 'kube_pod_info{namespace="monitoring",pod="alertmanager-main-1",job="kube-state-metrics",instance="instance1"}'
+ values: '1+0x5'
+ promql_expr_test:
+ - eval_time: 5m
+ expr: node_namespace_pod_container:container_memory_working_set_bytes
+ exp_samples:
+ - value: 1.0e+3
+ labels: 'node_namespace_pod_container:container_memory_working_set_bytes{container="alertmanager",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",node="node1",pod="alertmanager-main-0",service="kubelet"}'
+
+- interval: 1m
+ input_series:
+ - series: 'container_memory_rss{container="alertmanager",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",pod="alertmanager-main-0",service="kubelet"}'
+ values: '1000+0x5'
+ - series: 'container_memory_rss{container="alertmanager",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",pod="alertmanager-main-1",service="kubelet"}'
+ values: '1000+0x5'
+ # Duplicate timeseries from different instances.
+ - series: 'kube_pod_info{namespace="monitoring",node="node1",pod="alertmanager-main-0",job="kube-state-metrics",instance="instance1"}'
+ values: '1+0x5'
+ - series: 'kube_pod_info{namespace="monitoring",node="node1",pod="alertmanager-main-0",job="kube-state-metrics",instance="instance2"}'
+ values: '1+0x5'
+ # Missing node label.
+ - series: 'kube_pod_info{namespace="monitoring",pod="alertmanager-main-1",job="kube-state-metrics",instance="instance1"}'
+ values: '1+0x5'
+ promql_expr_test:
+ - eval_time: 5m
+ expr: node_namespace_pod_container:container_memory_rss
+ exp_samples:
+ - value: 1.0e+3
+ labels: 'node_namespace_pod_container:container_memory_rss{container="alertmanager",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",node="node1",pod="alertmanager-main-0",service="kubelet"}'
+
+- interval: 1m
+ input_series:
+ - series: 'container_memory_cache{container="alertmanager",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",pod="alertmanager-main-0",service="kubelet"}'
+ values: '1000+0x5'
+ - series: 'container_memory_cache{container="alertmanager",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",pod="alertmanager-main-1",service="kubelet"}'
+ values: '1000+0x5'
+ # Duplicate timeseries from different instances.
+ - series: 'kube_pod_info{namespace="monitoring",node="node1",pod="alertmanager-main-0",job="kube-state-metrics",instance="instance1"}'
+ values: '1+0x5'
+ - series: 'kube_pod_info{namespace="monitoring",node="node1",pod="alertmanager-main-0",job="kube-state-metrics",instance="instance2"}'
+ values: '1+0x5'
+ # Missing node label.
+ - series: 'kube_pod_info{namespace="monitoring",pod="alertmanager-main-1",job="kube-state-metrics",instance="instance1"}'
+ values: '1+0x5'
+ promql_expr_test:
+ - eval_time: 5m
+ expr: node_namespace_pod_container:container_memory_cache
+ exp_samples:
+ - value: 1.0e+3
+ labels: 'node_namespace_pod_container:container_memory_cache{container="alertmanager",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",node="node1",pod="alertmanager-main-0",service="kubelet"}'
+
+- interval: 1m
+ input_series:
+ - series: 'container_memory_swap{container="alertmanager",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",pod="alertmanager-main-0",service="kubelet"}'
+ values: '1000+0x5'
+ - series: 'container_memory_swap{container="alertmanager",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",pod="alertmanager-main-1",service="kubelet"}'
+ values: '1000+0x5'
+ # Duplicate timeseries from different instances.
+ - series: 'kube_pod_info{namespace="monitoring",node="node1",pod="alertmanager-main-0",job="kube-state-metrics",instance="instance1"}'
+ values: '1+0x5'
+ - series: 'kube_pod_info{namespace="monitoring",node="node1",pod="alertmanager-main-0",job="kube-state-metrics",instance="instance2"}'
+ values: '1+0x5'
+ # Missing node label.
+ - series: 'kube_pod_info{namespace="monitoring",pod="alertmanager-main-1",job="kube-state-metrics",instance="instance1"}'
+ values: '1+0x5'
+ promql_expr_test:
+ - eval_time: 5m
+ expr: node_namespace_pod_container:container_memory_swap
+ exp_samples:
+ - value: 1.0e+3
+ labels: 'node_namespace_pod_container:container_memory_swap{container="alertmanager",endpoint="https",id="/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3426a9c5_53d6_4736_9ca8_f575828e3e4b.slice/crio-f0d7fb2c909605aad16946ff065a42b25cdcdb812459e712ecdd6bce8a3ed6cb.scope",image="quay.io/prometheus/alertmanager:latest",instance="instance1",job="cadvisor",name="name1",namespace="monitoring",node="node1",pod="alertmanager-main-0",service="kubelet"}'
+- interval: 1m
+ # Current unequal desired and not progressing.
+ input_series:
+ - series: 'kube_daemonset_status_current_number_scheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 4 4 3 4 4 4 3 4 4 4 3 4 4 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4 4'
+ - series: 'kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4'
+ - series: 'kube_daemonset_status_number_misscheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
+ - series: 'kube_daemonset_status_updated_number_scheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4 4'
+ - series: 'kube_daemonset_status_number_available{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 4 3 3 3 4 3 3 3 4 3 3 3 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4'
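+  # Current stays below desired from 16m onward; with the rule's 15m 'for' clause the alert fires by 33m and clears once every counter returns to 4.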
+ alert_rule_test:
+ - eval_time: 32m
+ alertname: KubeDaemonSetRolloutStuck
+ - eval_time: 33m
+ alertname: KubeDaemonSetRolloutStuck
+ exp_alerts:
+ - exp_labels:
+ job: kube-state-metrics
+ namespace: monitoring
+ daemonset: node-exporter
+ severity: warning
+ exp_annotations:
+ summary: "DaemonSet rollout is stuck."
+ description: 'DaemonSet monitoring/node-exporter has not finished or progressed for at least 15 minutes.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetrolloutstuck
+ - eval_time: 34m
+ alertname: KubeDaemonSetRolloutStuck
+# KubeDeploymentRolloutStuck
+- interval: 1m
+ input_series:
+ - series: 'kube_deployment_status_condition{job="kube-state-metrics",namespace="monitoring",deployment="stuck", condition="Progressing", status="false"}'
+ values: '1+0x17 0+0x5'
+ alert_rule_test:
+ - eval_time: 14m
+ alertname: KubeDeploymentRolloutStuck
+ - eval_time: 16m
+ alertname: KubeDeploymentRolloutStuck
+ exp_alerts:
+ - exp_labels:
+ job: kube-state-metrics
+ namespace: monitoring
+ deployment: stuck
+ severity: warning
+ condition: Progressing
+ status: "false"
+ exp_annotations:
+ summary: 'Deployment rollout is not progressing.'
+ description: 'Rollout of deployment monitoring/stuck is not progressing for longer than 15 minutes.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedeploymentrolloutstuck
+ - eval_time: 18m
+ alertname: KubeDeploymentRolloutStuck
+- interval: 1m
+  # Misscheduled is non-zero.
+ input_series:
+ - series: 'kube_daemonset_status_current_number_scheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 4 4 3 4 4 4 3 4 4 4 3 4 4 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4 4'
+ - series: 'kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4'
+ - series: 'kube_daemonset_status_number_misscheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0'
+ - series: 'kube_daemonset_status_updated_number_scheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4 4'
+ - series: 'kube_daemonset_status_number_available{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 4 3 3 3 4 3 3 3 4 3 3 3 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4'
+ alert_rule_test:
+ - eval_time: 32m
+ alertname: KubeDaemonSetRolloutStuck
+ - eval_time: 33m
+ alertname: KubeDaemonSetRolloutStuck
+ exp_alerts:
+ - exp_labels:
+ job: kube-state-metrics
+ namespace: monitoring
+ daemonset: node-exporter
+ severity: warning
+ exp_annotations:
+ summary: "DaemonSet rollout is stuck."
+ description: 'DaemonSet monitoring/node-exporter has not finished or progressed for at least 15 minutes.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetrolloutstuck
+ - eval_time: 34m
+ alertname: KubeDaemonSetRolloutStuck
+- interval: 1m
+ # Updated number unequal desired.
+ input_series:
+ - series: 'kube_daemonset_status_current_number_scheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 4 4 3 4 4 4 3 4 4 4 3 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4'
+ - series: 'kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4'
+ - series: 'kube_daemonset_status_number_misscheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
+ - series: 'kube_daemonset_status_updated_number_scheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4 4'
+ - series: 'kube_daemonset_status_number_available{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 4 3 3 3 4 3 3 3 4 3 3 3 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4'
+ alert_rule_test:
+ - eval_time: 32m
+ alertname: KubeDaemonSetRolloutStuck
+ - eval_time: 33m
+ alertname: KubeDaemonSetRolloutStuck
+ exp_alerts:
+ - exp_labels:
+ job: kube-state-metrics
+ namespace: monitoring
+ daemonset: node-exporter
+ severity: warning
+ exp_annotations:
+ summary: "DaemonSet rollout is stuck."
+ description: 'DaemonSet monitoring/node-exporter has not finished or progressed for at least 15 minutes.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetrolloutstuck
+ - eval_time: 34m
+ alertname: KubeDaemonSetRolloutStuck
+- interval: 1m
+ # Number available unequal desired.
+ input_series:
+ - series: 'kube_daemonset_status_current_number_scheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 4 4 3 4 4 4 3 4 4 4 3 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4'
+ - series: 'kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4'
+ - series: 'kube_daemonset_status_number_misscheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
+ - series: 'kube_daemonset_status_updated_number_scheduled{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 0 0 0 1 1 1 1 2 2 2 2 3 3 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4'
+ - series: 'kube_daemonset_status_number_available{job="kube-state-metrics",namespace="monitoring",daemonset="node-exporter"}'
+ values: '4 4 4 3 3 3 4 3 3 3 4 3 3 3 4 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 4'
+ alert_rule_test:
+ - eval_time: 34m
+ alertname: KubeDaemonSetRolloutStuck
+ - eval_time: 35m
+ alertname: KubeDaemonSetRolloutStuck
+ exp_alerts:
+ - exp_labels:
+ job: kube-state-metrics
+ namespace: monitoring
+ daemonset: node-exporter
+ severity: warning
+ exp_annotations:
+ summary: "DaemonSet rollout is stuck."
+ description: 'DaemonSet monitoring/node-exporter has not finished or progressed for at least 15 minutes.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubedaemonsetrolloutstuck
+ - eval_time: 36m
+ alertname: KubeDaemonSetRolloutStuck
+
+- interval: 1m
+ input_series:
+ - series: 'kubelet_certificate_manager_client_ttl_seconds{job="kubelet",namespace="monitoring",node="minikube"}'
+ values: '86400-60x1'
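+  # The TTL starts at exactly 86400s (24h) and drops by 60s per minute, so the critical (less than 24h) alert joins the warning from the 1m sample onward.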
+ alert_rule_test:
+ - eval_time: 0m
+ alertname: KubeletClientCertificateExpiration
+ exp_alerts:
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ node: minikube
+ severity: warning
+ exp_annotations:
+ summary: "Kubelet client certificate is about to expire."
+ description: 'Client certificate for Kubelet on node minikube expires in 1d 0h 0m 0s.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletclientcertificateexpiration
+ - eval_time: 1m
+ alertname: KubeletClientCertificateExpiration
+ exp_alerts:
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ node: minikube
+ severity: warning
+ exp_annotations:
+ summary: "Kubelet client certificate is about to expire."
+ description: 'Client certificate for Kubelet on node minikube expires in 23h 59m 0s.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletclientcertificateexpiration
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ node: minikube
+ severity: critical
+ exp_annotations:
+ summary: "Kubelet client certificate is about to expire."
+ description: 'Client certificate for Kubelet on node minikube expires in 23h 59m 0s.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletclientcertificateexpiration
+
+- interval: 1m
+ input_series:
+ - series: 'kubelet_certificate_manager_server_ttl_seconds{job="kubelet",namespace="monitoring",node="minikube"}'
+ values: '86400-60x1'
+ alert_rule_test:
+ - eval_time: 0m
+ alertname: KubeletServerCertificateExpiration
+ exp_alerts:
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ node: minikube
+ severity: warning
+ exp_annotations:
+ summary: "Kubelet server certificate is about to expire."
+ description: 'Server certificate for Kubelet on node minikube expires in 1d 0h 0m 0s.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletservercertificateexpiration
+ - eval_time: 1m
+ alertname: KubeletServerCertificateExpiration
+ exp_alerts:
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ node: minikube
+ severity: warning
+ exp_annotations:
+ summary: "Kubelet server certificate is about to expire."
+ description: 'Server certificate for Kubelet on node minikube expires in 23h 59m 0s.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletservercertificateexpiration
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ node: minikube
+ severity: critical
+ exp_annotations:
+ summary: "Kubelet server certificate is about to expire."
+ description: 'Server certificate for Kubelet on node minikube expires in 23h 59m 0s.'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletservercertificateexpiration
+
+- interval: 1m
+ input_series:
+ - series: 'kubelet_certificate_manager_client_expiration_renew_errors{job="kubelet",namespace="monitoring",node="minikube"}'
+ values: '0+1x20'
+ alert_rule_test:
+ - eval_time: 16m
+ alertname: KubeletClientCertificateRenewalErrors
+ exp_alerts:
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ node: minikube
+ severity: warning
+ exp_annotations:
+ summary: "Kubelet has failed to renew its client certificate."
+ description: 'Kubelet on node minikube has failed to renew its client certificate (5 errors in the last 5 minutes).'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletclientcertificaterenewalerrors
+
+
+- interval: 1m
+ input_series:
+ - series: 'kubelet_server_expiration_renew_errors{job="kubelet",namespace="monitoring",node="minikube"}'
+ values: '0+1x20'
+ alert_rule_test:
+ - eval_time: 16m
+ alertname: KubeletServerCertificateRenewalErrors
+ exp_alerts:
+ - exp_labels:
+ job: kubelet
+ namespace: monitoring
+ node: minikube
+ severity: warning
+ exp_annotations:
+ summary: "Kubelet has failed to renew its server certificate."
+ description: 'Kubelet on node minikube has failed to renew its server certificate (5 errors in the last 5 minutes).'
+ runbook_url: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeletservercertificaterenewalerrors
+
+- interval: 1m
+ input_series:
+ - series: 'kube_job_failed{instance="instance1",condition="true",job="kube-state-metrics",job_name="job-1597623120",namespace="ns1"}'
+ values: '1+0x20'
+ alert_rule_test:
+ - eval_time: 15m
+ alertname: KubeJobFailed
+ exp_alerts:
+ - exp_labels:
+ namespace: ns1
+ job_name: job-1597623120
+ severity: warning
+ condition: true
+ instance: instance1
+ job: kube-state-metrics
+ exp_annotations:
+ summary: "Job failed to complete."
+ description: "Job ns1/job-1597623120 failed to complete. Removing failed job after investigation should clear this alert."
+ runbook_url: "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobfailed"
+
+- interval: 1m
+ input_series:
+ - series: 'kube_job_status_start_time{namespace="ns1", job="kube-state-metrics", instance="instance1", job_name="job1"}'
+ values: '0+0x200 _x500 0+0x40'
+ - series: 'kube_job_status_active{namespace="ns1", job="kube-state-metrics", instance="instance1", job_name="job1"}'
+ values: '1x200 _x500 1x40'
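+  # '_x500' is a run of absent samples; when the series returns, start_time is still 0, so the job has been running for more than 12h by the 12h1m evaluation.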
+ alert_rule_test:
+ - eval_time: 6h
+ alertname: KubeJobNotCompleted
+ - eval_time: 12h1m
+ alertname: KubeJobNotCompleted
+ exp_alerts:
+ - exp_labels:
+ namespace: ns1
+ job_name: job1
+ severity: warning
+ exp_annotations:
+ summary: "Job did not complete in time"
+ description: "Job ns1/job1 is taking more than 12h 0m 0s to complete."
+ runbook_url: "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubejobnotcompleted"
+
+- interval: 1m
+ input_series:
+ - series: 'kube_job_status_start_time{namespace="ns1", job="kube-state-metrics", instance="instance1", job_name="job1"}'
+ values: '0+0x740'
+ - series: 'kube_job_status_active{namespace="ns1", job="kube-state-metrics", instance="instance1", job_name="job1"}'
+ values: '1+0x710 0x30'
+ alert_rule_test:
+ - eval_time: 6h
+ alertname: KubeJobNotCompleted
+ - eval_time: 12h
+ alertname: KubeJobNotCompleted
+
+- interval: 1m
+ input_series:
+ - series: 'apiserver_request_terminations_total{job="kube-apiserver",apiserver="kube-apiserver"}'
+ values: '1+1x10'
+ - series: 'apiserver_request_total{job="kube-apiserver",apiserver="kube-apiserver"}'
+ values: '1+2x10'
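+  # Terminations grow at 1/min against 2/min of served requests, so the terminated share is 1 / (1 + 2) ≈ 33.33%.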
+ alert_rule_test:
+ - eval_time: 5m # alert hasn't fired
+ alertname: KubeAPITerminatedRequests
+ - eval_time: 10m # alert fired
+ alertname: KubeAPITerminatedRequests
+ exp_alerts:
+ - exp_labels:
+ severity: warning
+ exp_annotations:
+ summary: "The kubernetes apiserver has terminated 33.33% of its incoming requests."
+ description: "The kubernetes apiserver has terminated 33.33% of its incoming requests."
+ runbook_url: "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubeapiterminatedrequests"
+
+- interval: 1m
+ input_series:
+ - series: 'kube_pod_container_status_waiting_reason{reason="CrashLoopBackOff",namespace="test",pod="static-web",container="script",job="kube-state-metrics"}'
+ values: '1 1 stale _x3 1 1 stale _x2 1+0x4 stale'
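+  # 'stale' injects an explicit staleness marker while '_' merely omits the sample; range vectors ignore staleness markers, which is why the alert lingers below.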
+ alert_rule_test:
+ - eval_time: 10m # alert hasn't fired
+ alertname: KubePodCrashLooping
+ - eval_time: 16m # alert fired
+ alertname: KubePodCrashLooping
+ exp_alerts:
+ - exp_labels:
+ severity: "warning"
+ container: "script"
+ job: "kube-state-metrics"
+ namespace: "test"
+ pod: "static-web"
+ reason: "CrashLoopBackOff"
+ exp_annotations:
+ description: 'Pod test/static-web (script) is in waiting state (reason: "CrashLoopBackOff").'
+ runbook_url: "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodcrashlooping"
+ summary: "Pod is crash looping."
+ - eval_time: 20m
+    alertname: KubePodCrashLooping # still fires for 5 minutes after resolution: the rule looks back over the last 5 minutes of data, and the range vector does not take stale samples into account
+ exp_alerts:
+ - exp_labels:
+ severity: "warning"
+ container: "script"
+ job: "kube-state-metrics"
+ namespace: "test"
+ pod: "static-web"
+ reason: "CrashLoopBackOff"
+ exp_annotations:
+ description: 'Pod test/static-web (script) is in waiting state (reason: "CrashLoopBackOff").'
+ runbook_url: "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubepodcrashlooping"
+ summary: "Pod is crash looping."
+ - eval_time: 21m # alert recovers
+ alertname: KubePodCrashLooping
+
+# When a ResourceQuota defines both cpu and requests.cpu, the smaller of the two is used in the quota calculation.
+- interval: 1m
+ input_series:
+ - series: 'kube_resourcequota{namespace="test", resource="cpu", type="hard", job="kube-state-metrics"}'
+ values: '1000x10'
+ - series: 'kube_resourcequota{namespace="test", resource="requests.cpu", type="hard", job="kube-state-metrics"}'
+ values: '100x10'
+ - series: 'kube_resourcequota{namespace="test1", resource="requests.cpu", type="hard", job="kube-state-metrics"}'
+ values: '50x10'
+ - series: 'kube_node_status_allocatable{namespace="monitoring", node="n1", resource="cpu", job="kube-state-metrics"}'
+ values: '100x10'
+ - series: 'kube_node_status_allocatable{namespace="monitoring", node="n2", resource="cpu", job="kube-state-metrics"}'
+ values: '100x10'
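+  # Hard quota is min(1000, 100) + 50 = 150 against 200 allocatable CPUs (0.75x), below the overcommit threshold, so no alert is expected.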
+ alert_rule_test:
+ - eval_time: 4m
+ alertname: KubeCPUQuotaOvercommit
+ - eval_time: 5m # alert shouldn't fire
+ alertname: KubeCPUQuotaOvercommit
+- interval: 1m
+ input_series:
+ - series: 'kube_resourcequota{namespace="test", resource="cpu", type="hard", job="kube-state-metrics"}'
+ values: '1000x10'
+ - series: 'kube_resourcequota{namespace="test", resource="requests.cpu", type="hard", job="kube-state-metrics"}'
+ values: '200x10'
+ - series: 'kube_resourcequota{namespace="test1", resource="requests.cpu", type="hard", job="kube-state-metrics"}'
+ values: '200x10'
+ - series: 'kube_node_status_allocatable{namespace="monitoring", node="n1", resource="cpu", job="kube-state-metrics"}'
+ values: '100x10'
+ - series: 'kube_node_status_allocatable{namespace="monitoring", node="n2", resource="cpu", job="kube-state-metrics"}'
+ values: '100x10'
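+  # Hard quota is min(1000, 200) + 200 = 400 against 200 allocatable CPUs (2x), so the overcommit alert fires.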
+ alert_rule_test:
+ - eval_time: 4m
+ alertname: KubeCPUQuotaOvercommit
+  - eval_time: 5m # alert fired
+ alertname: KubeCPUQuotaOvercommit
+ exp_alerts:
+ - exp_labels:
+ severity: "warning"
+ exp_annotations:
+ description: 'Cluster has overcommitted CPU resource requests for Namespaces.'
+ runbook_url: "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubecpuquotaovercommit"
+ summary: "Cluster has overcommitted CPU resource requests."
+
+# When a ResourceQuota defines both memory and requests.memory, the smaller of the two is used in the quota calculation.
+- interval: 1m
+ input_series:
+ - series: 'kube_resourcequota{namespace="test", resource="memory", type="hard", job="kube-state-metrics"}'
+ values: '1000x10'
+ - series: 'kube_resourcequota{namespace="test", resource="requests.memory", type="hard", job="kube-state-metrics"}'
+ values: '100x10'
+ - series: 'kube_resourcequota{namespace="test1", resource="requests.memory", type="hard", job="kube-state-metrics"}'
+ values: '50x10'
+ - series: 'kube_node_status_allocatable{namespace="monitoring", node="n1", resource="memory", job="kube-state-metrics"}'
+ values: '100x10'
+ - series: 'kube_node_status_allocatable{namespace="monitoring", node="n2", resource="memory", job="kube-state-metrics"}'
+ values: '100x10'
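+  # Hard quota is min(1000, 100) + 50 = 150 against 200 units of allocatable memory (0.75x), below the overcommit threshold, so no alert is expected.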
+ alert_rule_test:
+ - eval_time: 4m
+ alertname: KubeMemoryQuotaOvercommit
+ - eval_time: 5m # alert shouldn't fire
+ alertname: KubeMemoryQuotaOvercommit
+- interval: 1m
+ input_series:
+ - series: 'kube_resourcequota{namespace="test", resource="memory", type="hard", job="kube-state-metrics"}'
+ values: '1000x10'
+ - series: 'kube_resourcequota{namespace="test", resource="requests.memory", type="hard", job="kube-state-metrics"}'
+ values: '500x10'
+ - series: 'kube_resourcequota{namespace="test1", resource="requests.memory", type="hard", job="kube-state-metrics"}'
+ values: '500x10'
+ - series: 'kube_node_status_allocatable{namespace="monitoring", node="n1", resource="memory", job="kube-state-metrics"}'
+ values: '10x10'
+ - series: 'kube_node_status_allocatable{namespace="monitoring", node="n2", resource="memory", job="kube-state-metrics"}'
+ values: '10x10'
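+  # Hard quota is min(1000, 500) + 500 = 1000 against 20 units of allocatable memory (50x), so the overcommit alert fires.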
+ alert_rule_test:
+ - eval_time: 4m
+ alertname: KubeMemoryQuotaOvercommit
+  - eval_time: 5m # alert fired
+ alertname: KubeMemoryQuotaOvercommit
+ exp_alerts:
+ - exp_labels:
+ severity: "warning"
+ exp_annotations:
+ description: 'Cluster has overcommitted memory resource requests for Namespaces.'
+ runbook_url: "https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md#alert-name-kubememoryquotaovercommit"
+ summary: "Cluster has overcommitted memory resource requests."
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/.gitignore b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/.gitignore
new file mode 100644
index 0000000..09527a3
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/.gitignore
@@ -0,0 +1,2 @@
+vendor
+dashboards_out
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/.lint b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/.lint
new file mode 100644
index 0000000..59542ce
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/.lint
@@ -0,0 +1,15 @@
+exclusions:
+ target-instance-rule:
+    reason: no need for every query to contain two matchers within every selector - `{job=~"$job", instance=~"$instance"}`
+ template-job-rule:
+ entries:
+ - dashboard: Alertmanager / Overview
+ reason: multi-select is not always required
+ template-instance-rule:
+ entries:
+ - dashboard: Alertmanager / Overview
+ reason: multi-select is not always required
+ panel-units-rule:
+ entries:
+ - dashboard: Alertmanager / Overview
+ reason: Dashboard does not benefit from specific unit specification.
\ No newline at end of file
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/Makefile b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/Makefile
new file mode 100644
index 0000000..9ce9a9f
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/Makefile
@@ -0,0 +1,31 @@
+JSONNET_FMT := jsonnetfmt -n 2 --max-blank-lines 1 --string-style s --comment-style s
+ALERTMANAGER_ALERTS := alertmanager_alerts.yaml
+
+default: vendor build dashboards_out
+
+all: fmt build
+
+vendor:
+ jb install
+
+fmt:
+ find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
+ xargs -n 1 -- $(JSONNET_FMT) -i
+
+lint: build
+ find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
+ while read f; do \
+ $(JSONNET_FMT) "$$f" | diff -u "$$f" -; \
+ done
+
+ mixtool lint mixin.libsonnet
+
+dashboards_out: mixin.libsonnet config.libsonnet $(wildcard dashboards/*)
+ @mkdir -p dashboards_out
+ jsonnet -J vendor -m dashboards_out dashboards.jsonnet
+
+build: vendor
+ mixtool generate alerts mixin.libsonnet > $(ALERTMANAGER_ALERTS)
+
+clean:
+ rm -rf $(ALERTMANAGER_ALERTS)
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/README.md b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/README.md
new file mode 100644
index 0000000..d3190d7
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/README.md
@@ -0,0 +1,30 @@
+# Alertmanager Mixin
+
+The Alertmanager Mixin is a set of configurable, reusable, and extensible
+alerts (and eventually dashboards) for Alertmanager.
+
+The alerts are designed to monitor a cluster of Alertmanager instances. To make
+them work as expected, the Prometheus server the alerts are evaluated on has to
+scrape all Alertmanager instances of the cluster, even if those instances are
+distributed over different locations. All Alertmanager instances in the same
+Alertmanager cluster must have the same `job` label. In turn, if monitoring
+multiple different Alertmanager clusters, instances from different clusters
+must have a different `job` label.
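+
+For example, a Prometheus scrape configuration along these lines (a minimal
+sketch; the targets are placeholders) gives every member of one cluster the
+same `job` label:
+
+```yaml
+scrape_configs:
+  - job_name: alertmanager  # becomes the shared `job` label on all members
+    static_configs:
+      - targets: ['am-0.example.com:9093', 'am-1.example.com:9093']
+```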
+
+The most basic use of the Alertmanager Mixin is to create a YAML file with the
+alerts from it. To do so, you need to have `jsonnetfmt` and `mixtool` installed. If you have a working Go development environment, it's
+easiest to run the following:
+
+```bash
+$ go get github.com/monitoring-mixins/mixtool/cmd/mixtool
+$ go get github.com/google/go-jsonnet/cmd/jsonnetfmt
+```
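+
+(With Go 1.17 or newer, run `go install github.com/monitoring-mixins/mixtool/cmd/mixtool@latest`
+and `go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest` instead, as
+`go get` no longer installs executables.)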
+
+Edit `config.libsonnet` to match your environment and then build
+`alertmanager_alerts.yaml` with the alerts by running:
+
+```bash
+$ make build
+```
+
+For instructions on more advanced uses of mixins, see https://github.com/monitoring-mixins/docs.
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/alerts.jsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/alerts.jsonnet
new file mode 100644
index 0000000..75e7c1b
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/alerts.jsonnet
@@ -0,0 +1 @@
+std.manifestYamlDoc((import 'mixin.libsonnet').prometheusAlerts)
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/alerts.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/alerts.libsonnet
new file mode 100644
index 0000000..720e411
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/alerts.libsonnet
@@ -0,0 +1,169 @@
+{
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'alertmanager.rules',
+ rules: [
+ {
+ alert: 'AlertmanagerFailedReload',
+ expr: |||
+ # Without max_over_time, failed scrapes could create false negatives, see
+ # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
+ max_over_time(alertmanager_config_last_reload_successful{%(alertmanagerSelector)s}[5m]) == 0
+ ||| % $._config,
+ 'for': '10m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ summary: 'Reloading an Alertmanager configuration has failed.',
+ description: 'Configuration has failed to load for %(alertmanagerName)s.' % $._config,
+ },
+ },
+ {
+ alert: 'AlertmanagerMembersInconsistent',
+ expr: |||
+ # Without max_over_time, failed scrapes could create false negatives, see
+ # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details.
+ max_over_time(alertmanager_cluster_members{%(alertmanagerSelector)s}[5m])
+ < on (%(alertmanagerClusterLabels)s) group_left
+ count by (%(alertmanagerClusterLabels)s) (max_over_time(alertmanager_cluster_members{%(alertmanagerSelector)s}[5m]))
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ summary: 'A member of an Alertmanager cluster has not found all other cluster members.',
+ description: 'Alertmanager %(alertmanagerName)s has only found {{ $value }} members of the %(alertmanagerClusterName)s cluster.' % $._config,
+ },
+ },
+ {
+ alert: 'AlertmanagerFailedToSendAlerts',
+ expr: |||
+ (
+ rate(alertmanager_notifications_failed_total{%(alertmanagerSelector)s}[5m])
+ /
+ rate(alertmanager_notifications_total{%(alertmanagerSelector)s}[5m])
+ )
+ > 0.01
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'An Alertmanager instance failed to send notifications.',
+ description: 'Alertmanager %(alertmanagerName)s failed to send {{ $value | humanizePercentage }} of notifications to {{ $labels.integration }}.' % $._config,
+ },
+ },
+ {
+ alert: 'AlertmanagerClusterFailedToSendAlerts',
+ expr: |||
+ min by (%(alertmanagerClusterLabels)s, integration) (
+ rate(alertmanager_notifications_failed_total{%(alertmanagerSelector)s, integration=~`%(alertmanagerCriticalIntegrationsRegEx)s`}[5m])
+ /
+ rate(alertmanager_notifications_total{%(alertmanagerSelector)s, integration=~`%(alertmanagerCriticalIntegrationsRegEx)s`}[5m])
+ )
+ > 0.01
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ summary: 'All Alertmanager instances in a cluster failed to send notifications to a critical integration.',
+ description: 'The minimum notification failure rate to {{ $labels.integration }} sent from any instance in the %(alertmanagerClusterName)s cluster is {{ $value | humanizePercentage }}.' % $._config,
+ },
+ },
+ {
+ alert: 'AlertmanagerClusterFailedToSendAlerts',
+ expr: |||
+ min by (%(alertmanagerClusterLabels)s, integration) (
+ rate(alertmanager_notifications_failed_total{%(alertmanagerSelector)s, integration!~`%(alertmanagerCriticalIntegrationsRegEx)s`}[5m])
+ /
+ rate(alertmanager_notifications_total{%(alertmanagerSelector)s, integration!~`%(alertmanagerCriticalIntegrationsRegEx)s`}[5m])
+ )
+ > 0.01
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'All Alertmanager instances in a cluster failed to send notifications to a non-critical integration.',
+ description: 'The minimum notification failure rate to {{ $labels.integration }} sent from any instance in the %(alertmanagerClusterName)s cluster is {{ $value | humanizePercentage }}.' % $._config,
+ },
+ },
+ {
+ alert: 'AlertmanagerConfigInconsistent',
+ expr: |||
+ count by (%(alertmanagerClusterLabels)s) (
+ count_values by (%(alertmanagerClusterLabels)s) ("config_hash", alertmanager_config_hash{%(alertmanagerSelector)s})
+ )
+ != 1
+ ||| % $._config,
+ 'for': '20m', // A config change across an Alertmanager cluster can take its time. But it's really bad if it persists for too long.
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ summary: 'Alertmanager instances within the same cluster have different configurations.',
+ description: 'Alertmanager instances within the %(alertmanagerClusterName)s cluster have different configurations.' % $._config,
+ },
+ },
+ // Both the following critical alerts, AlertmanagerClusterDown and
+ // AlertmanagerClusterCrashlooping, fire if a whole cluster is
+ // unhealthy. It is implied that a generic warning alert is in place
+ // for individual instances being down or crashlooping.
+ {
+ alert: 'AlertmanagerClusterDown',
+ expr: |||
+ (
+ count by (%(alertmanagerClusterLabels)s) (
+ avg_over_time(up{%(alertmanagerSelector)s}[5m]) < 0.5
+ )
+ /
+ count by (%(alertmanagerClusterLabels)s) (
+ up{%(alertmanagerSelector)s}
+ )
+ )
+ >= 0.5
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ summary: 'Half or more of the Alertmanager instances within the same cluster are down.',
+ description: '{{ $value | humanizePercentage }} of Alertmanager instances within the %(alertmanagerClusterName)s cluster have been up for less than half of the last 5m.' % $._config,
+ },
+ },
+ {
+ alert: 'AlertmanagerClusterCrashlooping',
+ expr: |||
+ (
+ count by (%(alertmanagerClusterLabels)s) (
+ changes(process_start_time_seconds{%(alertmanagerSelector)s}[10m]) > 4
+ )
+ /
+ count by (%(alertmanagerClusterLabels)s) (
+ up{%(alertmanagerSelector)s}
+ )
+ )
+ >= 0.5
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ summary: 'Half or more of the Alertmanager instances within the same cluster are crashlooping.',
+ description: '{{ $value | humanizePercentage }} of Alertmanager instances within the %(alertmanagerClusterName)s cluster have restarted at least 5 times in the last 10m.' % $._config,
+ },
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/config.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/config.libsonnet
new file mode 100644
index 0000000..d0a2892
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/config.libsonnet
@@ -0,0 +1,44 @@
+{
+ _config+:: {
+ local c = self,
+ // alertmanagerSelector is inserted as part of the label selector in
+ // PromQL queries to identify metrics collected from Alertmanager
+ // servers.
+ alertmanagerSelector: 'job="alertmanager"',
+
+ // alertmanagerClusterLabels is a string with comma-separated
+ // labels that are common labels of instances belonging to the
+ // same Alertmanager cluster. Include not only enough labels to
+ // identify cluster members, but also all common labels you want
+ // to keep for resulting cluster-level alerts.
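+    // Example: 'job,namespace' if each Alertmanager cluster runs in its own namespace.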
+ alertmanagerClusterLabels: 'job',
+
+ // alertmanagerNameLabels is a string with comma-separated
+ // labels used to identify different alertmanagers within the same
+ // Alertmanager HA cluster.
+ // If you run Alertmanager on Kubernetes with the Prometheus
+ // Operator, you can make use of the configured target labels for
+ // nicer naming:
+ // alertmanagerNameLabels: 'namespace,pod'
+ alertmanagerNameLabels: 'instance',
+
+ // alertmanagerName is an identifier for alerts. By default, it is built from 'alertmanagerNameLabels'.
+ alertmanagerName: std.join('/', ['{{$labels.%s}}' % [label] for label in std.split(c.alertmanagerNameLabels, ',')]),
+
+ // alertmanagerClusterName is inserted into annotations to name an
+ // Alertmanager cluster. All labels used here must also be present
+ // in alertmanagerClusterLabels above.
+ alertmanagerClusterName: '{{$labels.job}}',
+
+ // alertmanagerCriticalIntegrationsRegEx is matched against the
+ // value of the `integration` label to determine if the
+ // AlertmanagerClusterFailedToSendAlerts is critical or merely a
+ // warning. This can be used to avoid paging about a failed
+ // integration that is itself not used for critical alerts.
+ // Example: @'pagerduty|webhook'
+ alertmanagerCriticalIntegrationsRegEx: @'.*',
+
+ dashboardNamePrefix: 'Alertmanager / ',
+ dashboardTags: ['alertmanager-mixin'],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/dashboards.jsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/dashboards.jsonnet
new file mode 100644
index 0000000..9d913ed
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/dashboards.jsonnet
@@ -0,0 +1,6 @@
+local dashboards = (import 'mixin.libsonnet').grafanaDashboards;
+
+{
+ [name]: dashboards[name]
+ for name in std.objectFields(dashboards)
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/dashboards.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/dashboards.libsonnet
new file mode 100644
index 0000000..e4cc41c
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/dashboards.libsonnet
@@ -0,0 +1 @@
+(import './dashboards/overview.libsonnet')
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/dashboards/overview.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/dashboards/overview.libsonnet
new file mode 100644
index 0000000..27c02d2
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/dashboards/overview.libsonnet
@@ -0,0 +1,154 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+
+{
+ grafanaDashboards+:: {
+
+ local amQuerySelector = std.join(',', ['%s=~"$%s"' % [label, label] for label in std.split($._config.alertmanagerClusterLabels, ',')]),
+ local amNameDashboardLegend = std.join('/', ['{{%s}}' % [label] for label in std.split($._config.alertmanagerNameLabels, ',')]),
+
+ local alertmanagerClusterSelectorTemplates =
+ [
+ template.new(
+ name=label,
+ label=label,
+ datasource='$datasource',
+ query='label_values(alertmanager_alerts, %s)' % label,
+ current='',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ )
+ for label in std.split($._config.alertmanagerClusterLabels, ',')
+ ],
+
+ local integrationTemplate =
+ template.new(
+ name='integration',
+ datasource='$datasource',
+ query='label_values(alertmanager_notifications_total{integration=~"%s"}, integration)' % $._config.alertmanagerCriticalIntegrationsRegEx,
+ current='all',
+ hide='2', // Always hide
+ refresh=2,
+ includeAll=true,
+ sort=1
+ ),
+
+ 'alertmanager-overview.json':
+ local alerts =
+ graphPanel.new(
+ 'Alerts',
+ description='current set of alerts stored in the Alertmanager',
+ datasource='$datasource',
+ span=6,
+ format='none',
+ stack=true,
+ fill=1,
+ legend_show=false,
+ )
+ .addTarget(prometheus.target('sum(alertmanager_alerts{%(amQuerySelector)s}) by (%(alertmanagerClusterLabels)s,%(alertmanagerNameLabels)s)' % $._config { amQuerySelector: amQuerySelector }, legendFormat='%(amNameDashboardLegend)s' % $._config { amNameDashboardLegend: amNameDashboardLegend }));
+
+ local alertsRate =
+ graphPanel.new(
+ 'Alerts receive rate',
+ description='rate of successful and invalid alerts received by the Alertmanager',
+ datasource='$datasource',
+ span=6,
+ format='ops',
+ stack=true,
+ fill=1,
+ legend_show=false,
+ )
+ .addTarget(prometheus.target('sum(rate(alertmanager_alerts_received_total{%(amQuerySelector)s}[$__rate_interval])) by (%(alertmanagerClusterLabels)s,%(alertmanagerNameLabels)s)' % $._config { amQuerySelector: amQuerySelector }, legendFormat='%(amNameDashboardLegend)s Received' % $._config { amNameDashboardLegend: amNameDashboardLegend }))
+ .addTarget(prometheus.target('sum(rate(alertmanager_alerts_invalid_total{%(amQuerySelector)s}[$__rate_interval])) by (%(alertmanagerClusterLabels)s,%(alertmanagerNameLabels)s)' % $._config { amQuerySelector: amQuerySelector }, legendFormat='%(amNameDashboardLegend)s Invalid' % $._config { amNameDashboardLegend: amNameDashboardLegend }));
+
+ local notifications =
+ graphPanel.new(
+ '$integration: Notifications Send Rate',
+ description='rate of successful and invalid notifications sent by the Alertmanager',
+ datasource='$datasource',
+ format='ops',
+ stack=true,
+ fill=1,
+ legend_show=false,
+ repeat='integration'
+ )
+ .addTarget(prometheus.target('sum(rate(alertmanager_notifications_total{%(amQuerySelector)s, integration="$integration"}[$__rate_interval])) by (integration,%(alertmanagerClusterLabels)s,%(alertmanagerNameLabels)s)' % $._config { amQuerySelector: amQuerySelector }, legendFormat='%(amNameDashboardLegend)s Total' % $._config { amNameDashboardLegend: amNameDashboardLegend }))
+ .addTarget(prometheus.target('sum(rate(alertmanager_notifications_failed_total{%(amQuerySelector)s, integration="$integration"}[$__rate_interval])) by (integration,%(alertmanagerClusterLabels)s,%(alertmanagerNameLabels)s)' % $._config { amQuerySelector: amQuerySelector }, legendFormat='%(amNameDashboardLegend)s Failed' % $._config { amNameDashboardLegend: amNameDashboardLegend }));
+
+ local notificationDuration =
+ graphPanel.new(
+ '$integration: Notification Duration',
+ description='latency of notifications sent by the Alertmanager',
+ datasource='$datasource',
+ format='s',
+ stack=false,
+ fill=1,
+ legend_show=false,
+ repeat='integration'
+ )
+ .addTarget(prometheus.target(
+ |||
+ histogram_quantile(0.99,
+ sum(rate(alertmanager_notification_latency_seconds_bucket{%(amQuerySelector)s, integration="$integration"}[$__rate_interval])) by (le,%(alertmanagerClusterLabels)s,%(alertmanagerNameLabels)s)
+ )
+ ||| % $._config { amQuerySelector: amQuerySelector }, legendFormat='%(amNameDashboardLegend)s 99th Percentile' % $._config { amNameDashboardLegend: amNameDashboardLegend }
+ ))
+ .addTarget(prometheus.target(
+ |||
+ histogram_quantile(0.50,
+ sum(rate(alertmanager_notification_latency_seconds_bucket{%(amQuerySelector)s, integration="$integration"}[$__rate_interval])) by (le,%(alertmanagerClusterLabels)s,%(alertmanagerNameLabels)s)
+ )
+ ||| % $._config { amQuerySelector: amQuerySelector }, legendFormat='%(amNameDashboardLegend)s Median' % $._config { amNameDashboardLegend: amNameDashboardLegend }
+ ))
+ .addTarget(prometheus.target(
+ |||
+ sum(rate(alertmanager_notification_latency_seconds_sum{%(amQuerySelector)s, integration="$integration"}[$__rate_interval])) by (%(alertmanagerClusterLabels)s,%(alertmanagerNameLabels)s)
+ /
+ sum(rate(alertmanager_notification_latency_seconds_count{%(amQuerySelector)s, integration="$integration"}[$__rate_interval])) by (%(alertmanagerClusterLabels)s,%(alertmanagerNameLabels)s)
+ ||| % $._config { amQuerySelector: amQuerySelector }, legendFormat='%(amNameDashboardLegend)s Average' % $._config { amNameDashboardLegend: amNameDashboardLegend }
+ ));
+
+ dashboard.new(
+ '%sOverview' % $._config.dashboardNamePrefix,
+ time_from='now-1h',
+ tags=($._config.dashboardTags),
+ timezone='utc',
+ refresh='30s',
+ graphTooltip='shared_crosshair',
+ uid='alertmanager-overview'
+ )
+ .addTemplate(
+ {
+ current: {
+ text: 'Prometheus',
+ value: 'Prometheus',
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: '',
+ type: 'datasource',
+ },
+ )
+ .addTemplates(alertmanagerClusterSelectorTemplates)
+ .addTemplate(integrationTemplate)
+ .addRow(
+ row.new('Alerts')
+ .addPanel(alerts)
+ .addPanel(alertsRate)
+ )
+ .addRow(
+ row.new('Notifications')
+ .addPanel(notifications)
+ .addPanel(notificationDuration)
+ ),
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/jsonnetfile.json b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/jsonnetfile.json
new file mode 100644
index 0000000..650733a
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/jsonnetfile.json
@@ -0,0 +1,15 @@
+{
+ "version": 1,
+ "dependencies": [
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/grafonnet-lib.git",
+ "subdir": "grafonnet"
+ }
+ },
+ "version": "master"
+ }
+ ],
+ "legacyImports": false
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/jsonnetfile.lock.json b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/jsonnetfile.lock.json
new file mode 100644
index 0000000..803febc
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/jsonnetfile.lock.json
@@ -0,0 +1,16 @@
+{
+ "version": 1,
+ "dependencies": [
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/grafonnet-lib.git",
+ "subdir": "grafonnet"
+ }
+ },
+ "version": "55cf4ee53ced2b6d3ce96ecce9fb813b4465be98",
+ "sum": "4/sUV0Kk+o8I+wlYxL9R6EPhL/NiLfYHk+NXlU64RUk="
+ }
+ ],
+ "legacyImports": false
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/mixin.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/mixin.libsonnet
new file mode 100644
index 0000000..22db15c
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/alertmanager/doc/alertmanager-mixin/mixin.libsonnet
@@ -0,0 +1,3 @@
+(import 'config.libsonnet') +
+(import 'alerts.libsonnet') +
+(import 'dashboards.libsonnet')
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/mysqld_exporter/mysqld-mixin/mixin.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/mysqld_exporter/mysqld-mixin/mixin.libsonnet
index 9515cde..2aa5dd4 100644
--- a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/mysqld_exporter/mysqld-mixin/mixin.libsonnet
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/mysqld_exporter/mysqld-mixin/mixin.libsonnet
@@ -6,7 +6,7 @@
// Helper function to ensure that we don't override other rules, by forcing
// the patching of the groups list, and not the overall rules object.
local importRules(rules) = {
- groups+: std.native('parseYaml')(rules)[0].groups,
+ groups+: std.parseYaml(rules).groups,
},
prometheusRules+: importRules(importstr 'rules/rules.yaml'),
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/.gitignore b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/.gitignore
new file mode 100644
index 0000000..522b99f
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/.gitignore
@@ -0,0 +1,4 @@
+jsonnetfile.lock.json
+vendor
+*.yaml
+dashboards_out
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/Makefile b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/Makefile
new file mode 100644
index 0000000..d04b37d
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/Makefile
@@ -0,0 +1,32 @@
+JSONNET_FMT := jsonnetfmt -n 2 --max-blank-lines 2 --string-style s --comment-style s
+
+all: fmt node_alerts.yaml node_rules.yaml dashboards_out lint
+
+fmt:
+ find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
+ xargs -n 1 -- $(JSONNET_FMT) -i
+
+node_alerts.yaml: mixin.libsonnet config.libsonnet $(wildcard alerts/*)
+ jsonnet -S alerts.jsonnet > $@
+
+node_rules.yaml: mixin.libsonnet config.libsonnet $(wildcard rules/*)
+ jsonnet -S rules.jsonnet > $@
+
+dashboards_out: mixin.libsonnet config.libsonnet $(wildcard dashboards/*)
+ @mkdir -p dashboards_out
+ jsonnet -J vendor -m dashboards_out dashboards.jsonnet
+
+lint: node_alerts.yaml node_rules.yaml
+ find . -name 'vendor' -prune -o -name '*.libsonnet' -print -o -name '*.jsonnet' -print | \
+ while read f; do \
+ $(JSONNET_FMT) "$$f" | diff -u "$$f" -; \
+ done
+
+ promtool check rules node_alerts.yaml node_rules.yaml
+
+.PHONY: jb_install
+jb_install:
+ jb install
+
+clean:
+ rm -rf dashboards_out node_alerts.yaml node_rules.yaml
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/README.md b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/README.md
new file mode 100644
index 0000000..824385e
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/README.md
@@ -0,0 +1,45 @@
+# Node Mixin
+
+_This is a work in progress. We aim for it to become a good role model for alerts
+and dashboards eventually, but it is not quite there yet._
+
+The Node Mixin is a set of configurable, reusable, and extensible alerts and
+dashboards based on the metrics exported by the Node Exporter. The mixin creates
+recording and alerting rules for Prometheus and suitable dashboard descriptions
+for Grafana.
+
+To use them, you need to have `jsonnet` (v0.16+) and `jb` installed. If you
+have a working Go development environment, it's easiest to run the following:
+
+```bash
+go install github.com/google/go-jsonnet/cmd/jsonnet@latest
+go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
+go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest
+```
+
+Next, install the dependencies by running the following command in this
+directory:
+
+```bash
+jb install
+```
+
+You can then build the Prometheus rules files `node_alerts.yaml` and
+`node_rules.yaml`:
+
+```bash
+make node_alerts.yaml node_rules.yaml
+```
+
+You can also build a directory `dashboards_out` with the JSON dashboard files
+for Grafana:
+
+```bash
+make dashboards_out
+```
+
+Note that some of the generated dashboards require recording rules specified in
+the previously generated `node_rules.yaml`.
+
+For more advanced uses of mixins, see
+<https://github.com/monitoring-mixins/docs>.
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/alerts.jsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/alerts.jsonnet
new file mode 100644
index 0000000..75e7c1b
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/alerts.jsonnet
@@ -0,0 +1 @@
+std.manifestYamlDoc((import 'mixin.libsonnet').prometheusAlerts)
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/alerts/alerts.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/alerts/alerts.libsonnet
new file mode 100644
index 0000000..1eaedd3
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/alerts/alerts.libsonnet
@@ -0,0 +1,414 @@
+{
+ prometheusAlerts+:: {
+ groups+: [
+ {
+ name: 'node-exporter',
+ rules: [
+ {
+ alert: 'NodeFilesystemSpaceFillingUp',
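+          // predict_linear extrapolates the 6h trend 24h (24*60*60 seconds) ahead; a negative prediction means the filesystem would fill within a day.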
+ expr: |||
+ (
+ node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceFillingUpWarningThreshold)d
+ and
+ predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 24*60*60) < 0
+ and
+ node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
+ )
+ ||| % $._config,
+ 'for': '1h',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Filesystem is predicted to run out of space within the next 24 hours.',
+ description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up.',
+ },
+ },
+ {
+ alert: 'NodeFilesystemSpaceFillingUp',
+ expr: |||
+ (
+ node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceFillingUpCriticalThreshold)d
+ and
+ predict_linear(node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 4*60*60) < 0
+ and
+ node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
+ )
+ ||| % $._config,
+ 'for': '1h',
+ labels: {
+ severity: '%(nodeCriticalSeverity)s' % $._config,
+ },
+ annotations: {
+ summary: 'Filesystem is predicted to run out of space within the next 4 hours.',
+ description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left and is filling up fast.',
+ },
+ },
+ {
+ alert: 'NodeFilesystemAlmostOutOfSpace',
+ expr: |||
+ (
+ node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceAvailableWarningThreshold)d
+ and
+ node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
+ )
+ ||| % $._config,
+ 'for': '30m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Filesystem has less than %(fsSpaceAvailableWarningThreshold)d%% space left.' % $._config,
+ description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left.',
+ },
+ },
+ {
+ alert: 'NodeFilesystemAlmostOutOfSpace',
+ expr: |||
+ (
+ node_filesystem_avail_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_size_bytes{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < %(fsSpaceAvailableCriticalThreshold)d
+ and
+ node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
+ )
+ ||| % $._config,
+ 'for': '30m',
+ labels: {
+ severity: '%(nodeCriticalSeverity)s' % $._config,
+ },
+ annotations: {
+ summary: 'Filesystem has less than %(fsSpaceAvailableCriticalThreshold)d%% space left.' % $._config,
+ description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available space left.',
+ },
+ },
+ {
+ alert: 'NodeFilesystemFilesFillingUp',
+ expr: |||
+ (
+ node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 40
+ and
+ predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 24*60*60) < 0
+ and
+ node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
+ )
+ ||| % $._config,
+ 'for': '1h',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Filesystem is predicted to run out of inodes within the next 24 hours.',
+ description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up.',
+ },
+ },
+ {
+ alert: 'NodeFilesystemFilesFillingUp',
+ expr: |||
+ (
+ node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 20
+ and
+ predict_linear(node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s}[6h], 4*60*60) < 0
+ and
+ node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
+ )
+ ||| % $._config,
+ 'for': '1h',
+ labels: {
+ severity: '%(nodeCriticalSeverity)s' % $._config,
+ },
+ annotations: {
+ summary: 'Filesystem is predicted to run out of inodes within the next 4 hours.',
+ description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left and is filling up fast.',
+ },
+ },
+ {
+ alert: 'NodeFilesystemAlmostOutOfFiles',
+ expr: |||
+ (
+ node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 5
+ and
+ node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
+ )
+ ||| % $._config,
+ 'for': '1h',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Filesystem has less than 5% inodes left.',
+ description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left.',
+ },
+ },
+ {
+ alert: 'NodeFilesystemAlmostOutOfFiles',
+ expr: |||
+ (
+ node_filesystem_files_free{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} / node_filesystem_files{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} * 100 < 3
+ and
+ node_filesystem_readonly{%(nodeExporterSelector)s,%(fsSelector)s,%(fsMountpointSelector)s} == 0
+ )
+ ||| % $._config,
+ 'for': '1h',
+ labels: {
+ severity: '%(nodeCriticalSeverity)s' % $._config,
+ },
+ annotations: {
+ summary: 'Filesystem has less than 3% inodes left.',
+ description: 'Filesystem on {{ $labels.device }}, mounted on {{ $labels.mountpoint }}, at {{ $labels.instance }} has only {{ printf "%.2f" $value }}% available inodes left.',
+ },
+ },
+ {
+ alert: 'NodeNetworkReceiveErrs',
+ expr: |||
+ rate(node_network_receive_errs_total{%(nodeExporterSelector)s}[2m]) / rate(node_network_receive_packets_total{%(nodeExporterSelector)s}[2m]) > 0.01
+ ||| % $._config,
+ 'for': '1h',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Network interface is reporting many receive errors.',
+ description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} receive errors in the last two minutes.',
+ },
+ },
+ {
+ alert: 'NodeNetworkTransmitErrs',
+ expr: |||
+ rate(node_network_transmit_errs_total{%(nodeExporterSelector)s}[2m]) / rate(node_network_transmit_packets_total{%(nodeExporterSelector)s}[2m]) > 0.01
+ ||| % $._config,
+ 'for': '1h',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Network interface is reporting many transmit errors.',
+ description: '{{ $labels.instance }} interface {{ $labels.device }} has encountered {{ printf "%.0f" $value }} transmit errors in the last two minutes.',
+ },
+ },
+ {
+ alert: 'NodeHighNumberConntrackEntriesUsed',
+ expr: |||
+ (node_nf_conntrack_entries{%(nodeExporterSelector)s} / node_nf_conntrack_entries_limit) > 0.75
+ ||| % $._config,
+ annotations: {
+ summary: 'Number of conntrack entries is getting close to the limit.',
+ description: '{{ $value | humanizePercentage }} of conntrack entries are used.',
+ },
+ labels: {
+ severity: 'warning',
+ },
+ },
+ {
+ alert: 'NodeTextFileCollectorScrapeError',
+ expr: |||
+ node_textfile_scrape_error{%(nodeExporterSelector)s} == 1
+ ||| % $._config,
+ annotations: {
+ summary: 'Node Exporter text file collector failed to scrape.',
+ description: 'Node Exporter text file collector on {{ $labels.instance }} failed to scrape.',
+ },
+ labels: {
+ severity: 'warning',
+ },
+ },
+ {
+ alert: 'NodeClockSkewDetected',
+ expr: |||
+ (
+ node_timex_offset_seconds{%(nodeExporterSelector)s} > 0.05
+ and
+ deriv(node_timex_offset_seconds{%(nodeExporterSelector)s}[5m]) >= 0
+ )
+ or
+ (
+ node_timex_offset_seconds{%(nodeExporterSelector)s} < -0.05
+ and
+ deriv(node_timex_offset_seconds{%(nodeExporterSelector)s}[5m]) <= 0
+ )
+ ||| % $._config,
+ 'for': '10m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Clock skew detected.',
+ description: 'Clock at {{ $labels.instance }} is out of sync by more than 0.05s. Ensure NTP is configured correctly on this host.',
+ },
+ },
+ {
+ alert: 'NodeClockNotSynchronising',
+ expr: |||
+ min_over_time(node_timex_sync_status{%(nodeExporterSelector)s}[5m]) == 0
+ and
+ node_timex_maxerror_seconds{%(nodeExporterSelector)s} >= 16
+ ||| % $._config,
+ 'for': '10m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Clock not synchronising.',
+ description: 'Clock at {{ $labels.instance }} is not synchronising. Ensure NTP is configured on this host.',
+ },
+ },
+ {
+ alert: 'NodeRAIDDegraded',
+ expr: |||
+ node_md_disks_required{%(nodeExporterSelector)s,%(diskDeviceSelector)s} - ignoring (state) (node_md_disks{state="active",%(nodeExporterSelector)s,%(diskDeviceSelector)s}) > 0
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ summary: 'RAID Array is degraded.',
+ description: "RAID array '{{ $labels.device }}' at {{ $labels.instance }} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically.",
+ },
+ },
+ {
+ alert: 'NodeRAIDDiskFailure',
+ expr: |||
+ node_md_disks{state="failed",%(nodeExporterSelector)s,%(diskDeviceSelector)s} > 0
+ ||| % $._config,
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Failed device in RAID array.',
+ description: "At least one device in RAID array at {{ $labels.instance }} failed. Array '{{ $labels.device }}' needs attention and possibly a disk swap.",
+ },
+ },
+ {
+ alert: 'NodeFileDescriptorLimit',
+ expr: |||
+ (
+ node_filefd_allocated{%(nodeExporterSelector)s} * 100 / node_filefd_maximum{%(nodeExporterSelector)s} > 70
+ )
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Kernel is predicted to exhaust its file descriptor limit soon.',
+ description: 'File descriptor usage at {{ $labels.instance }} is currently at {{ printf "%.2f" $value }}% of the limit.',
+ },
+ },
+ {
+ alert: 'NodeFileDescriptorLimit',
+ expr: |||
+ (
+ node_filefd_allocated{%(nodeExporterSelector)s} * 100 / node_filefd_maximum{%(nodeExporterSelector)s} > 90
+ )
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'critical',
+ },
+ annotations: {
+ summary: 'Kernel is predicted to exhaust its file descriptor limit soon.',
+ description: 'File descriptor usage at {{ $labels.instance }} is currently at {{ printf "%.2f" $value }}% of the limit.',
+ },
+ },
+ {
+ alert: 'NodeCPUHighUsage',
+ expr: |||
+ sum without(mode) (avg without (cpu) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode!="idle"}[2m]))) * 100 > %(cpuHighUsageThreshold)d
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'info',
+ },
+ annotations: {
+ summary: 'High CPU usage.',
+ description: |||
+ CPU usage at {{ $labels.instance }} has been above %(cpuHighUsageThreshold)d%% for the last 15 minutes and is currently at {{ printf "%%.2f" $value }}%%.
+ ||| % $._config,
+ },
+ },
+ {
+ alert: 'NodeSystemSaturation',
+ expr: |||
+ node_load1{%(nodeExporterSelector)s}
+ / count without (cpu, mode) (node_cpu_seconds_total{%(nodeExporterSelector)s, mode="idle"}) > %(systemSaturationPerCoreThreshold)d
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'System is saturated; load per core is very high.',
+ description: |||
+ System load per core at {{ $labels.instance }} has been above %(systemSaturationPerCoreThreshold)d for the last 15 minutes and is currently at {{ printf "%%.2f" $value }}.
+ This might indicate resource saturation on this instance and can cause it to become unresponsive.
+ ||| % $._config,
+ },
+ },
+ {
+ alert: 'NodeMemoryMajorPagesFaults',
+ expr: |||
+ rate(node_vmstat_pgmajfault{%(nodeExporterSelector)s}[5m]) > %(memoryMajorPagesFaultsThreshold)d
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Memory major page faults are occurring at a very high rate.',
+ description: |||
+ Memory major page faults are occurring at a very high rate at {{ $labels.instance }}: more than %(memoryMajorPagesFaultsThreshold)d major page faults per second for the last 15 minutes, currently at {{ printf "%%.2f" $value }}.
+ Please check that there is enough memory available on this instance.
+ ||| % $._config,
+ },
+ },
+ {
+ alert: 'NodeMemoryHighUtilization',
+ expr: |||
+ 100 - (node_memory_MemAvailable_bytes{%(nodeExporterSelector)s} / node_memory_MemTotal_bytes{%(nodeExporterSelector)s} * 100) > %(memoryHighUtilizationThreshold)d
+ ||| % $._config,
+ 'for': '15m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Host is running out of memory.',
+ description: |||
+ Memory usage at {{ $labels.instance }} has been above %(memoryHighUtilizationThreshold)d%% for the last 15 minutes and is currently at {{ printf "%%.2f" $value }}%%.
+ ||| % $._config,
+ },
+ },
+ {
+ alert: 'NodeDiskIOSaturation',
+ expr: |||
+ rate(node_disk_io_time_weighted_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[5m]) > %(diskIOSaturationThreshold)d
+ ||| % $._config,
+ 'for': '30m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Disk IO queue is high.',
+ description: |||
+ Disk IO queue (aqu-sz) is high on {{ $labels.device }} at {{ $labels.instance }}, has been above %(diskIOSaturationThreshold)d for the last 30 minutes and is currently at {{ printf "%%.2f" $value }}.
+ This symptom might indicate disk saturation.
+ ||| % $._config,
+ },
+ },
+ {
+ alert: 'NodeSystemdServiceFailed',
+ expr: |||
+ node_systemd_unit_state{%(nodeExporterSelector)s, state="failed"} == 1
+ ||| % $._config,
+ 'for': '5m',
+ labels: {
+ severity: 'warning',
+ },
+ annotations: {
+ summary: 'Systemd service has entered failed state.',
+ description: 'Systemd service {{ $labels.name }} has entered failed state at {{ $labels.instance }}.',
+ },
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/config.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/config.libsonnet
new file mode 100644
index 0000000..4427b59
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/config.libsonnet
@@ -0,0 +1,90 @@
+{
+ _config+:: {
+ // Selectors are inserted between {} in Prometheus queries.
+
+ // Select the metrics coming from the node exporter. Note that all
+ // the selected metrics are shown stacked on top of each other in
+ // the 'USE Method / Cluster' dashboard. Consider disabling that
+ // dashboard if mixing up all those metrics in the same dashboard
+ // doesn't make sense (e.g. because they are coming from different
+ // clusters).
+ nodeExporterSelector: 'job="node"',
+
+ // Select the fstype for filesystem-related queries. If left
+ // empty, all filesystems are selected. If you have unusual
+ // filesystems you don't want to include in dashboards and
+ // alerting, you can exclude them here, e.g. 'fstype!="tmpfs"'.
+ fsSelector: 'fstype!=""',
+
+ // Select the mountpoint for filesystem-related queries. If left
+ // empty, all mountpoints are selected. For example if you have a
+ // special purpose tmpfs instance that has a fixed size and will
+ // always be 100% full, but you still want alerts and dashboards for
+ // other tmpfs instances, you can exclude those by mountpoint prefix
+ // like so: 'mountpoint!~"/var/lib/foo.*"'.
+ fsMountpointSelector: 'mountpoint!=""',
+
+ // Select the device for disk-related queries. If left empty, all
+ // devices are selected. If you have unusual devices you don't
+ // want to include in dashboards and alerting, you can exclude
+ // them here, e.g. 'device!="tmpfs"'.
+ diskDeviceSelector: 'device!=""',
+
+ // Some of the alerts are meant to fire if a critical failure of a
+ // node is imminent (e.g. the disk is about to run full). In a
+ // true “cloud native” setup, failures of a single node should be
+ // tolerated. Hence, even imminent failure of a single node is no
+ // reason to create a paging alert. However, in practice there are
+ // still many situations where operators like to get paged in time
+ // before a node runs out of disk space. nodeCriticalSeverity can
+ // be set to the desired severity for this kind of alert. This
+ // can even be templated to depend on labels of the node, e.g. you
+ // could make this critical for traditional database masters but
+ // just a warning for K8s nodes.
+ nodeCriticalSeverity: 'critical',
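+ // Example (illustrative): set nodeCriticalSeverity: 'warning' when the
+ // failure of a single node is tolerated and paging is not desired.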
+
+ // CPU utilization (%) on which to trigger the
+ // 'NodeCPUHighUsage' alert.
+ cpuHighUsageThreshold: 90,
+ // Load average 1m (per core) on which to trigger the
+ // 'NodeSystemSaturation' alert.
+ systemSaturationPerCoreThreshold: 2,
+
+ // Available disk space (%) thresholds on which to trigger the
+ // 'NodeFilesystemSpaceFillingUp' alerts. These alerts fire if the disk
+ // usage grows in a way that it is predicted to run out in 4h or 1d
+ // and if the provided thresholds have been reached right now.
+ // In some cases you'll want to adjust these, e.g. by default Kubernetes
+ // runs the image garbage collection when the disk usage reaches 85%
+ // of its available space. In that case, you'll want to reduce the
+ // critical threshold below to something like 14 or 15, otherwise
+ // the alert could fire under normal node usage.
+ fsSpaceFillingUpWarningThreshold: 40,
+ fsSpaceFillingUpCriticalThreshold: 20,
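+ // Example (illustrative) override for such a Kubernetes cluster:
+ //   fsSpaceFillingUpCriticalThreshold: 15,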
+
+ // Available disk space (%) thresholds on which to trigger the
+ // 'NodeFilesystemAlmostOutOfSpace' alerts.
+ fsSpaceAvailableWarningThreshold: 5,
+ fsSpaceAvailableCriticalThreshold: 3,
+
+ // Memory utilization (%) level on which to trigger the
+ // 'NodeMemoryHighUtilization' alert.
+ memoryHighUtilizationThreshold: 90,
+
+ // Threshold for the rate of memory major page faults to trigger
+ // 'NodeMemoryMajorPagesFaults' alert.
+ memoryMajorPagesFaultsThreshold: 500,
+
+ // Disk IO queue level above which to trigger
+ // 'NodeDiskIOSaturation' alert.
+ diskIOSaturationThreshold: 10,
+
+ rateInterval: '5m',
+ // Opt-in for multi-cluster support.
+ showMultiCluster: false,
+ clusterLabel: 'cluster',
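+ // When showMultiCluster is true, dashboards gain a $cluster template
+ // variable and an extra 'USE Method / Multi-cluster' dashboard is
+ // generated (see dashboards/use.libsonnet).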
+
+ dashboardNamePrefix: 'Node Exporter / ',
+ dashboardTags: ['node-exporter-mixin'],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/dashboards.jsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/dashboards.jsonnet
new file mode 100644
index 0000000..9d913ed
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/dashboards.jsonnet
@@ -0,0 +1,6 @@
+local dashboards = (import 'mixin.libsonnet').grafanaDashboards;
+
+{
+ [name]: dashboards[name]
+ for name in std.objectFields(dashboards)
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/dashboards/dashboards.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/dashboards/dashboards.libsonnet
new file mode 100644
index 0000000..e6adbd4
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/dashboards/dashboards.libsonnet
@@ -0,0 +1,2 @@
+(import 'node.libsonnet') +
+(import 'use.libsonnet')
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/dashboards/node.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/dashboards/node.libsonnet
new file mode 100644
index 0000000..898c912
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/dashboards/node.libsonnet
@@ -0,0 +1,7 @@
+{
+ local nodemixin = import '../lib/prom-mixin.libsonnet',
+ grafanaDashboards+:: {
+ 'nodes.json': nodemixin.new(config=$._config, platform='Linux').dashboard,
+ 'nodes-darwin.json': nodemixin.new(config=$._config, platform='Darwin').dashboard,
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/dashboards/use.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/dashboards/use.libsonnet
new file mode 100644
index 0000000..65e96dd
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/dashboards/use.libsonnet
@@ -0,0 +1,466 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+
+local c = import '../config.libsonnet';
+
+local datasourceTemplate = {
+ current: {
+ text: 'default',
+ value: 'default',
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: '',
+ type: 'datasource',
+};
+
+local CPUUtilisation =
+ graphPanel.new(
+ 'CPU Utilisation',
+ datasource='$datasource',
+ span=6,
+ format='percentunit',
+ stack=true,
+ fill=10,
+ legend_show=false,
+ ) { tooltip+: { sort: 2 } };
+
+local CPUSaturation =
+ // TODO: Is this a useful panel? At least there should be some explanation of how load
+ // average relates to the "CPU saturation" in the title.
+ graphPanel.new(
+ 'CPU Saturation (Load1 per CPU)',
+ datasource='$datasource',
+ span=6,
+ format='percentunit',
+ stack=true,
+ fill=10,
+ legend_show=false,
+ ) { tooltip+: { sort: 2 } };
+
+local memoryUtilisation =
+ graphPanel.new(
+ 'Memory Utilisation',
+ datasource='$datasource',
+ span=6,
+ format='percentunit',
+ stack=true,
+ fill=10,
+ legend_show=false,
+ ) { tooltip+: { sort: 2 } };
+
+local memorySaturation =
+ graphPanel.new(
+ 'Memory Saturation (Major Page Faults)',
+ datasource='$datasource',
+ span=6,
+ format='rds',
+ stack=true,
+ fill=10,
+ legend_show=false,
+ ) { tooltip+: { sort: 2 } };
+
+local networkUtilisation =
+ graphPanel.new(
+ 'Network Utilisation (Bytes Receive/Transmit)',
+ datasource='$datasource',
+ span=6,
+ format='Bps',
+ stack=true,
+ fill=10,
+ legend_show=false,
+ )
+ .addSeriesOverride({ alias: '/Receive/', stack: 'A' })
+ .addSeriesOverride({ alias: '/Transmit/', stack: 'B', transform: 'negative-Y' })
+ { tooltip+: { sort: 2 } };
+
+local networkSaturation =
+ graphPanel.new(
+ 'Network Saturation (Drops Receive/Transmit)',
+ datasource='$datasource',
+ span=6,
+ format='Bps',
+ stack=true,
+ fill=10,
+ legend_show=false,
+ )
+ .addSeriesOverride({ alias: '/ Receive/', stack: 'A' })
+ .addSeriesOverride({ alias: '/ Transmit/', stack: 'B', transform: 'negative-Y' })
+ { tooltip+: { sort: 2 } };
+
+local diskIOUtilisation =
+ graphPanel.new(
+ 'Disk IO Utilisation',
+ datasource='$datasource',
+ span=6,
+ format='percentunit',
+ stack=true,
+ fill=10,
+ legend_show=false,
+ ) { tooltip+: { sort: 2 } };
+
+local diskIOSaturation =
+ graphPanel.new(
+ 'Disk IO Saturation',
+ datasource='$datasource',
+ span=6,
+ format='percentunit',
+ stack=true,
+ fill=10,
+ legend_show=false,
+ ) { tooltip+: { sort: 2 } };
+
+local diskSpaceUtilisation =
+ graphPanel.new(
+ 'Disk Space Utilisation',
+ datasource='$datasource',
+ span=12,
+ format='percentunit',
+ stack=true,
+ fill=10,
+ legend_show=false,
+ ) { tooltip+: { sort: 2 } };
+
+{
+ _clusterTemplate:: template.new(
+ name='cluster',
+ datasource='$datasource',
+ query='label_values(node_time_seconds, %s)' % $._config.clusterLabel,
+ current='',
+ hide=if $._config.showMultiCluster then '' else '2',
+ refresh=2,
+ includeAll=false,
+ sort=1
+ ),
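+ // NOTE: hide='2' hides the cluster variable entirely when multi-cluster
+ // support is off (Grafana hide levels: 0 = show, 1 = hide label,
+ // 2 = hide variable).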
+
+ grafanaDashboards+:: {
+ 'node-rsrc-use.json':
+
+ dashboard.new(
+ '%sUSE Method / Node' % $._config.dashboardNamePrefix,
+ time_from='now-1h',
+ tags=($._config.dashboardTags),
+ timezone='utc',
+ refresh='30s',
+ graphTooltip='shared_crosshair'
+ )
+ .addTemplate(datasourceTemplate)
+ .addTemplate($._clusterTemplate)
+ .addTemplate(
+ template.new(
+ 'instance',
+ '$datasource',
+ 'label_values(node_exporter_build_info{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}, instance)' % $._config,
+ refresh='time',
+ sort=1
+ )
+ )
+ .addRow(
+ row.new('CPU')
+ .addPanel(CPUUtilisation.addTarget(prometheus.target('instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Utilisation')))
+ .addPanel(CPUSaturation.addTarget(prometheus.target('instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Saturation')))
+ )
+ .addRow(
+ row.new('Memory')
+ .addPanel(memoryUtilisation.addTarget(prometheus.target('instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Utilisation')))
+ .addPanel(memorySaturation.addTarget(prometheus.target('instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Major page faults')))
+ )
+ .addRow(
+ row.new('Network')
+ .addPanel(
+ networkUtilisation
+ .addTarget(prometheus.target('instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Receive'))
+ .addTarget(prometheus.target('instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Transmit'))
+ )
+ .addPanel(
+ networkSaturation
+ .addTarget(prometheus.target('instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Receive'))
+ .addTarget(prometheus.target('instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='Transmit'))
+ )
+ )
+ .addRow(
+ row.new('Disk IO')
+ .addPanel(diskIOUtilisation.addTarget(prometheus.target('instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{device}}')))
+ .addPanel(diskIOSaturation.addTarget(prometheus.target('instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, instance="$instance", %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{device}}')))
+ )
+ .addRow(
+ row.new('Disk Space')
+ .addPanel(
+ diskSpaceUtilisation.addTarget(prometheus.target(
+ |||
+ sort_desc(1 -
+ (
+ max without (mountpoint, fstype) (node_filesystem_avail_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"})
+ /
+ max without (mountpoint, fstype) (node_filesystem_size_bytes{%(nodeExporterSelector)s, fstype!="", instance="$instance", %(clusterLabel)s="$cluster"})
+ ) != 0
+ )
+ ||| % $._config, legendFormat='{{device}}'
+ ))
+ )
+ ),
+
+ 'node-cluster-rsrc-use.json':
+ dashboard.new(
+ '%sUSE Method / Cluster' % $._config.dashboardNamePrefix,
+ time_from='now-1h',
+ tags=($._config.dashboardTags),
+ timezone='utc',
+ refresh='30s',
+ graphTooltip='shared_crosshair'
+ )
+ .addTemplate(datasourceTemplate)
+ .addTemplate($._clusterTemplate)
+ .addRow(
+ row.new('CPU')
+ .addPanel(
+ CPUUtilisation
+ .addTarget(prometheus.target(
+ |||
+ ((
+ instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
+ *
+ instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
+ ) != 0 )
+ / scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
+ ||| % $._config, legendFormat='{{ instance }}'
+ ))
+ )
+ .addPanel(
+ CPUSaturation
+ .addTarget(prometheus.target(
+ |||
+ (
+ instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
+ / scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
+ ) != 0
+ ||| % $._config, legendFormat='{{instance}}'
+ ))
+ )
+ )
+ .addRow(
+ row.new('Memory')
+ .addPanel(
+ memoryUtilisation
+ .addTarget(prometheus.target(
+ |||
+ (
+ instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
+ / scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
+ ) != 0
+ ||| % $._config, legendFormat='{{instance}}',
+ ))
+ )
+ .addPanel(memorySaturation.addTarget(prometheus.target('instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}' % $._config, legendFormat='{{instance}}')))
+ )
+ .addRow(
+ row.new('Network')
+ .addPanel(
+ networkUtilisation
+ .addTarget(prometheus.target('instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Receive'))
+ .addTarget(prometheus.target('instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Transmit'))
+ )
+ .addPanel(
+ networkSaturation
+ .addTarget(prometheus.target('instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Receive'))
+ .addTarget(prometheus.target('instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"} != 0' % $._config, legendFormat='{{instance}} Transmit'))
+ )
+ )
+ .addRow(
+ row.new('Disk IO')
+ .addPanel(
+ diskIOUtilisation
+ .addTarget(prometheus.target(
+ |||
+ (
+ instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
+ / scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
+ ) != 0
+ ||| % $._config, legendFormat='{{instance}} {{device}}'
+ ))
+ )
+ .addPanel(
+ diskIOSaturation
+ .addTarget(prometheus.target(
+ |||
+ (
+ instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}
+ / scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s, %(clusterLabel)s="$cluster"}))
+ ) != 0
+ ||| % $._config, legendFormat='{{instance}} {{device}}'
+ ))
+ )
+ )
+ .addRow(
+ row.new('Disk Space')
+ .addPanel(
+ diskSpaceUtilisation
+ .addTarget(prometheus.target(
+ |||
+ sum without (device) (
+ max without (fstype, mountpoint) ((
+ node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"}
+ -
+ node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"}
+ ) != 0)
+ )
+ / scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s, %(clusterLabel)s="$cluster"})))
+ ||| % $._config, legendFormat='{{instance}}'
+ ))
+ )
+ ),
+ } +
+ if $._config.showMultiCluster then {
+ 'node-multicluster-rsrc-use.json':
+ dashboard.new(
+ '%sUSE Method / Multi-cluster' % $._config.dashboardNamePrefix,
+ time_from='now-1h',
+ tags=($._config.dashboardTags),
+ timezone='utc',
+ refresh='30s',
+ graphTooltip='shared_crosshair'
+ )
+ .addTemplate(datasourceTemplate)
+ .addRow(
+ row.new('CPU')
+ .addPanel(
+ CPUUtilisation
+ .addTarget(prometheus.target(
+ |||
+ sum(
+ ((
+ instance:node_cpu_utilisation:rate%(rateInterval)s{%(nodeExporterSelector)s}
+ *
+ instance:node_num_cpu:sum{%(nodeExporterSelector)s}
+ ) != 0)
+ / scalar(sum(instance:node_num_cpu:sum{%(nodeExporterSelector)s}))
+ ) by (%(clusterLabel)s)
+ ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
+ ))
+ )
+ .addPanel(
+ CPUSaturation
+ .addTarget(prometheus.target(
+ |||
+ sum((
+ instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s}
+ / scalar(count(instance:node_load1_per_cpu:ratio{%(nodeExporterSelector)s}))
+ ) != 0) by (%(clusterLabel)s)
+ ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
+ ))
+ )
+ )
+ .addRow(
+ row.new('Memory')
+ .addPanel(
+ memoryUtilisation
+ .addTarget(prometheus.target(
+ |||
+ sum((
+ instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s}
+ / scalar(count(instance:node_memory_utilisation:ratio{%(nodeExporterSelector)s}))
+ ) != 0) by (%(clusterLabel)s)
+ ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
+ ))
+ )
+ .addPanel(
+ memorySaturation
+ .addTarget(prometheus.target(
+ |||
+ sum((
+ instance:node_vmstat_pgmajfault:rate%(rateInterval)s{%(nodeExporterSelector)s}
+ ) != 0) by (%(clusterLabel)s)
+ ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
+ ))
+ )
+ )
+ .addRow(
+ row.new('Network')
+ .addPanel(
+ networkUtilisation
+ .addTarget(prometheus.target(
+ |||
+ sum((
+ instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
+ ) != 0) by (%(clusterLabel)s)
+ ||| % $._config, legendFormat='{{%(clusterLabel)s}} Receive' % $._config
+ ))
+ .addTarget(prometheus.target(
+ |||
+ sum((
+ instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
+ ) != 0) by (%(clusterLabel)s)
+ ||| % $._config, legendFormat='{{%(clusterLabel)s}} Transmit' % $._config
+ ))
+ )
+ .addPanel(
+ networkSaturation
+ .addTarget(prometheus.target(
+ |||
+ sum((
+ instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
+ ) != 0) by (%(clusterLabel)s)
+ ||| % $._config, legendFormat='{{%(clusterLabel)s}} Receive' % $._config
+ ))
+ .addTarget(prometheus.target(
+ |||
+ sum((
+ instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s{%(nodeExporterSelector)s}
+ ) != 0) by (%(clusterLabel)s)
+ ||| % $._config, legendFormat='{{%(clusterLabel)s}} Transmit' % $._config
+ ))
+ )
+ )
+ .addRow(
+ row.new('Disk IO')
+ .addPanel(
+ diskIOUtilisation
+ .addTarget(prometheus.target(
+ |||
+ sum((
+ instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}
+ / scalar(count(instance_device:node_disk_io_time_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}))
+ ) != 0) by (%(clusterLabel)s, device)
+ ||| % $._config, legendFormat='{{%(clusterLabel)s}} {{device}}' % $._config
+ ))
+ )
+ .addPanel(
+ diskIOSaturation
+ .addTarget(prometheus.target(
+ |||
+ sum((
+ instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}
+ / scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s{%(nodeExporterSelector)s}))
+ ) != 0) by (%(clusterLabel)s, device)
+ ||| % $._config, legendFormat='{{%(clusterLabel)s}} {{device}}' % $._config
+ ))
+ )
+ )
+ .addRow(
+ row.new('Disk Space')
+ .addPanel(
+ diskSpaceUtilisation
+ .addTarget(prometheus.target(
+ |||
+ sum (
+ sum without (device) (
+ max without (fstype, mountpoint, instance, pod) ((
+ node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s} - node_filesystem_avail_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s}
+ ) != 0)
+ )
+ / scalar(sum(max without (fstype, mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, %(fsSelector)s, %(fsMountpointSelector)s})))
+ ) by (%(clusterLabel)s)
+ ||| % $._config, legendFormat='{{%(clusterLabel)s}}' % $._config
+ ))
+ )
+ ),
+ } else {},
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/jsonnetfile.json b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/jsonnetfile.json
new file mode 100644
index 0000000..721d483
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/jsonnetfile.json
@@ -0,0 +1,24 @@
+{
+ "version": 1,
+ "dependencies": [
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/grafonnet-lib.git",
+ "subdir": "grafonnet"
+ }
+ },
+ "version": "master"
+ },
+ {
+ "source": {
+ "git": {
+ "remote": "https://github.com/grafana/grafonnet-lib.git",
+ "subdir": "grafonnet-7.0"
+ }
+ },
+ "version": "master"
+ }
+ ],
+ "legacyImports": false
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/lib/prom-mixin.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/lib/prom-mixin.libsonnet
new file mode 100644
index 0000000..6c4d990
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/lib/prom-mixin.libsonnet
@@ -0,0 +1,504 @@
+local grafana = import 'github.com/grafana/grafonnet-lib/grafonnet/grafana.libsonnet';
+local dashboard = grafana.dashboard;
+local row = grafana.row;
+local prometheus = grafana.prometheus;
+local template = grafana.template;
+local graphPanel = grafana.graphPanel;
+local grafana70 = import 'github.com/grafana/grafonnet-lib/grafonnet-7.0/grafana.libsonnet';
+local gaugePanel = grafana70.panel.gauge;
+local table = grafana70.panel.table;
+
+{
+
+ new(config=null, platform=null):: {
+
+ local prometheusDatasourceTemplate = {
+ current: {
+ text: 'default',
+ value: 'default',
+ },
+ hide: 0,
+ label: 'Data Source',
+ name: 'datasource',
+ options: [],
+ query: 'prometheus',
+ refresh: 1,
+ regex: '',
+ type: 'datasource',
+ },
+
+ local instanceTemplatePrototype =
+ template.new(
+ 'instance',
+ '$datasource',
+ '',
+ refresh='time',
+ label='Instance',
+ ),
+ local instanceTemplate =
+ if platform == 'Darwin' then
+ instanceTemplatePrototype
+ { query: 'label_values(node_uname_info{%(nodeExporterSelector)s, sysname="Darwin"}, instance)' % config }
+ else
+ instanceTemplatePrototype
+ { query: 'label_values(node_uname_info{%(nodeExporterSelector)s, sysname!="Darwin"}, instance)' % config },
+
+
+ local idleCPU =
+ graphPanel.new(
+ 'CPU Usage',
+ datasource='$datasource',
+ span=6,
+ format='percentunit',
+ max=1,
+ min=0,
+ stack=true,
+ )
+ .addTarget(prometheus.target(
+ |||
+ (
+ (1 - sum without (mode) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode=~"idle|iowait|steal", instance="$instance"}[$__rate_interval])))
+ / ignoring(cpu) group_left
+ count without (cpu, mode) (node_cpu_seconds_total{%(nodeExporterSelector)s, mode="idle", instance="$instance"})
+ )
+ ||| % config,
+ legendFormat='{{cpu}}',
+ intervalFactor=5,
+ )),
+
+ local systemLoad =
+ graphPanel.new(
+ 'Load Average',
+ datasource='$datasource',
+ span=6,
+ format='short',
+ min=0,
+ fill=0,
+ )
+ .addTarget(prometheus.target('node_load1{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='1m load average'))
+ .addTarget(prometheus.target('node_load5{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='5m load average'))
+ .addTarget(prometheus.target('node_load15{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='15m load average'))
+ .addTarget(prometheus.target('count(node_cpu_seconds_total{%(nodeExporterSelector)s, instance="$instance", mode="idle"})' % config, legendFormat='logical cores')),
+
+ local memoryGraphPanelPrototype =
+ graphPanel.new(
+ 'Memory Usage',
+ datasource='$datasource',
+ span=9,
+ format='bytes',
+ min=0,
+ ),
+ local memoryGraph =
+ if platform == 'Linux' then
+ memoryGraphPanelPrototype { stack: true }
+ .addTarget(prometheus.target(
+ |||
+ (
+ node_memory_MemTotal_bytes{%(nodeExporterSelector)s, instance="$instance"}
+ -
+ node_memory_MemFree_bytes{%(nodeExporterSelector)s, instance="$instance"}
+ -
+ node_memory_Buffers_bytes{%(nodeExporterSelector)s, instance="$instance"}
+ -
+ node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance"}
+ )
+ ||| % config,
+ legendFormat='memory used'
+ ))
+ .addTarget(prometheus.target('node_memory_Buffers_bytes{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='memory buffers'))
+ .addTarget(prometheus.target('node_memory_Cached_bytes{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='memory cached'))
+ .addTarget(prometheus.target('node_memory_MemFree_bytes{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='memory free'))
+ else if platform == 'Darwin' then
+ // not useful to stack
+ memoryGraphPanelPrototype { stack: false }
+ .addTarget(prometheus.target('node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='Physical Memory'))
+ .addTarget(prometheus.target(
+ |||
+ (
+ node_memory_internal_bytes{%(nodeExporterSelector)s, instance="$instance"} -
+ node_memory_purgeable_bytes{%(nodeExporterSelector)s, instance="$instance"} +
+ node_memory_wired_bytes{%(nodeExporterSelector)s, instance="$instance"} +
+ node_memory_compressed_bytes{%(nodeExporterSelector)s, instance="$instance"}
+ )
+ ||| % config, legendFormat='Memory Used'
+ ))
+ .addTarget(prometheus.target(
+ |||
+ (
+ node_memory_internal_bytes{%(nodeExporterSelector)s, instance="$instance"} -
+ node_memory_purgeable_bytes{%(nodeExporterSelector)s, instance="$instance"}
+ )
+ ||| % config, legendFormat='App Memory'
+ ))
+ .addTarget(prometheus.target('node_memory_wired_bytes{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='Wired Memory'))
+ .addTarget(prometheus.target('node_memory_compressed_bytes{%(nodeExporterSelector)s, instance="$instance"}' % config, legendFormat='Compressed')),
+
+ // NOTE: avg() is used to circumvent a label change caused by a node_exporter rollout.
+ local memoryGaugePanelPrototype =
+ gaugePanel.new(
+ title='Memory Usage',
+ datasource='$datasource',
+ )
+ .addThresholdStep('rgba(50, 172, 45, 0.97)')
+ .addThresholdStep('rgba(237, 129, 40, 0.89)', 80)
+ .addThresholdStep('rgba(245, 54, 54, 0.9)', 90)
+ .setFieldConfig(max=100, min=0, unit='percent')
+ + {
+ span: 3,
+ },
+
+ local memoryGauge =
+ if platform == 'Linux' then
+ memoryGaugePanelPrototype
+
+ .addTarget(prometheus.target(
+ |||
+ 100 -
+ (
+ avg(node_memory_MemAvailable_bytes{%(nodeExporterSelector)s, instance="$instance"}) /
+ avg(node_memory_MemTotal_bytes{%(nodeExporterSelector)s, instance="$instance"})
+ * 100
+ )
+ ||| % config,
+ ))
+
+ else if platform == 'Darwin' then
+ memoryGaugePanelPrototype
+ .addTarget(prometheus.target(
+ |||
+ (
+ (
+ avg(node_memory_internal_bytes{%(nodeExporterSelector)s, instance="$instance"}) -
+ avg(node_memory_purgeable_bytes{%(nodeExporterSelector)s, instance="$instance"}) +
+ avg(node_memory_wired_bytes{%(nodeExporterSelector)s, instance="$instance"}) +
+ avg(node_memory_compressed_bytes{%(nodeExporterSelector)s, instance="$instance"})
+ ) /
+ avg(node_memory_total_bytes{%(nodeExporterSelector)s, instance="$instance"})
+ )
+ *
+ 100
+ ||| % config
+ )),
+
+ local diskIO =
+ graphPanel.new(
+ 'Disk I/O',
+ datasource='$datasource',
+ span=6,
+ min=0,
+ fill=0,
+ )
+ // TODO: Does it make sense to have those three in the same panel?
+ .addTarget(prometheus.target(
+ 'rate(node_disk_read_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(diskDeviceSelector)s}[$__rate_interval])' % config,
+ legendFormat='{{device}} read',
+ intervalFactor=1,
+ ))
+ .addTarget(prometheus.target(
+ 'rate(node_disk_written_bytes_total{%(nodeExporterSelector)s, instance="$instance", %(diskDeviceSelector)s}[$__rate_interval])' % config,
+ legendFormat='{{device}} written',
+ intervalFactor=1,
+ ))
+ .addTarget(prometheus.target(
+ 'rate(node_disk_io_time_seconds_total{%(nodeExporterSelector)s, instance="$instance", %(diskDeviceSelector)s}[$__rate_interval])' % config,
+ legendFormat='{{device}} io time',
+ intervalFactor=1,
+ )) +
+ {
+ seriesOverrides: [
+ {
+ alias: '/ read| written/',
+ yaxis: 1,
+ },
+ {
+ alias: '/ io time/',
+ yaxis: 2,
+ },
+ ],
+ yaxes: [
+ self.yaxe(format='Bps'),
+ self.yaxe(format='percentunit'),
+ ],
+ },
+
+ local diskSpaceUsage =
+ table.new(
+ title='Disk Space Usage',
+ datasource='$datasource',
+ )
+ .setFieldConfig(unit='decbytes')
+ .addThresholdStep(color='green', value=null)
+ .addThresholdStep(color='yellow', value=0.8)
+ .addThresholdStep(color='red', value=0.9)
+ .addTarget(prometheus.target(
+ |||
+ max by (mountpoint) (node_filesystem_size_bytes{%(nodeExporterSelector)s, instance="$instance", %(fsSelector)s, %(fsMountpointSelector)s})
+ ||| % config,
+ legendFormat='',
+ instant=true,
+ format='table'
+ ))
+ .addTarget(prometheus.target(
+ |||
+ max by (mountpoint) (node_filesystem_avail_bytes{%(nodeExporterSelector)s, instance="$instance", %(fsSelector)s, %(fsMountpointSelector)s})
+ ||| % config,
+ legendFormat='',
+ instant=true,
+ format='table'
+ ))
+ .addOverride(
+ matcher={
+ id: 'byName',
+ options: 'Mounted on',
+ },
+ properties=[
+ {
+ id: 'custom.width',
+ value: 260,
+ },
+ ],
+ )
+ .addOverride(
+ matcher={
+ id: 'byName',
+ options: 'Size',
+ },
+ properties=[
+
+ {
+ id: 'custom.width',
+ value: 93,
+ },
+
+ ],
+ )
+ .addOverride(
+ matcher={
+ id: 'byName',
+ options: 'Used',
+ },
+ properties=[
+ {
+ id: 'custom.width',
+ value: 72,
+ },
+ ],
+ )
+ .addOverride(
+ matcher={
+ id: 'byName',
+ options: 'Available',
+ },
+ properties=[
+ {
+ id: 'custom.width',
+ value: 88,
+ },
+ ],
+ )
+
+ .addOverride(
+ matcher={
+ id: 'byName',
+ options: 'Used, %',
+ },
+ properties=[
+ {
+ id: 'unit',
+ value: 'percentunit',
+ },
+ {
+ id: 'custom.displayMode',
+ value: 'gradient-gauge',
+ },
+ {
+ id: 'max',
+ value: 1,
+ },
+ {
+ id: 'min',
+ value: 0,
+ },
+ ]
+ )
+ + { span: 6 }
+ + {
+ transformations: [
+ {
+ id: 'groupBy',
+ options: {
+ fields: {
+ 'Value #A': {
+ aggregations: [
+ 'lastNotNull',
+ ],
+ operation: 'aggregate',
+ },
+ 'Value #B': {
+ aggregations: [
+ 'lastNotNull',
+ ],
+ operation: 'aggregate',
+ },
+ mountpoint: {
+ aggregations: [],
+ operation: 'groupby',
+ },
+ },
+ },
+ },
+ {
+ id: 'merge',
+ options: {},
+ },
+ {
+ id: 'calculateField',
+ options: {
+ alias: 'Used',
+ binary: {
+ left: 'Value #A (lastNotNull)',
+ operator: '-',
+ reducer: 'sum',
+ right: 'Value #B (lastNotNull)',
+ },
+ mode: 'binary',
+ reduce: {
+ reducer: 'sum',
+ },
+ },
+ },
+ {
+ id: 'calculateField',
+ options: {
+ alias: 'Used, %',
+ binary: {
+ left: 'Used',
+ operator: '/',
+ reducer: 'sum',
+ right: 'Value #A (lastNotNull)',
+ },
+ mode: 'binary',
+ reduce: {
+ reducer: 'sum',
+ },
+ },
+ },
+ {
+ id: 'organize',
+ options: {
+ excludeByName: {},
+ indexByName: {},
+ renameByName: {
+ 'Value #A (lastNotNull)': 'Size',
+ 'Value #B (lastNotNull)': 'Available',
+ mountpoint: 'Mounted on',
+ },
+ },
+ },
+ {
+ id: 'sortBy',
+ options: {
+ fields: {},
+ sort: [
+ {
+ field: 'Mounted on',
+ },
+ ],
+ },
+ },
+ ],
+ },
+
+
+ local networkReceived =
+ graphPanel.new(
+ 'Network Received',
+ description='Network received (bits/s)',
+ datasource='$datasource',
+ span=6,
+ format='bps',
+ min=0,
+ fill=0,
+ )
+ .addTarget(prometheus.target(
+ 'rate(node_network_receive_bytes_total{%(nodeExporterSelector)s, instance="$instance", device!="lo"}[$__rate_interval]) * 8' % config,
+ legendFormat='{{device}}',
+ intervalFactor=1,
+ )),
+
+ local networkTransmitted =
+ graphPanel.new(
+ 'Network Transmitted',
+ description='Network transmitted (bits/s)',
+ datasource='$datasource',
+ span=6,
+ format='bps',
+ min=0,
+ fill=0,
+ )
+ .addTarget(prometheus.target(
+ 'rate(node_network_transmit_bytes_total{%(nodeExporterSelector)s, instance="$instance", device!="lo"}[$__rate_interval]) * 8' % config,
+ legendFormat='{{device}}',
+ intervalFactor=1,
+ )),
+
+ local cpuRow =
+ row.new('CPU')
+ .addPanel(idleCPU)
+ .addPanel(systemLoad),
+
+ local memoryRow =
+ row.new('Memory')
+ .addPanel(memoryGraph)
+ .addPanel(memoryGauge),
+
+ local diskRow =
+ row.new('Disk')
+ .addPanel(diskIO)
+ .addPanel(diskSpaceUsage),
+
+ local networkRow =
+ row.new('Network')
+ .addPanel(networkReceived)
+ .addPanel(networkTransmitted),
+
+ local rows =
+ [
+ cpuRow,
+ memoryRow,
+ diskRow,
+ networkRow,
+ ],
+
+ local templates =
+ [
+ prometheusDatasourceTemplate,
+ instanceTemplate,
+ ],
+
+
+ dashboard: if platform == 'Linux' then
+ dashboard.new(
+ '%sNodes' % config.dashboardNamePrefix,
+ time_from='now-1h',
+ tags=(config.dashboardTags),
+ timezone='utc',
+ refresh='30s',
+ graphTooltip='shared_crosshair'
+ )
+ .addTemplates(templates)
+ .addRows(rows)
+ else if platform == 'Darwin' then
+ dashboard.new(
+ '%sMacOS' % config.dashboardNamePrefix,
+ time_from='now-1h',
+ tags=(config.dashboardTags),
+ timezone='utc',
+ refresh='30s',
+ graphTooltip='shared_crosshair'
+ )
+ .addTemplates(templates)
+ .addRows(rows),
+
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/mixin.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/mixin.libsonnet
new file mode 100644
index 0000000..b9831f9
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/mixin.libsonnet
@@ -0,0 +1,4 @@
+(import 'config.libsonnet') +
+(import 'alerts/alerts.libsonnet') +
+(import 'dashboards/dashboards.libsonnet') +
+(import 'rules/rules.libsonnet')
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/rules.jsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/rules.jsonnet
new file mode 100644
index 0000000..dbe13f4
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/rules.jsonnet
@@ -0,0 +1 @@
+std.manifestYamlDoc((import 'mixin.libsonnet').prometheusRules)
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/rules/rules.libsonnet b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/rules/rules.libsonnet
new file mode 100644
index 0000000..9c8eb90
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/github.com/prometheus/node_exporter/docs/node-mixin/rules/rules.libsonnet
@@ -0,0 +1,119 @@
+{
+ prometheusRules+:: {
+ groups+: [
+ {
+ name: 'node-exporter.rules',
+ rules: [
+ {
+ // This rule gives the number of CPUs per node.
+ record: 'instance:node_num_cpu:sum',
+ expr: |||
+ count without (cpu, mode) (
+ node_cpu_seconds_total{%(nodeExporterSelector)s,mode="idle"}
+ )
+ ||| % $._config,
+ },
+ {
+ // CPU utilisation is % CPU without {idle,iowait,steal}.
+ record: 'instance:node_cpu_utilisation:rate%(rateInterval)s' % $._config,
+ expr: |||
+ 1 - avg without (cpu) (
+ sum without (mode) (rate(node_cpu_seconds_total{%(nodeExporterSelector)s, mode=~"idle|iowait|steal"}[%(rateInterval)s]))
+ )
+ ||| % $._config,
+ },
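+ // With the default rateInterval of '5m', the record above renders to
+ // 'instance:node_cpu_utilisation:rate5m' (illustrative of the
+ // %(rateInterval)s templating used throughout this file).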
+ {
+ // This is CPU saturation: 1min avg run queue length / number of CPUs.
+ // Can go over 1.
+ // TODO: There are situations where a run queue >1/core is just normal and fine.
+ // We need to clarify how to read this metric and if its usage is helpful at all.
+ record: 'instance:node_load1_per_cpu:ratio',
+ expr: |||
+ (
+ node_load1{%(nodeExporterSelector)s}
+ /
+ instance:node_num_cpu:sum{%(nodeExporterSelector)s}
+ )
+ ||| % $._config,
+ },
+ {
+ // Memory utilisation (ratio of used memory per instance).
+ record: 'instance:node_memory_utilisation:ratio',
+ expr: |||
+ 1 - (
+ (
+ node_memory_MemAvailable_bytes{%(nodeExporterSelector)s}
+ or
+ (
+ node_memory_Buffers_bytes{%(nodeExporterSelector)s}
+ +
+ node_memory_Cached_bytes{%(nodeExporterSelector)s}
+ +
+ node_memory_MemFree_bytes{%(nodeExporterSelector)s}
+ +
+ node_memory_Slab_bytes{%(nodeExporterSelector)s}
+ )
+ )
+ /
+ node_memory_MemTotal_bytes{%(nodeExporterSelector)s}
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'instance:node_vmstat_pgmajfault:rate%(rateInterval)s' % $._config,
+ expr: |||
+ rate(node_vmstat_pgmajfault{%(nodeExporterSelector)s}[%(rateInterval)s])
+ ||| % $._config,
+ },
+ {
+ // Disk utilisation (seconds spent, 1 second rate).
+ record: 'instance_device:node_disk_io_time_seconds:rate%(rateInterval)s' % $._config,
+ expr: |||
+ rate(node_disk_io_time_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[%(rateInterval)s])
+ ||| % $._config,
+ },
+ {
+ // Disk saturation (weighted seconds spent, 1 second rate).
+ record: 'instance_device:node_disk_io_time_weighted_seconds:rate%(rateInterval)s' % $._config,
+ expr: |||
+ rate(node_disk_io_time_weighted_seconds_total{%(nodeExporterSelector)s, %(diskDeviceSelector)s}[%(rateInterval)s])
+ ||| % $._config,
+ },
+ {
+ record: 'instance:node_network_receive_bytes_excluding_lo:rate%(rateInterval)s' % $._config,
+ expr: |||
+ sum without (device) (
+ rate(node_network_receive_bytes_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s])
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'instance:node_network_transmit_bytes_excluding_lo:rate%(rateInterval)s' % $._config,
+ expr: |||
+ sum without (device) (
+ rate(node_network_transmit_bytes_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s])
+ )
+ ||| % $._config,
+ },
+ // TODO: Find out if those drops ever happen on modern switched networks.
+ {
+ record: 'instance:node_network_receive_drop_excluding_lo:rate%(rateInterval)s' % $._config,
+ expr: |||
+ sum without (device) (
+ rate(node_network_receive_drop_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s])
+ )
+ ||| % $._config,
+ },
+ {
+ record: 'instance:node_network_transmit_drop_excluding_lo:rate%(rateInterval)s' % $._config,
+ expr: |||
+ sum without (device) (
+ rate(node_network_transmit_drop_total{%(nodeExporterSelector)s, device!="lo"}[%(rateInterval)s])
+ )
+ ||| % $._config,
+ },
+ ],
+ },
+ ],
+ },
+}
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/grafonnet-7.0 b/roles/kube_prometheus_stack/files/jsonnet/vendor/grafonnet-7.0
new file mode 120000
index 0000000..5e7fee6
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/grafonnet-7.0
@@ -0,0 +1 @@
+github.com/grafana/grafonnet-lib/grafonnet-7.0
\ No newline at end of file
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/kubernetes-mixin b/roles/kube_prometheus_stack/files/jsonnet/vendor/kubernetes-mixin
new file mode 120000
index 0000000..cd2bed4
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/kubernetes-mixin
@@ -0,0 +1 @@
+github.com/kubernetes-monitoring/kubernetes-mixin
\ No newline at end of file
diff --git a/roles/kube_prometheus_stack/files/jsonnet/vendor/node-mixin b/roles/kube_prometheus_stack/files/jsonnet/vendor/node-mixin
new file mode 120000
index 0000000..877dfdb
--- /dev/null
+++ b/roles/kube_prometheus_stack/files/jsonnet/vendor/node-mixin
@@ -0,0 +1 @@
+github.com/prometheus/node_exporter/docs/node-mixin
\ No newline at end of file
diff --git a/roles/kube_prometheus_stack/tasks/main.yml b/roles/kube_prometheus_stack/tasks/main.yml
index d2f1f2c..a80b36b 100644
--- a/roles/kube_prometheus_stack/tasks/main.yml
+++ b/roles/kube_prometheus_stack/tasks/main.yml
@@ -65,6 +65,19 @@
name: "{{ kube_prometheus_stack_helm_release_name }}"
namespace: "{{ kube_prometheus_stack_helm_release_namespace }}"
+- name: Install all CRDs
+ run_once: true
+ changed_when: false
+ kubernetes.core.k8s:
+ state: present
+ definition: "{{ lookup('pipe', 'cat ' + playbook_dir + '/../charts/kube-prometheus-stack/charts/crds/crds/crd-*.yaml') | regex_replace('- =$', '- \"=\"', multiline=True) | from_yaml_all }}" # noqa: yaml[line-length]
+ apply: true
+ server_side_apply:
+ field_manager: atmosphere
+ force_conflicts: true
+ tags:
+ - kube-prometheus-stack-crds
+
- name: Deploy Helm chart
run_once: true
kubernetes.core.helm:
diff --git a/roles/kube_prometheus_stack/vars/main.yml b/roles/kube_prometheus_stack/vars/main.yml
index f488b13..b19ced8 100644
--- a/roles/kube_prometheus_stack/vars/main.yml
+++ b/roles/kube_prometheus_stack/vars/main.yml
@@ -1,10 +1,59 @@
_kube_prometheus_stack_helm_values:
defaultRules:
+ rules:
+ alertmanager: false
+ etcd: true
+ configReloaders: true
+ general: true
+ k8s: false
+ kubeApiserverAvailability: false
+ kubeApiserverBurnrate: false
+ kubeApiserverHistogram: false
+ kubeApiserverSlos: false
+ kubeControllerManager: false
+ kubelet: false
+ kubeProxy: false
+ kubePrometheusGeneral: true
+ kubePrometheusNodeRecording: true
+ kubernetesApps: false
+ kubernetesResources: false
+ kubernetesStorage: false
+ kubernetesSystem: false
+ kubeSchedulerAlerting: false
+ kubeSchedulerRecording: false
+ kubeStateMetrics: true
+ network: true
+ node: false
+ nodeExporterAlerting: false
+ nodeExporterRecording: false
+ prometheus: true
+ prometheusOperator: true
+ windows: false
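+ # NOTE: most upstream rule groups are disabled here since the equivalent
+ #   alerts are rendered from the vendored jsonnet mixins instead (see
+ #   roles/kube_prometheus_stack/files/jsonnet).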
disabled:
# NOTE(mnaser): https://github.com/prometheus-community/helm-charts/issues/144
# https://github.com/openshift/cluster-monitoring-operator/issues/248
etcdHighNumberOfFailedGRPCRequests: true
alertmanager:
+ config:
+ route:
+ group_by:
+ - alertname
+ - severity
+ receiver: notifier
+ routes:
+ - receiver: "null"
+ matchers:
+ - alertname = "InfoInhibitor"
+ - receiver: heartbeat
+ group_wait: 0s
+ group_interval: 30s
+ repeat_interval: 15s
+ matchers:
+ - alertname = "Watchdog"
+ receivers:
+ - name: "null"
+ - name: notifier
+ - name: heartbeat
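+ # NOTE: the always-firing "Watchdog" alert is routed to the heartbeat
+ #   receiver with tight intervals as a dead man's switch: a missed
+ #   heartbeat indicates a broken alerting pipeline (assumed intent).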
serviceMonitor:
relabelings: &relabelings_instance_to_pod_name
- &relabeling_set_pod_name_to_instance
@@ -16,7 +65,8 @@
regex: ^(container|endpoint|namespace|pod|node|service)$
alertmanagerSpec:
image:
- repository: "{{ atmosphere_images['alertmanager'] | vexxhost.kubernetes.docker_image('name') }}"
+ registry: "{{ atmosphere_images['alertmanager'] | vexxhost.kubernetes.docker_image('domain') }}"
+ repository: "{{ atmosphere_images['alertmanager'] | vexxhost.kubernetes.docker_image('path') }}"
tag: "{{ atmosphere_images['alertmanager'] | vexxhost.kubernetes.docker_image('tag') }}"
storage:
volumeClaimTemplate:
@@ -133,7 +183,8 @@
relabelings: *relabelings_instance_to_node_name
kube-state-metrics:
image:
- repository: "{{ atmosphere_images['kube_state_metrics'] | vexxhost.kubernetes.docker_image('name') }}"
+ registry: "{{ atmosphere_images['kube_state_metrics'] | vexxhost.kubernetes.docker_image('domain') }}"
+ repository: "{{ atmosphere_images['kube_state_metrics'] | vexxhost.kubernetes.docker_image('path') }}"
tag: "{{ atmosphere_images['kube_state_metrics'] | vexxhost.kubernetes.docker_image('tag') }}"
prometheus:
monitor:
@@ -144,7 +195,8 @@
relabelings: *relabelings_instance_to_pod_name
prometheusSpec:
image:
- repository: "{{ atmosphere_images['prometheus'] | vexxhost.kubernetes.docker_image('name') }}"
+ registry: "{{ atmosphere_images['prometheus'] | vexxhost.kubernetes.docker_image('domain') }}"
+ repository: "{{ atmosphere_images['prometheus'] | vexxhost.kubernetes.docker_image('path') }}"
tag: "{{ atmosphere_images['prometheus'] | vexxhost.kubernetes.docker_image('tag') }}"
nodeSelector: *node_selector
secrets:
@@ -271,22 +323,26 @@
admissionWebhooks:
patch:
image:
- repository: "{{ atmosphere_images['prometheus_operator_kube_webhook_certgen'] | vexxhost.kubernetes.docker_image('name') }}"
+ registry: "{{ atmosphere_images['prometheus_operator_kube_webhook_certgen'] | vexxhost.kubernetes.docker_image('domain') }}"
+ repository: "{{ atmosphere_images['prometheus_operator_kube_webhook_certgen'] | vexxhost.kubernetes.docker_image('path') }}"
tag: "{{ atmosphere_images['prometheus_operator_kube_webhook_certgen'] | vexxhost.kubernetes.docker_image('tag') }}"
nodeSelector: *node_selector
serviceMonitor:
relabelings: *relabelings_instance_to_pod_name
nodeSelector: *node_selector
image:
- repository: "{{ atmosphere_images['prometheus_operator'] | vexxhost.kubernetes.docker_image('name') }}"
+ registry: "{{ atmosphere_images['prometheus_operator'] | vexxhost.kubernetes.docker_image('domain') }}"
+ repository: "{{ atmosphere_images['prometheus_operator'] | vexxhost.kubernetes.docker_image('path') }}"
tag: "{{ atmosphere_images['prometheus_operator'] | vexxhost.kubernetes.docker_image('tag') }}"
prometheusConfigReloader:
image:
- repository: "{{ atmosphere_images['prometheus_config_reloader'] | vexxhost.kubernetes.docker_image('name') }}"
+ registry: "{{ atmosphere_images['prometheus_config_reloader'] | vexxhost.kubernetes.docker_image('domain') }}"
+ repository: "{{ atmosphere_images['prometheus_config_reloader'] | vexxhost.kubernetes.docker_image('path') }}"
tag: "{{ atmosphere_images['prometheus_config_reloader'] | vexxhost.kubernetes.docker_image('tag') }}"
prometheus-node-exporter:
image:
- repository: "{{ atmosphere_images['prometheus_node_exporter'] | vexxhost.kubernetes.docker_image('name') }}"
+ registry: "{{ atmosphere_images['prometheus_node_exporter'] | vexxhost.kubernetes.docker_image('domain') }}"
+ repository: "{{ atmosphere_images['prometheus_node_exporter'] | vexxhost.kubernetes.docker_image('path') }}"
tag: "{{ atmosphere_images['prometheus_node_exporter'] | vexxhost.kubernetes.docker_image('tag') }}"
extraArgs:
- --collector.diskstats.ignored-devices=^(ram|loop|nbd|fd|(h|s|v|xv)d[a-z]|nvme\\d+n\\d+p)\\d+$