author     rpereira2 <rpereira@gitlab.com>  2019-04-11 20:09:58 +0530
committer  syasonik <syasonik@gitlab.com>   2019-04-17 20:02:24 +0800
commit     8a51b21918161dda5977a92327175a1728efa1c8 (patch)
tree       90de06942c767b7b350afc7d9c7d98763814a69a /config
parent     38da5d6b23577edcd2a924125e0e4f3f8236f234 (diff)
Change the schema of common_metrics.yml
- Change it to the new dashboard syntax described in https://gitlab.com/gitlab-org/gitlab-ce/issues/60383.
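For reference, the new syntax wraps the existing metric groups in a top-level dashboard with panel_groups, renames each group's metrics key to panels, renames queries to metrics, and adds a type to every panel. A minimal sketch of the resulting structure, condensed from the first group in the diff below (the PromQL query is abbreviated with "..."):

dashboard: 'Environment metrics'
order: 1
panel_groups:
- group: Response metrics (NGINX Ingress VTS)
  priority: 10
  panels:
  - title: "Throughput"
    type: "area-chart"
    y_label: "Requests / Sec"
    required_metrics:
    - nginx_upstream_responses_total
    weight: 1
    metrics:
    - id: response_metrics_nginx_ingress_throughput_status_code
      query_range: 'sum(rate(nginx_upstream_responses_total{...}[2m])) by (status_code)'
      unit: req / sec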
Diffstat (limited to 'config')
-rw-r--r--  config/prometheus/common_metrics.yml  86
1 file changed, 55 insertions(+), 31 deletions(-)
diff --git a/config/prometheus/common_metrics.yml b/config/prometheus/common_metrics.yml
index 884868c6336..232b76bb180 100644
--- a/config/prometheus/common_metrics.yml
+++ b/config/prometheus/common_metrics.yml
@@ -1,13 +1,17 @@
- # NGINX Ingress metrics for pre-0.16.0 versions
+dashboard: 'Environment metrics'
+order: 1
+panel_groups:
+# NGINX Ingress metrics for pre-0.16.0 versions
- group: Response metrics (NGINX Ingress VTS)
priority: 10
- metrics:
+ panels:
- title: "Throughput"
+ type: "area-chart"
y_label: "Requests / Sec"
required_metrics:
- nginx_upstream_responses_total
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_ingress_throughput_status_code
query_range: 'sum(rate(nginx_upstream_responses_total{upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"}[2m])) by (status_code)'
unit: req / sec
@@ -22,21 +26,23 @@
- value: 5xx
color: red
- title: "Latency"
+ type: "area-chart"
y_label: "Latency (ms)"
required_metrics:
- nginx_upstream_response_msecs_avg
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_ingress_latency_pod_average
query_range: 'avg(nginx_upstream_response_msecs_avg{upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"})'
label: Pod average
unit: ms
- title: "HTTP Error Rate"
+ type: "area-chart"
y_label: "HTTP Errors"
required_metrics:
- nginx_upstream_responses_total
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_ingress_http_error_rate
query_range: 'sum(rate(nginx_upstream_responses_total{status_code="5xx", upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"}[2m])) / sum(rate(nginx_upstream_responses_total{upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"}[2m])) * 100'
label: 5xx Errors
@@ -44,13 +50,14 @@
# NGINX Ingress metrics for post-0.16.0 versions
- group: Response metrics (NGINX Ingress)
priority: 10
- metrics:
+ panels:
- title: "Throughput"
+ type: "area-chart"
y_label: "Requests / Sec"
required_metrics:
- nginx_ingress_controller_requests
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_ingress_16_throughput_status_code
query_range: 'sum(label_replace(rate(nginx_ingress_controller_requests{namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m]), "status_code", "${1}xx", "status", "(.)..")) by (status_code)'
unit: req / sec
@@ -67,34 +74,37 @@
- value: 5xx
color: red
- title: "Latency"
+ type: "area-chart"
y_label: "Latency (ms)"
required_metrics:
- nginx_ingress_controller_ingress_upstream_latency_seconds_sum
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_ingress_16_latency_pod_average
query_range: 'sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m])) / sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m])) * 1000'
label: Pod average
unit: ms
- title: "HTTP Error Rate"
+ type: "area-chart"
y_label: "HTTP Errors"
required_metrics:
- nginx_ingress_controller_requests
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_ingress_16_http_error_rate
query_range: 'sum(rate(nginx_ingress_controller_requests{status=~"5.*",namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m])) / sum(rate(nginx_ingress_controller_requests{namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m])) * 100'
label: 5xx Errors
unit: "%"
- group: Response metrics (HA Proxy)
priority: 10
- metrics:
+ panels:
- title: "Throughput"
+ type: "area-chart"
y_label: "Requests / Sec"
required_metrics:
- haproxy_frontend_http_requests_total
weight: 1
- queries:
+ metrics:
- id: response_metrics_ha_proxy_throughput_status_code
query_range: 'sum(rate(haproxy_frontend_http_requests_total{%{environment_filter}}[2m])) by (code)'
unit: req / sec
@@ -109,58 +119,63 @@
- value: 5xx
color: red
- title: "HTTP Error Rate"
+ type: "area-chart"
y_label: "Error Rate (%)"
required_metrics:
- haproxy_frontend_http_responses_total
weight: 1
- queries:
+ metrics:
- id: response_metrics_ha_proxy_http_error_rate
query_range: 'sum(rate(haproxy_frontend_http_responses_total{code="5xx",%{environment_filter}}[2m])) / sum(rate(haproxy_frontend_http_responses_total{%{environment_filter}}[2m]))'
label: HTTP Errors
unit: "%"
- group: Response metrics (AWS ELB)
priority: 10
- metrics:
+ panels:
- title: "Throughput"
+ type: "area-chart"
y_label: "Requests / Sec"
required_metrics:
- aws_elb_request_count_sum
weight: 1
- queries:
+ metrics:
- id: response_metrics_aws_elb_throughput_requests
query_range: 'sum(aws_elb_request_count_sum{%{environment_filter}}) / 60'
label: Total
unit: req / sec
- title: "Latency"
+ type: "area-chart"
y_label: "Latency (ms)"
required_metrics:
- aws_elb_latency_average
weight: 1
- queries:
+ metrics:
- id: response_metrics_aws_elb_latency_average
query_range: 'avg(aws_elb_latency_average{%{environment_filter}}) * 1000'
label: Average
unit: ms
- title: "HTTP Error Rate"
+ type: "area-chart"
y_label: "Error Rate (%)"
required_metrics:
- aws_elb_request_count_sum
- aws_elb_httpcode_backend_5_xx_sum
weight: 1
- queries:
+ metrics:
- id: response_metrics_aws_elb_http_error_rate
query_range: 'sum(aws_elb_httpcode_backend_5_xx_sum{%{environment_filter}}) / sum(aws_elb_request_count_sum{%{environment_filter}})'
label: HTTP Errors
unit: "%"
- group: Response metrics (NGINX)
priority: 10
- metrics:
+ panels:
- title: "Throughput"
+ type: "area-chart"
y_label: "Requests / Sec"
required_metrics:
- nginx_server_requests
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_throughput_status_code
query_range: 'sum(rate(nginx_server_requests{server_zone!="*", server_zone!="_", %{environment_filter}}[2m])) by (code)'
unit: req / sec
@@ -175,96 +190,105 @@
- value: 5xx
color: red
- title: "Latency"
+ type: "area-chart"
y_label: "Latency (ms)"
required_metrics:
- nginx_server_requestMsec
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_latency
query_range: 'avg(nginx_server_requestMsec{%{environment_filter}})'
label: Upstream
unit: ms
- title: "HTTP Error Rate"
+ type: "area-chart"
y_label: "HTTP 500 Errors / Sec"
required_metrics:
- nginx_server_requests
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_http_error_rate
query_range: 'sum(rate(nginx_server_requests{code="5xx", %{environment_filter}}[2m]))'
label: HTTP Errors
unit: "errors / sec"
- group: System metrics (Kubernetes)
priority: 5
- metrics:
+ panels:
- title: "Memory Usage (Total)"
+ type: "area-chart"
y_label: "Total Memory Used"
required_metrics:
- container_memory_usage_bytes
weight: 4
- queries:
+ metrics:
- id: system_metrics_kubernetes_container_memory_total
query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) /1024/1024/1024'
label: Total
unit: GB
- title: "Core Usage (Total)"
+ type: "area-chart"
y_label: "Total Cores"
required_metrics:
- - container_cpu_usage_seconds_total
+ - container_cpu_usage_seconds_total
weight: 3
- queries:
+ metrics:
- id: system_metrics_kubernetes_container_cores_total
query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job)'
label: Total
unit: "cores"
- title: "Memory Usage (Pod average)"
+ type: "area-chart"
y_label: "Memory Used per Pod"
required_metrics:
- container_memory_usage_bytes
weight: 2
- queries:
+ metrics:
- id: system_metrics_kubernetes_container_memory_average
query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024'
label: Pod average
unit: MB
- title: "Canary: Memory Usage (Pod Average)"
+ type: "area-chart"
y_label: "Memory Used per Pod"
required_metrics:
- container_memory_usage_bytes
weight: 2
- queries:
+ metrics:
- id: system_metrics_kubernetes_container_memory_average_canary
query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024'
label: Pod average
unit: MB
track: canary
- title: "Core Usage (Pod Average)"
+ type: "area-chart"
y_label: "Cores per Pod"
required_metrics:
- - container_cpu_usage_seconds_total
+ - container_cpu_usage_seconds_total
weight: 1
- queries:
+ metrics:
- id: system_metrics_kubernetes_container_core_usage
query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}[15m])) by (pod_name))'
label: Pod average
unit: "cores"
- title: "Canary: Core Usage (Pod Average)"
+ type: "area-chart"
y_label: "Cores per Pod"
required_metrics:
- - container_cpu_usage_seconds_total
+ - container_cpu_usage_seconds_total
weight: 1
- queries:
+ metrics:
- id: system_metrics_kubernetes_container_core_usage_canary
query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}[15m])) by (pod_name))'
label: Pod average
unit: "cores"
track: canary
- title: "Knative function invocations"
+ type: "area-chart"
y_label: "Invocations"
required_metrics:
- istio_revision_request_count
weight: 1
- queries:
+ metrics:
- id: system_metrics_knative_function_invocation_count
query_range: 'floor(sum(rate(istio_revision_request_count{destination_configuration="%{function_name}", destination_namespace="%{kube_namespace}"}[1m])*30))'
label: invocations / minute