author     rpereira2 <rpereira@gitlab.com>  2019-04-11 20:09:58 +0530
committer  syasonik <syasonik@gitlab.com>   2019-04-17 20:02:24 +0800
commit     8a51b21918161dda5977a92327175a1728efa1c8 (patch)
tree       90de06942c767b7b350afc7d9c7d98763814a69a
parent     38da5d6b23577edcd2a924125e0e4f3f8236f234 (diff)
Change the schema of common_metrics.yml
- Convert it to the new dashboard syntax described in https://gitlab.com/gitlab-org/gitlab-ce/issues/60383.
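
For context, the new syntax nests everything under a single dashboard: panel groups contain panels, and each panel lists the metrics (formerly "queries") it charts. A minimal, illustrative sketch of the structure this commit introduces, with field values abbreviated from the full file below:

    dashboard: 'Environment metrics'
    order: 1
    panel_groups:
      - group: Response metrics (NGINX Ingress)
        priority: 10
        panels:
          - title: "Throughput"
            type: "area-chart"
            y_label: "Requests / Sec"
            required_metrics:
              - nginx_ingress_controller_requests
            weight: 1
            metrics:
              - id: response_metrics_nginx_ingress_16_throughput_status_code
                query_range: 'sum(rate(nginx_ingress_controller_requests{namespace="%{kube_namespace}"}[2m])) by (status)'
                unit: req / sec

In short, the old top-level list of groups with metrics/queries becomes panel_groups/panels/metrics, which is what the importer and spec changes below reflect.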
-rw-r--r--   config/prometheus/common_metrics.yml                 86
-rw-r--r--   db/importers/common_metrics_importer.rb              26
-rw-r--r--   spec/db/importers/common_metrics_importer_spec.rb    44
3 files changed, 91 insertions, 65 deletions
diff --git a/config/prometheus/common_metrics.yml b/config/prometheus/common_metrics.yml
index 884868c6336..232b76bb180 100644
--- a/config/prometheus/common_metrics.yml
+++ b/config/prometheus/common_metrics.yml
@@ -1,13 +1,17 @@
- # NGINX Ingress metrics for pre-0.16.0 versions
+dashboard: 'Environment metrics'
+order: 1
+panel_groups:
+# NGINX Ingress metrics for pre-0.16.0 versions
- group: Response metrics (NGINX Ingress VTS)
priority: 10
- metrics:
+ panels:
- title: "Throughput"
+ type: "area-chart"
y_label: "Requests / Sec"
required_metrics:
- nginx_upstream_responses_total
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_ingress_throughput_status_code
query_range: 'sum(rate(nginx_upstream_responses_total{upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"}[2m])) by (status_code)'
unit: req / sec
@@ -22,21 +26,23 @@
- value: 5xx
color: red
- title: "Latency"
+ type: "area-chart"
y_label: "Latency (ms)"
required_metrics:
- nginx_upstream_response_msecs_avg
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_ingress_latency_pod_average
query_range: 'avg(nginx_upstream_response_msecs_avg{upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"})'
label: Pod average
unit: ms
- title: "HTTP Error Rate"
+ type: "area-chart"
y_label: "HTTP Errors"
required_metrics:
- nginx_upstream_responses_total
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_ingress_http_error_rate
query_range: 'sum(rate(nginx_upstream_responses_total{status_code="5xx", upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"}[2m])) / sum(rate(nginx_upstream_responses_total{upstream=~"%{kube_namespace}-%{ci_environment_slug}-.*"}[2m])) * 100'
label: 5xx Errors
@@ -44,13 +50,14 @@
# NGINX Ingress metrics for post-0.16.0 versions
- group: Response metrics (NGINX Ingress)
priority: 10
- metrics:
+ panels:
- title: "Throughput"
+ type: "area-chart"
y_label: "Requests / Sec"
required_metrics:
- nginx_ingress_controller_requests
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_ingress_16_throughput_status_code
query_range: 'sum(label_replace(rate(nginx_ingress_controller_requests{namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m]), "status_code", "${1}xx", "status", "(.)..")) by (status_code)'
unit: req / sec
@@ -67,34 +74,37 @@
- value: 5xx
color: red
- title: "Latency"
+ type: "area-chart"
y_label: "Latency (ms)"
required_metrics:
- nginx_ingress_controller_ingress_upstream_latency_seconds_sum
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_ingress_16_latency_pod_average
query_range: 'sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m])) / sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m])) * 1000'
label: Pod average
unit: ms
- title: "HTTP Error Rate"
+ type: "area-chart"
y_label: "HTTP Errors"
required_metrics:
- nginx_ingress_controller_requests
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_ingress_16_http_error_rate
query_range: 'sum(rate(nginx_ingress_controller_requests{status=~"5.*",namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m])) / sum(rate(nginx_ingress_controller_requests{namespace="%{kube_namespace}",ingress=~".*%{ci_environment_slug}.*"}[2m])) * 100'
label: 5xx Errors
unit: "%"
- group: Response metrics (HA Proxy)
priority: 10
- metrics:
+ panels:
- title: "Throughput"
+ type: "area-chart"
y_label: "Requests / Sec"
required_metrics:
- haproxy_frontend_http_requests_total
weight: 1
- queries:
+ metrics:
- id: response_metrics_ha_proxy_throughput_status_code
query_range: 'sum(rate(haproxy_frontend_http_requests_total{%{environment_filter}}[2m])) by (code)'
unit: req / sec
@@ -109,58 +119,63 @@
- value: 5xx
color: red
- title: "HTTP Error Rate"
+ type: "area-chart"
y_label: "Error Rate (%)"
required_metrics:
- haproxy_frontend_http_responses_total
weight: 1
- queries:
+ metrics:
- id: response_metrics_ha_proxy_http_error_rate
query_range: 'sum(rate(haproxy_frontend_http_responses_total{code="5xx",%{environment_filter}}[2m])) / sum(rate(haproxy_frontend_http_responses_total{%{environment_filter}}[2m]))'
label: HTTP Errors
unit: "%"
- group: Response metrics (AWS ELB)
priority: 10
- metrics:
+ panels:
- title: "Throughput"
+ type: "area-chart"
y_label: "Requests / Sec"
required_metrics:
- aws_elb_request_count_sum
weight: 1
- queries:
+ metrics:
- id: response_metrics_aws_elb_throughput_requests
query_range: 'sum(aws_elb_request_count_sum{%{environment_filter}}) / 60'
label: Total
unit: req / sec
- title: "Latency"
+ type: "area-chart"
y_label: "Latency (ms)"
required_metrics:
- aws_elb_latency_average
weight: 1
- queries:
+ metrics:
- id: response_metrics_aws_elb_latency_average
query_range: 'avg(aws_elb_latency_average{%{environment_filter}}) * 1000'
label: Average
unit: ms
- title: "HTTP Error Rate"
+ type: "area-chart"
y_label: "Error Rate (%)"
required_metrics:
- aws_elb_request_count_sum
- aws_elb_httpcode_backend_5_xx_sum
weight: 1
- queries:
+ metrics:
- id: response_metrics_aws_elb_http_error_rate
query_range: 'sum(aws_elb_httpcode_backend_5_xx_sum{%{environment_filter}}) / sum(aws_elb_request_count_sum{%{environment_filter}})'
label: HTTP Errors
unit: "%"
- group: Response metrics (NGINX)
priority: 10
- metrics:
+ panels:
- title: "Throughput"
+ type: "area-chart"
y_label: "Requests / Sec"
required_metrics:
- nginx_server_requests
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_throughput_status_code
query_range: 'sum(rate(nginx_server_requests{server_zone!="*", server_zone!="_", %{environment_filter}}[2m])) by (code)'
unit: req / sec
@@ -175,96 +190,105 @@
- value: 5xx
color: red
- title: "Latency"
+ type: "area-chart"
y_label: "Latency (ms)"
required_metrics:
- nginx_server_requestMsec
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_latency
query_range: 'avg(nginx_server_requestMsec{%{environment_filter}})'
label: Upstream
unit: ms
- title: "HTTP Error Rate"
+ type: "area-chart"
y_label: "HTTP 500 Errors / Sec"
required_metrics:
- nginx_server_requests
weight: 1
- queries:
+ metrics:
- id: response_metrics_nginx_http_error_rate
query_range: 'sum(rate(nginx_server_requests{code="5xx", %{environment_filter}}[2m]))'
label: HTTP Errors
unit: "errors / sec"
- group: System metrics (Kubernetes)
priority: 5
- metrics:
+ panels:
- title: "Memory Usage (Total)"
+ type: "area-chart"
y_label: "Total Memory Used"
required_metrics:
- container_memory_usage_bytes
weight: 4
- queries:
+ metrics:
- id: system_metrics_kubernetes_container_memory_total
query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) /1024/1024/1024'
label: Total
unit: GB
- title: "Core Usage (Total)"
+ type: "area-chart"
y_label: "Total Cores"
required_metrics:
- - container_cpu_usage_seconds_total
+ - container_cpu_usage_seconds_total
weight: 3
- queries:
+ metrics:
- id: system_metrics_kubernetes_container_cores_total
query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job)'
label: Total
unit: "cores"
- title: "Memory Usage (Pod average)"
+ type: "area-chart"
y_label: "Memory Used per Pod"
required_metrics:
- container_memory_usage_bytes
weight: 2
- queries:
+ metrics:
- id: system_metrics_kubernetes_container_memory_average
query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024'
label: Pod average
unit: MB
- title: "Canary: Memory Usage (Pod Average)"
+ type: "area-chart"
y_label: "Memory Used per Pod"
required_metrics:
- container_memory_usage_bytes
weight: 2
- queries:
+ metrics:
- id: system_metrics_kubernetes_container_memory_average_canary
query_range: 'avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}) without (job)) /1024/1024'
label: Pod average
unit: MB
track: canary
- title: "Core Usage (Pod Average)"
+ type: "area-chart"
y_label: "Cores per Pod"
required_metrics:
- - container_cpu_usage_seconds_total
+ - container_cpu_usage_seconds_total
weight: 1
- queries:
+ metrics:
- id: system_metrics_kubernetes_container_core_usage
query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-(.*)",namespace="%{kube_namespace}"}[15m])) by (pod_name))'
label: Pod average
unit: "cores"
- title: "Canary: Core Usage (Pod Average)"
+ type: "area-chart"
y_label: "Cores per Pod"
required_metrics:
- - container_cpu_usage_seconds_total
+ - container_cpu_usage_seconds_total
weight: 1
- queries:
+ metrics:
- id: system_metrics_kubernetes_container_core_usage_canary
query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^%{ci_environment_slug}-canary-(.*)",namespace="%{kube_namespace}"}[15m])) by (pod_name))'
label: Pod average
unit: "cores"
track: canary
- title: "Knative function invocations"
+ type: "area-chart"
y_label: "Invocations"
required_metrics:
- istio_revision_request_count
weight: 1
- queries:
+ metrics:
- id: system_metrics_knative_function_invocation_count
query_range: 'floor(sum(rate(istio_revision_request_count{destination_configuration="%{function_name}", destination_namespace="%{kube_namespace}"}[1m])*30))'
label: invocations / minute
diff --git a/db/importers/common_metrics_importer.rb b/db/importers/common_metrics_importer.rb
index deadd653ae9..195bde8f34a 100644
--- a/db/importers/common_metrics_importer.rb
+++ b/db/importers/common_metrics_importer.rb
@@ -53,7 +53,7 @@ module Importers
private
def process_content(&blk)
- content.map do |group|
+ content['panel_groups'].map do |group|
process_group(group, &blk)
end
end
@@ -63,28 +63,28 @@ module Importers
group: find_group_title_key(group['group'])
}
- group['metrics'].map do |metric|
- process_metric(metric, attributes, &blk)
+ group['panels'].map do |panel|
+ process_panel(panel, attributes, &blk)
end
end
- def process_metric(metric, attributes, &blk)
+ def process_panel(panel, attributes, &blk)
attributes = attributes.merge(
- title: metric['title'],
- y_label: metric['y_label'])
+ title: panel['title'],
+ y_label: panel['y_label'])
- metric['queries'].map do |query|
- process_metric_query(query, attributes, &blk)
+ panel['metrics'].map do |metric_details|
+ process_metric_details(metric_details, attributes, &blk)
end
end
- def process_metric_query(query, attributes, &blk)
+ def process_metric_details(metric_details, attributes, &blk)
attributes = attributes.merge(
- legend: query['label'],
- query: query['query_range'],
- unit: query['unit'])
+ legend: metric_details['label'],
+ query: metric_details['query_range'],
+ unit: metric_details['unit'])
- yield(query['id'], attributes)
+ yield(metric_details['id'], attributes)
end
def find_or_build_metric!(id)
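
To summarize the importer change above: instead of iterating a top-level list of groups and their queries, the importer now walks content['panel_groups'], then each group's panels, then each panel's metrics, yielding one identifier plus attribute hash per metric. A condensed sketch of that traversal (each_common_metric is a made-up name for illustration; the real implementation splits this across process_group, process_panel and process_metric_details, and resolves the group title via find_group_title_key):

    def each_common_metric(content)
      # content is the parsed common_metrics.yml dashboard hash
      content['panel_groups'].each do |group|
        group_attrs = { group: group['group'] }

        group['panels'].each do |panel|
          panel_attrs = group_attrs.merge(title: panel['title'], y_label: panel['y_label'])

          panel['metrics'].each do |metric|
            yield metric['id'], panel_attrs.merge(
              legend: metric['label'],
              query:  metric['query_range'],
              unit:   metric['unit'])
          end
        end
      end
    end

Each yielded pair maps onto a PrometheusMetric row, which is what the spec changes below assert against.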
diff --git a/spec/db/importers/common_metrics_importer_spec.rb b/spec/db/importers/common_metrics_importer_spec.rb
index 6133b17ac61..a717c8cd04d 100644
--- a/spec/db/importers/common_metrics_importer_spec.rb
+++ b/spec/db/importers/common_metrics_importer_spec.rb
@@ -23,10 +23,10 @@ describe Importers::CommonMetricsImporter do
subject { described_class.new }
context "does import common_metrics.yml" do
- let(:groups) { subject.content }
- let(:metrics) { groups.map { |group| group['metrics'] }.flatten }
- let(:queries) { metrics.map { |group| group['queries'] }.flatten }
- let(:query_ids) { queries.map { |query| query['id'] } }
+ let(:groups) { subject.content['panel_groups'] }
+ let(:panels) { groups.map { |group| group['panels'] }.flatten }
+ let(:metrics) { panels.map { |group| group['metrics'] }.flatten }
+ let(:metric_ids) { metrics.map { |metric| metric['id'] } }
before do
subject.execute
@@ -36,20 +36,20 @@ describe Importers::CommonMetricsImporter do
expect(PrometheusMetric.common.group(:group).count.count).to eq(groups.count)
end
- it "has the same amount of metrics" do
- expect(PrometheusMetric.common.group(:group, :title).count.count).to eq(metrics.count)
+ it "has the same amount of panels" do
+ expect(PrometheusMetric.common.group(:group, :title).count.count).to eq(panels.count)
end
- it "has the same amount of queries" do
- expect(PrometheusMetric.common.count).to eq(queries.count)
+ it "has the same amount of metrics" do
+ expect(PrometheusMetric.common.count).to eq(metrics.count)
end
it "does not have duplicate IDs" do
- expect(query_ids).to eq(query_ids.uniq)
+ expect(metric_ids).to eq(metric_ids.uniq)
end
it "imports all IDs" do
- expect(PrometheusMetric.common.pluck(:identifier)).to contain_exactly(*query_ids)
+ expect(PrometheusMetric.common.pluck(:identifier)).to contain_exactly(*metric_ids)
end
end
@@ -65,24 +65,26 @@ describe Importers::CommonMetricsImporter do
context 'does import properly all fields' do
let(:query_identifier) { 'response-metric' }
- let(:group) do
+ let(:dashboard) do
{
- group: 'Response metrics (NGINX Ingress)',
- metrics: [{
- title: "Throughput",
- y_label: "Requests / Sec",
- queries: [{
- id: query_identifier,
- query_range: 'my-query',
- unit: 'my-unit',
- label: 'status code'
+ panel_groups: [{
+ group: 'Response metrics (NGINX Ingress)',
+ panels: [{
+ title: "Throughput",
+ y_label: "Requests / Sec",
+ metrics: [{
+ id: query_identifier,
+ query_range: 'my-query',
+ unit: 'my-unit',
+ label: 'status code'
+ }]
}]
}]
}
end
before do
- expect(subject).to receive(:content) { [group.deep_stringify_keys] }
+ expect(subject).to receive(:content) { dashboard.deep_stringify_keys }
end
shared_examples 'stores metric' do