summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Lennart Poettering <lennart@poettering.net> 2017-09-07 16:31:01 +0200
committer: Lennart Poettering <lennart@poettering.net> 2017-09-22 15:24:55 +0200
commit: cf3b4be101acb396fe3b9504685a970be7f86764 (patch)
tree: 2b3b748916c0e054027e825acf7dee7a1556518d
parent: 58d83430e1276fe8d1224c2b5f76e756d143a375 (diff)
download: systemd-cf3b4be101acb396fe3b9504685a970be7f86764.tar.gz
cgroup: refuse to return accounting data if accounting isn't turned on
We used to be a bit sloppy on this, and handed out accounting data even for units where accounting wasn't explicitly enabled. Let's be stricter here, so that we know the accounting data is actually fully valid. This is necessary, as the accounting data is no longer stored exclusively in cgroupfs, but is partly maintained external of that, and flushed during unit starts. We should hence only expose accounting data we really know is fully current.
-rw-r--r-- src/core/cgroup.c | 35
1 file changed, 35 insertions(+), 0 deletions(-)
diff --git a/src/core/cgroup.c b/src/core/cgroup.c
index e9cb0d35c9..9a0d374aa8 100644
--- a/src/core/cgroup.c
+++ b/src/core/cgroup.c
@@ -2088,11 +2088,18 @@ int manager_notify_cgroup_empty(Manager *m, const char *cgroup) {
int unit_get_memory_current(Unit *u, uint64_t *ret) {
_cleanup_free_ char *v = NULL;
+ CGroupContext *cc;
int r;
assert(u);
assert(ret);
+ cc = unit_get_cgroup_context(u);
+ if (!cc)
+ return -ENODATA;
+ if (!cc->memory_accounting)
+ return -ENODATA;
+
if (!u->cgroup_path)
return -ENODATA;
@@ -2116,11 +2123,18 @@ int unit_get_memory_current(Unit *u, uint64_t *ret) {
int unit_get_tasks_current(Unit *u, uint64_t *ret) {
_cleanup_free_ char *v = NULL;
+ CGroupContext *cc;
int r;
assert(u);
assert(ret);
+ cc = unit_get_cgroup_context(u);
+ if (!cc)
+ return -ENODATA;
+ if (!cc->tasks_accounting)
+ return -ENODATA;
+
if (!u->cgroup_path)
return -ENODATA;
@@ -2187,6 +2201,7 @@ static int unit_get_cpu_usage_raw(Unit *u, nsec_t *ret) {
}
int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
+ CGroupContext *cc;
nsec_t ns;
int r;
@@ -2196,6 +2211,12 @@ int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
* started. If the cgroup has been removed already, returns the last cached value. To cache the value, simply
* call this function with a NULL return value. */
+ cc = unit_get_cgroup_context(u);
+ if (!cc)
+ return -ENODATA;
+ if (!cc->cpu_accounting)
+ return -ENODATA;
+
r = unit_get_cpu_usage_raw(u, &ns);
if (r == -ENODATA && u->cpu_usage_last != NSEC_INFINITY) {
/* If we can't get the CPU usage anymore (because the cgroup was already removed, for example), use our
@@ -2225,6 +2246,7 @@ int unit_get_ip_accounting(
CGroupIPAccountingMetric metric,
uint64_t *ret) {
+ CGroupContext *cc;
uint64_t value;
int fd, r;
@@ -2233,6 +2255,19 @@ int unit_get_ip_accounting(
assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
assert(ret);
+ /* IP accounting is currently not recursive, and hence we refuse to return any data for slice nodes. Slices are
+ * inner cgroup nodes and hence have no processes directly attached, hence their counters would be zero
+ * anyway. And if we block this now we can later open this up, if the kernel learns recursive BPF cgroup
+ * filters. */
+ if (u->type == UNIT_SLICE)
+ return -ENODATA;
+
+ cc = unit_get_cgroup_context(u);
+ if (!cc)
+ return -ENODATA;
+ if (!cc->ip_accounting)
+ return -ENODATA;
+
fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
u->ip_accounting_ingress_map_fd :
u->ip_accounting_egress_map_fd;