-rw-r--r--  src/core/cgroup.c                      | 190
-rw-r--r--  src/core/cgroup.h                      |  18
-rw-r--r--  src/core/dbus-unit.c                   |  38
-rw-r--r--  src/core/load-fragment-gperf.gperf.m4  |   3
-rw-r--r--  src/core/mount.c                       |   9
-rw-r--r--  src/core/mount.h                       |   2
-rw-r--r--  src/core/scope.c                       |   3
-rw-r--r--  src/core/service.c                     |   9
-rw-r--r--  src/core/service.h                     |   2
-rw-r--r--  src/core/slice.c                       |   3
-rw-r--r--  src/core/socket.c                      |   9
-rw-r--r--  src/core/socket.h                      |   2
-rw-r--r--  src/core/swap.c                        |   9
-rw-r--r--  src/core/swap.h                        |   2
-rw-r--r--  src/core/unit.c                        |  15
-rw-r--r--  src/core/unit.h                        |   8
16 files changed, 280 insertions(+), 42 deletions(-)
diff --git a/src/core/cgroup.c b/src/core/cgroup.c
index 62cbe08f13..47c2ad98a8 100644
--- a/src/core/cgroup.c
+++ b/src/core/cgroup.c
@@ -21,6 +21,7 @@
#include <fnmatch.h>
#include "alloc-util.h"
+#include "bpf-firewall.h"
#include "cgroup-util.h"
#include "cgroup.h"
#include "fd-util.h"
@@ -30,9 +31,9 @@
#include "path-util.h"
#include "process-util.h"
#include "special.h"
+#include "stdio-util.h"
#include "string-table.h"
#include "string-util.h"
-#include "stdio-util.h"
#define CGROUP_CPU_QUOTA_PERIOD_USEC ((usec_t) 100 * USEC_PER_MSEC)
@@ -648,7 +649,27 @@ static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_
"Failed to set %s: %m", file);
}
-static void cgroup_context_apply(Unit *u, CGroupMask mask, ManagerState state) {
+static void cgroup_apply_firewall(Unit *u, CGroupContext *c) {
+ int r;
+
+ if (u->type == UNIT_SLICE) /* Skip this for slice units, they are inner cgroup nodes, and since bpf/cgroup is
+ * not recursive we don't ever touch the bpf on them */
+ return;
+
+ r = bpf_firewall_compile(u);
+ if (r < 0)
+ return;
+
+ (void) bpf_firewall_install(u);
+ return;
+}
+
+static void cgroup_context_apply(
+ Unit *u,
+ CGroupMask apply_mask,
+ bool apply_bpf,
+ ManagerState state) {
+
const char *path;
CGroupContext *c;
bool is_root;
@@ -662,7 +683,8 @@ static void cgroup_context_apply(Unit *u, CGroupMask mask, ManagerState state) {
assert(c);
assert(path);
- if (mask == 0)
+ /* Nothing to do? Exit early! */
+ if (apply_mask == 0 && !apply_bpf)
return;
/* Some cgroup attributes are not supported on the root cgroup,
@@ -676,9 +698,11 @@ static void cgroup_context_apply(Unit *u, CGroupMask mask, ManagerState state) {
* cgroup trees (assuming we are running in a container then),
* and missing cgroups, i.e. EROFS and ENOENT. */
- if ((mask & CGROUP_MASK_CPU) && !is_root) {
- bool has_weight = cgroup_context_has_cpu_weight(c);
- bool has_shares = cgroup_context_has_cpu_shares(c);
+ if ((apply_mask & CGROUP_MASK_CPU) && !is_root) {
+ bool has_weight, has_shares;
+
+ has_weight = cgroup_context_has_cpu_weight(c);
+ has_shares = cgroup_context_has_cpu_shares(c);
if (cg_all_unified() > 0) {
uint64_t weight;
@@ -715,7 +739,7 @@ static void cgroup_context_apply(Unit *u, CGroupMask mask, ManagerState state) {
}
}
- if (mask & CGROUP_MASK_IO) {
+ if (apply_mask & CGROUP_MASK_IO) {
bool has_io = cgroup_context_has_io_config(c);
bool has_blockio = cgroup_context_has_blockio_config(c);
@@ -792,7 +816,7 @@ static void cgroup_context_apply(Unit *u, CGroupMask mask, ManagerState state) {
}
}
- if (mask & CGROUP_MASK_BLKIO) {
+ if (apply_mask & CGROUP_MASK_BLKIO) {
bool has_io = cgroup_context_has_io_config(c);
bool has_blockio = cgroup_context_has_blockio_config(c);
@@ -859,7 +883,7 @@ static void cgroup_context_apply(Unit *u, CGroupMask mask, ManagerState state) {
}
}
- if ((mask & CGROUP_MASK_MEMORY) && !is_root) {
+ if ((apply_mask & CGROUP_MASK_MEMORY) && !is_root) {
if (cg_all_unified() > 0) {
uint64_t max, swap_max = CGROUP_LIMIT_MAX;
@@ -899,7 +923,7 @@ static void cgroup_context_apply(Unit *u, CGroupMask mask, ManagerState state) {
}
}
- if ((mask & CGROUP_MASK_DEVICES) && !is_root) {
+ if ((apply_mask & CGROUP_MASK_DEVICES) && !is_root) {
CGroupDeviceAllow *a;
/* Changing the devices list of a populated cgroup
@@ -963,7 +987,7 @@ static void cgroup_context_apply(Unit *u, CGroupMask mask, ManagerState state) {
}
}
- if ((mask & CGROUP_MASK_PIDS) && !is_root) {
+ if ((apply_mask & CGROUP_MASK_PIDS) && !is_root) {
if (c->tasks_max != CGROUP_LIMIT_MAX) {
char buf[DECIMAL_STR_MAX(uint64_t) + 2];
@@ -977,6 +1001,9 @@ static void cgroup_context_apply(Unit *u, CGroupMask mask, ManagerState state) {
log_unit_full(u, IN_SET(r, -ENOENT, -EROFS, -EACCES) ? LOG_DEBUG : LOG_WARNING, r,
"Failed to set pids.max: %m");
}
+
+ if (apply_bpf)
+ cgroup_apply_firewall(u, c);
}
CGroupMask cgroup_context_get_mask(CGroupContext *c) {
@@ -1123,6 +1150,39 @@ CGroupMask unit_get_enable_mask(Unit *u) {
return mask;
}
+bool unit_get_needs_bpf(Unit *u) {
+ CGroupContext *c;
+ Unit *p;
+ assert(u);
+
+ /* We never attach BPF to slice units, as they are inner cgroup nodes and cgroup/BPF is not recursive at the
+ * moment. */
+ if (u->type == UNIT_SLICE)
+ return false;
+
+ c = unit_get_cgroup_context(u);
+ if (!c)
+ return false;
+
+ if (c->ip_accounting ||
+ c->ip_address_allow ||
+ c->ip_address_deny)
+ return true;
+
+ /* If any parent slice has an IP access list defined, it applies too */
+ for (p = UNIT_DEREF(u->slice); p; p = UNIT_DEREF(p->slice)) {
+ c = unit_get_cgroup_context(p);
+ if (!c)
+ return false;
+
+ if (c->ip_address_allow ||
+ c->ip_address_deny)
+ return true;
+ }
+
+ return false;
+}
+
/* Recurse from a unit up through its containing slices, propagating
* mask bits upward. A unit is also member of itself. */
void unit_update_cgroup_members_masks(Unit *u) {
@@ -1298,7 +1358,8 @@ int unit_watch_cgroup(Unit *u) {
static int unit_create_cgroup(
Unit *u,
CGroupMask target_mask,
- CGroupMask enable_mask) {
+ CGroupMask enable_mask,
+ bool needs_bpf) {
CGroupContext *c;
int r;
@@ -1340,6 +1401,7 @@ static int unit_create_cgroup(
u->cgroup_realized = true;
u->cgroup_realized_mask = target_mask;
u->cgroup_enabled_mask = enable_mask;
+ u->cgroup_bpf_state = needs_bpf ? UNIT_CGROUP_BPF_ON : UNIT_CGROUP_BPF_OFF;
if (u->type != UNIT_SLICE && !c->delegate) {
@@ -1389,10 +1451,19 @@ static void cgroup_xattr_apply(Unit *u) {
log_unit_warning_errno(u, r, "Failed to set invocation ID on control group %s, ignoring: %m", u->cgroup_path);
}
-static bool unit_has_mask_realized(Unit *u, CGroupMask target_mask, CGroupMask enable_mask) {
+static bool unit_has_mask_realized(
+ Unit *u,
+ CGroupMask target_mask,
+ CGroupMask enable_mask,
+ bool needs_bpf) {
+
assert(u);
- return u->cgroup_realized && u->cgroup_realized_mask == target_mask && u->cgroup_enabled_mask == enable_mask;
+ return u->cgroup_realized &&
+ u->cgroup_realized_mask == target_mask &&
+ u->cgroup_enabled_mask == enable_mask &&
+ ((needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_ON) ||
+ (!needs_bpf && u->cgroup_bpf_state == UNIT_CGROUP_BPF_OFF));
}
/* Check if necessary controllers and attributes for a unit are in place.
@@ -1403,6 +1474,7 @@ static bool unit_has_mask_realized(Unit *u, CGroupMask target_mask, CGroupMask e
* Returns 0 on success and < 0 on failure. */
static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
CGroupMask target_mask, enable_mask;
+ bool needs_bpf, apply_bpf;
int r;
assert(u);
@@ -1414,10 +1486,16 @@ static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
target_mask = unit_get_target_mask(u);
enable_mask = unit_get_enable_mask(u);
+ needs_bpf = unit_get_needs_bpf(u);
- if (unit_has_mask_realized(u, target_mask, enable_mask))
+ if (unit_has_mask_realized(u, target_mask, enable_mask, needs_bpf))
return 0;
+ /* Make sure we apply the BPF filters either when one is configured, or if none is configured but previously
+ * the state was anything but off. This way, if a unit with a BPF filter applied is reconfigured to lose it
+ * this will trickle down properly to cgroupfs. */
+ apply_bpf = needs_bpf || u->cgroup_bpf_state != UNIT_CGROUP_BPF_OFF;
+
/* First, realize parents */
if (UNIT_ISSET(u->slice)) {
r = unit_realize_cgroup_now(UNIT_DEREF(u->slice), state);
@@ -1426,12 +1504,12 @@ static int unit_realize_cgroup_now(Unit *u, ManagerState state) {
}
/* And then do the real work */
- r = unit_create_cgroup(u, target_mask, enable_mask);
+ r = unit_create_cgroup(u, target_mask, enable_mask, needs_bpf);
if (r < 0)
return r;
/* Finally, apply the necessary attributes. */
- cgroup_context_apply(u, target_mask, state);
+ cgroup_context_apply(u, target_mask, apply_bpf, state);
cgroup_xattr_apply(u);
return 0;
@@ -1495,7 +1573,10 @@ static void unit_queue_siblings(Unit *u) {
/* If the unit doesn't need any new controllers
* and has current ones realized, it doesn't need
* any changes. */
- if (unit_has_mask_realized(m, unit_get_target_mask(m), unit_get_enable_mask(m)))
+ if (unit_has_mask_realized(m,
+ unit_get_target_mask(m),
+ unit_get_enable_mask(m),
+ unit_get_needs_bpf(m)))
continue;
unit_add_to_cgroup_queue(m);
@@ -2121,7 +2202,34 @@ int unit_get_cpu_usage(Unit *u, nsec_t *ret) {
return 0;
}
-int unit_reset_cpu_usage(Unit *u) {
+int unit_get_ip_accounting(
+ Unit *u,
+ CGroupIPAccountingMetric metric,
+ uint64_t *ret) {
+
+ int fd, r;
+
+ assert(u);
+ assert(metric >= 0);
+ assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
+ assert(ret);
+
+ fd = IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_INGRESS_PACKETS) ?
+ u->ip_accounting_ingress_map_fd :
+ u->ip_accounting_egress_map_fd;
+
+ if (fd < 0)
+ return -ENODATA;
+
+ if (IN_SET(metric, CGROUP_IP_INGRESS_BYTES, CGROUP_IP_EGRESS_BYTES))
+ r = bpf_firewall_read_accounting(fd, ret, NULL);
+ else
+ r = bpf_firewall_read_accounting(fd, NULL, ret);
+
+ return r;
+}
+
+int unit_reset_cpu_accounting(Unit *u) {
nsec_t ns;
int r;
@@ -2139,6 +2247,20 @@ int unit_reset_cpu_usage(Unit *u) {
return 0;
}
+int unit_reset_ip_accounting(Unit *u) {
+ int r = 0, q = 0;
+
+ assert(u);
+
+ if (u->ip_accounting_ingress_map_fd >= 0)
+ r = bpf_firewall_reset_accounting(u->ip_accounting_ingress_map_fd);
+
+ if (u->ip_accounting_egress_map_fd >= 0)
+ q = bpf_firewall_reset_accounting(u->ip_accounting_egress_map_fd);
+
+ return r < 0 ? r : q;
+}
+
bool unit_cgroup_delegate(Unit *u) {
CGroupContext *c;
@@ -2174,6 +2296,36 @@ void unit_invalidate_cgroup(Unit *u, CGroupMask m) {
unit_add_to_cgroup_queue(u);
}
+void unit_invalidate_cgroup_bpf(Unit *u) {
+ assert(u);
+
+ if (!UNIT_HAS_CGROUP_CONTEXT(u))
+ return;
+
+ if (u->cgroup_bpf_state == UNIT_CGROUP_BPF_INVALIDATED)
+ return;
+
+ u->cgroup_bpf_state = UNIT_CGROUP_BPF_INVALIDATED;
+ unit_add_to_cgroup_queue(u);
+
+ /* If we are a slice unit, we also need to compile a new BPF program for all our children, as the IP access
+ * list of our children includes our own. */
+ if (u->type == UNIT_SLICE) {
+ Unit *member;
+ Iterator i;
+
+ SET_FOREACH(member, u->dependencies[UNIT_BEFORE], i) {
+ if (member == u)
+ continue;
+
+ if (UNIT_DEREF(member->slice) != u)
+ continue;
+
+ unit_invalidate_cgroup_bpf(member);
+ }
+ }
+}
+
void manager_invalidate_startup_units(Manager *m) {
Iterator i;
Unit *u;
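Note on the logic above: a BPF program must be (re)applied not only when unit_get_needs_bpf() says a filter is wanted, but also when an earlier configuration may have left one attached that now has to be removed. A stripped-down, standalone C sketch of that decision (the enum and helper name are made up for illustration; they merely mirror UnitCGroupBPFState and the apply_bpf computation in unit_realize_cgroup_now()):

#include <stdbool.h>

/* Illustration only: mirrors the apply_bpf decision in unit_realize_cgroup_now(). */
typedef enum {
        BPF_OFF         =  0,   /* no program attached */
        BPF_ON          =  1,   /* a program is attached */
        BPF_INVALIDATED = -1,   /* state is stale, must be re-evaluated */
} BpfState;

static bool should_run_bpf_logic(bool needs_bpf, BpfState current) {
        /* Run it if a filter is configured now, or if one might still be
         * installed from a previous configuration and has to be dropped. */
        return needs_bpf || current != BPF_OFF;
}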
diff --git a/src/core/cgroup.h b/src/core/cgroup.h
index 2baf4d20e9..fcbf8d01ca 100644
--- a/src/core/cgroup.h
+++ b/src/core/cgroup.h
@@ -128,6 +128,16 @@ struct CGroupContext {
bool delegate;
};
+/* Used when querying IP accounting data */
+typedef enum CGroupIPAccountingMetric {
+ CGROUP_IP_INGRESS_BYTES,
+ CGROUP_IP_INGRESS_PACKETS,
+ CGROUP_IP_EGRESS_BYTES,
+ CGROUP_IP_EGRESS_PACKETS,
+ _CGROUP_IP_ACCOUNTING_METRIC_MAX,
+ _CGROUP_IP_ACCOUNTING_METRIC_INVALID = -1,
+} CGroupIPAccountingMetric;
+
#include "unit.h"
void cgroup_context_init(CGroupContext *c);
@@ -150,6 +160,8 @@ CGroupMask unit_get_subtree_mask(Unit *u);
CGroupMask unit_get_target_mask(Unit *u);
CGroupMask unit_get_enable_mask(Unit *u);
+bool unit_get_needs_bpf(Unit *u);
+
void unit_update_cgroup_members_masks(Unit *u);
char *unit_default_cgroup_path(Unit *u);
@@ -177,7 +189,10 @@ int unit_watch_all_pids(Unit *u);
int unit_get_memory_current(Unit *u, uint64_t *ret);
int unit_get_tasks_current(Unit *u, uint64_t *ret);
int unit_get_cpu_usage(Unit *u, nsec_t *ret);
-int unit_reset_cpu_usage(Unit *u);
+int unit_get_ip_accounting(Unit *u, CGroupIPAccountingMetric metric, uint64_t *ret);
+
+int unit_reset_cpu_accounting(Unit *u);
+int unit_reset_ip_accounting(Unit *u);
bool unit_cgroup_delegate(Unit *u);
@@ -185,6 +200,7 @@ int unit_notify_cgroup_empty(Unit *u);
int manager_notify_cgroup_empty(Manager *m, const char *group);
void unit_invalidate_cgroup(Unit *u, CGroupMask m);
+void unit_invalidate_cgroup_bpf(Unit *u);
void manager_invalidate_startup_units(Manager *m);
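With the declarations above, in-tree callers can iterate over the four metrics uniformly. A hedged sketch of such a caller (the helper name is invented; it assumes the unit has been realized with IPAccounting= enabled, otherwise unit_get_ip_accounting() returns -ENODATA and the metric is skipped):

#include <inttypes.h>
#include <stdio.h>

#include "cgroup.h"
#include "unit.h"

/* Hypothetical helper: print all four per-unit IP accounting counters. */
static void dump_unit_ip_counters(Unit *u) {
        static const char *const names[_CGROUP_IP_ACCOUNTING_METRIC_MAX] = {
                [CGROUP_IP_INGRESS_BYTES]   = "ingress bytes",
                [CGROUP_IP_INGRESS_PACKETS] = "ingress packets",
                [CGROUP_IP_EGRESS_BYTES]    = "egress bytes",
                [CGROUP_IP_EGRESS_PACKETS]  = "egress packets",
        };
        CGroupIPAccountingMetric m;

        for (m = 0; m < _CGROUP_IP_ACCOUNTING_METRIC_MAX; m++) {
                uint64_t v;

                if (unit_get_ip_accounting(u, m, &v) < 0)
                        continue; /* -ENODATA: no BPF map attached for this direction */

                printf("%s: %s = %" PRIu64 "\n", u->id, names[m], v);
        }
}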
diff --git a/src/core/dbus-unit.c b/src/core/dbus-unit.c
index b0645ce294..8d2ae964d8 100644
--- a/src/core/dbus-unit.c
+++ b/src/core/dbus-unit.c
@@ -20,6 +20,7 @@
#include "sd-bus.h"
#include "alloc-util.h"
+#include "bpf-firewall.h"
#include "bus-common-errors.h"
#include "cgroup-util.h"
#include "dbus-job.h"
@@ -1051,6 +1052,39 @@ int bus_unit_method_get_processes(sd_bus_message *message, void *userdata, sd_bu
return sd_bus_send(NULL, reply, NULL);
}
+static int property_get_ip_counter(
+ sd_bus *bus,
+ const char *path,
+ const char *interface,
+ const char *property,
+ sd_bus_message *reply,
+ void *userdata,
+ sd_bus_error *error) {
+
+ CGroupIPAccountingMetric metric;
+ uint64_t value = (uint64_t) -1;
+ Unit *u = userdata;
+
+ assert(bus);
+ assert(reply);
+ assert(property);
+ assert(u);
+
+ if (streq(property, "IPIngressBytes"))
+ metric = CGROUP_IP_INGRESS_BYTES;
+ else if (streq(property, "IPIngressPackets"))
+ metric = CGROUP_IP_INGRESS_PACKETS;
+ else if (streq(property, "IPEgressBytes"))
+ metric = CGROUP_IP_EGRESS_BYTES;
+ else {
+ assert(streq(property, "IPEgressPackets"));
+ metric = CGROUP_IP_EGRESS_PACKETS;
+ }
+
+ (void) unit_get_ip_accounting(u, metric, &value);
+ return sd_bus_message_append(reply, "t", value);
+}
+
const sd_bus_vtable bus_unit_cgroup_vtable[] = {
SD_BUS_VTABLE_START(0),
SD_BUS_PROPERTY("Slice", "s", property_get_slice, 0, 0),
@@ -1058,6 +1092,10 @@ const sd_bus_vtable bus_unit_cgroup_vtable[] = {
SD_BUS_PROPERTY("MemoryCurrent", "t", property_get_current_memory, 0, 0),
SD_BUS_PROPERTY("CPUUsageNSec", "t", property_get_cpu_usage, 0, 0),
SD_BUS_PROPERTY("TasksCurrent", "t", property_get_current_tasks, 0, 0),
+ SD_BUS_PROPERTY("IPIngressBytes", "t", property_get_ip_counter, 0, 0),
+ SD_BUS_PROPERTY("IPIngressPackets", "t", property_get_ip_counter, 0, 0),
+ SD_BUS_PROPERTY("IPEgressBytes", "t", property_get_ip_counter, 0, 0),
+ SD_BUS_PROPERTY("IPEgressPackets", "t", property_get_ip_counter, 0, 0),
SD_BUS_METHOD("GetProcesses", NULL, "a(sus)", bus_unit_method_get_processes, SD_BUS_VTABLE_UNPRIVILEGED),
SD_BUS_VTABLE_END
};
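Since the new counters are exposed as plain "t" (uint64) properties, any sd-bus client can read them. A rough standalone example follows; error handling is trimmed, nginx.service is only a placeholder, and it assumes the unit is a service, so the cgroup properties are served on the org.freedesktop.systemd1.Service interface:

#include <inttypes.h>
#include <stdio.h>
#include <systemd/sd-bus.h>

int main(void) {
        sd_bus_error error = SD_BUS_ERROR_NULL;
        sd_bus_message *reply = NULL;
        sd_bus *bus = NULL;
        const char *path;
        uint64_t v = 0;
        int r;

        r = sd_bus_open_system(&bus);
        if (r < 0)
                goto finish;

        /* Resolve the unit's object path via the manager. */
        r = sd_bus_call_method(bus,
                               "org.freedesktop.systemd1",
                               "/org/freedesktop/systemd1",
                               "org.freedesktop.systemd1.Manager",
                               "GetUnit",
                               &error, &reply, "s", "nginx.service");
        if (r < 0)
                goto finish;

        r = sd_bus_message_read(reply, "o", &path);
        if (r < 0)
                goto finish;

        r = sd_bus_get_property_trivial(bus,
                                        "org.freedesktop.systemd1", path,
                                        "org.freedesktop.systemd1.Service",
                                        "IPIngressBytes",
                                        &error, 't', &v);
        if (r < 0)
                goto finish;

        /* property_get_ip_counter() reports (uint64_t) -1 when no data is available */
        if (v == UINT64_MAX)
                printf("IP accounting is not enabled for this unit\n");
        else
                printf("IPIngressBytes=%" PRIu64 "\n", v);

finish:
        sd_bus_error_free(&error);
        sd_bus_message_unref(reply);
        sd_bus_unref(bus);
        return r < 0 ? 1 : 0;
}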
diff --git a/src/core/load-fragment-gperf.gperf.m4 b/src/core/load-fragment-gperf.gperf.m4
index f7d5f24861..cc8aad05a0 100644
--- a/src/core/load-fragment-gperf.gperf.m4
+++ b/src/core/load-fragment-gperf.gperf.m4
@@ -174,6 +174,9 @@ $1.BlockIOWriteBandwidth, config_parse_blockio_bandwidth, 0,
$1.TasksAccounting, config_parse_bool, 0, offsetof($1, cgroup_context.tasks_accounting)
$1.TasksMax, config_parse_tasks_max, 0, offsetof($1, cgroup_context.tasks_max)
$1.Delegate, config_parse_bool, 0, offsetof($1, cgroup_context.delegate)
+$1.IPAccounting, config_parse_bool, 0, offsetof($1, cgroup_context.ip_accounting)
+$1.IPAddressAllow, config_parse_ip_address_access, 0, offsetof($1, cgroup_context.ip_address_allow)
+$1.IPAddressDeny, config_parse_ip_address_access, 0, offsetof($1, cgroup_context.ip_address_deny)
$1.NetClass, config_parse_warn_compat, DISABLED_LEGACY, 0'
)m4_dnl
Unit.Description, config_parse_unit_string_printf, 0, offsetof(Unit, description)
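The three new directives wired up here are the user-facing side of this commit; entries in IPAddressAllow= take precedence over IPAddressDeny=. A minimal unit-file fragment using them might look like this (addresses purely illustrative):

[Service]
# Turn on per-unit IP traffic accounting (exposed as the IPIngressBytes/IPEgressBytes D-Bus properties)
IPAccounting=yes
# Allow loopback and one internal subnet, drop everything else for this unit's cgroup
IPAddressAllow=127.0.0.0/8 10.0.0.0/8
IPAddressDeny=0.0.0.0/0 ::/0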
diff --git a/src/core/mount.c b/src/core/mount.c
index 472f54242c..46bcf37ae0 100644
--- a/src/core/mount.c
+++ b/src/core/mount.c
@@ -754,9 +754,10 @@ static int mount_spawn(Mount *m, ExecCommand *c, pid_t *_pid) {
assert(_pid);
(void) unit_realize_cgroup(UNIT(m));
- if (m->reset_cpu_usage) {
- (void) unit_reset_cpu_usage(UNIT(m));
- m->reset_cpu_usage = false;
+ if (m->reset_accounting) {
+ (void) unit_reset_cpu_accounting(UNIT(m));
+ (void) unit_reset_ip_accounting(UNIT(m));
+ m->reset_accounting = false;
}
r = unit_setup_exec_runtime(UNIT(m));
@@ -1044,7 +1045,7 @@ static int mount_start(Unit *u) {
m->result = MOUNT_SUCCESS;
m->reload_result = MOUNT_SUCCESS;
- m->reset_cpu_usage = true;
+ m->reset_accounting = true;
mount_enter_mounting(m);
return 1;
diff --git a/src/core/mount.h b/src/core/mount.h
index 9f7326ba6a..f81e4217df 100644
--- a/src/core/mount.h
+++ b/src/core/mount.h
@@ -67,7 +67,7 @@ struct Mount {
bool just_mounted:1;
bool just_changed:1;
- bool reset_cpu_usage:1;
+ bool reset_accounting:1;
bool sloppy_options;
diff --git a/src/core/scope.c b/src/core/scope.c
index a1d5c1cfd5..8f9df3b9b7 100644
--- a/src/core/scope.c
+++ b/src/core/scope.c
@@ -333,7 +333,8 @@ static int scope_start(Unit *u) {
return r;
(void) unit_realize_cgroup(u);
- (void) unit_reset_cpu_usage(u);
+ (void) unit_reset_cpu_accounting(u);
+ (void) unit_reset_ip_accounting(u);
r = unit_attach_pids_to_cgroup(u);
if (r < 0) {
diff --git a/src/core/service.c b/src/core/service.c
index 2144884f9e..b0ce9bfcfa 100644
--- a/src/core/service.c
+++ b/src/core/service.c
@@ -1244,9 +1244,10 @@ static int service_spawn(
}
(void) unit_realize_cgroup(UNIT(s));
- if (s->reset_cpu_usage) {
- (void) unit_reset_cpu_usage(UNIT(s));
- s->reset_cpu_usage = false;
+ if (s->reset_accounting) {
+ (void) unit_reset_cpu_accounting(UNIT(s));
+ (void) unit_reset_ip_accounting(UNIT(s));
+ s->reset_accounting = false;
}
r = unit_setup_exec_runtime(UNIT(s));
@@ -2138,7 +2139,7 @@ static int service_start(Unit *u) {
s->main_pid_known = false;
s->main_pid_alien = false;
s->forbid_restart = false;
- s->reset_cpu_usage = true;
+ s->reset_accounting = true;
s->status_text = mfree(s->status_text);
s->status_errno = 0;
diff --git a/src/core/service.h b/src/core/service.h
index 0ac8bc9a67..16b700637c 100644
--- a/src/core/service.h
+++ b/src/core/service.h
@@ -165,7 +165,7 @@ struct Service {
bool forbid_restart:1;
bool start_timeout_defined:1;
- bool reset_cpu_usage:1;
+ bool reset_accounting:1;
char *bus_name;
char *bus_name_owner; /* unique name of the current owner */
diff --git a/src/core/slice.c b/src/core/slice.c
index ed5d3fd701..b15f751c82 100644
--- a/src/core/slice.c
+++ b/src/core/slice.c
@@ -222,7 +222,8 @@ static int slice_start(Unit *u) {
return r;
(void) unit_realize_cgroup(u);
- (void) unit_reset_cpu_usage(u);
+ (void) unit_reset_cpu_accounting(u);
+ (void) unit_reset_ip_accounting(u);
slice_set_state(t, SLICE_ACTIVE);
return 1;
diff --git a/src/core/socket.c b/src/core/socket.c
index a82e7d2187..ec901fbdd7 100644
--- a/src/core/socket.c
+++ b/src/core/socket.c
@@ -1775,9 +1775,10 @@ static int socket_spawn(Socket *s, ExecCommand *c, pid_t *_pid) {
assert(_pid);
(void) unit_realize_cgroup(UNIT(s));
- if (s->reset_cpu_usage) {
- (void) unit_reset_cpu_usage(UNIT(s));
- s->reset_cpu_usage = false;
+ if (s->reset_accounting) {
+ (void) unit_reset_cpu_accounting(UNIT(s));
+ (void) unit_reset_ip_accounting(UNIT(s));
+ s->reset_accounting = false;
}
r = unit_setup_exec_runtime(UNIT(s));
@@ -2373,7 +2374,7 @@ static int socket_start(Unit *u) {
return r;
s->result = SOCKET_SUCCESS;
- s->reset_cpu_usage = true;
+ s->reset_accounting = true;
socket_enter_start_pre(s);
return 1;
diff --git a/src/core/socket.h b/src/core/socket.h
index 89f4664510..8c263963c4 100644
--- a/src/core/socket.h
+++ b/src/core/socket.h
@@ -161,7 +161,7 @@ struct Socket {
char *user, *group;
- bool reset_cpu_usage:1;
+ bool reset_accounting:1;
char *fdname;
diff --git a/src/core/swap.c b/src/core/swap.c
index 303f62d25a..d58f68458b 100644
--- a/src/core/swap.c
+++ b/src/core/swap.c
@@ -620,9 +620,10 @@ static int swap_spawn(Swap *s, ExecCommand *c, pid_t *_pid) {
assert(_pid);
(void) unit_realize_cgroup(UNIT(s));
- if (s->reset_cpu_usage) {
- (void) unit_reset_cpu_usage(UNIT(s));
- s->reset_cpu_usage = false;
+ if (s->reset_accounting) {
+ (void) unit_reset_cpu_accounting(UNIT(s));
+ (void) unit_reset_ip_accounting(UNIT(s));
+ s->reset_accounting = false;
}
r = unit_setup_exec_runtime(UNIT(s));
@@ -861,7 +862,7 @@ static int swap_start(Unit *u) {
return r;
s->result = SWAP_SUCCESS;
- s->reset_cpu_usage = true;
+ s->reset_accounting = true;
swap_enter_activating(s);
return 1;
diff --git a/src/core/swap.h b/src/core/swap.h
index b0ef50f1e8..45da63c5e2 100644
--- a/src/core/swap.h
+++ b/src/core/swap.h
@@ -70,7 +70,7 @@ struct Swap {
bool is_active:1;
bool just_activated:1;
- bool reset_cpu_usage:1;
+ bool reset_accounting:1;
SwapResult result;
diff --git a/src/core/unit.c b/src/core/unit.c
index 6451b75560..02c8b2a45d 100644
--- a/src/core/unit.c
+++ b/src/core/unit.c
@@ -2818,6 +2818,7 @@ int unit_serialize(Unit *u, FILE *f, FDSet *fds, bool serialize_jobs) {
unit_serialize_item(u, f, "cgroup-realized", yes_no(u->cgroup_realized));
(void) unit_serialize_cgroup_mask(f, "cgroup-realized-mask", u->cgroup_realized_mask);
(void) unit_serialize_cgroup_mask(f, "cgroup-enabled-mask", u->cgroup_enabled_mask);
+ unit_serialize_item_format(u, f, "cgroup-bpf-realized", "%i", u->cgroup_bpf_state);
if (uid_is_valid(u->ref_uid))
unit_serialize_item_format(u, f, "ref-uid", UID_FMT, u->ref_uid);
@@ -3089,6 +3090,20 @@ int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
log_unit_debug(u, "Failed to parse cgroup-enabled-mask %s, ignoring.", v);
continue;
+ } else if (streq(l, "cgroup-bpf-realized")) {
+ int i;
+
+ r = safe_atoi(v, &i);
+ if (r < 0)
+ log_unit_debug(u, "Failed to parse cgroup BPF state %s, ignoring.", v);
+ else
+ u->cgroup_bpf_state =
+ i < 0 ? UNIT_CGROUP_BPF_INVALIDATED :
+ i > 0 ? UNIT_CGROUP_BPF_ON :
+ UNIT_CGROUP_BPF_OFF;
+
+ continue;
+
} else if (streq(l, "ref-uid")) {
uid_t uid;
diff --git a/src/core/unit.h b/src/core/unit.h
index 95c41fccea..598cc6ede6 100644
--- a/src/core/unit.h
+++ b/src/core/unit.h
@@ -71,6 +71,12 @@ struct UnitRef {
LIST_FIELDS(UnitRef, refs);
};
+typedef enum UnitCGroupBPFState {
+ UNIT_CGROUP_BPF_OFF = 0,
+ UNIT_CGROUP_BPF_ON = 1,
+ UNIT_CGROUP_BPF_INVALIDATED = -1,
+} UnitCGroupBPFState;
+
struct Unit {
Manager *manager;
@@ -267,6 +273,8 @@ struct Unit {
bool cgroup_members_mask_valid:1;
bool cgroup_subtree_mask_valid:1;
+ UnitCGroupBPFState cgroup_bpf_state:2;
+
bool start_limit_hit:1;
/* Did we already invoke unit_coldplug() for this unit? */