/*
 * AppArmor security module
 *
 * This file contains AppArmor security identifier (secid) manipulation fns
 *
 * Copyright 2009-2017 Canonical Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * AppArmor allocates a unique secid for every label used. If a label
 * is replaced it receives the secid of the label it is replacing.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "include/cred.h"
#include "include/lib.h"
#include "include/secid.h"
#include "include/label.h"
#include "include/policy_ns.h"

/*
 * secids - do not pin labels with a refcount. They rely on the label
 * properly updating/freeing them
 *
 * A singly linked free list is used to track secids that have been
 * freed and reuse them before allocating new ones
 */

#define FREE_LIST_HEAD 1

static RADIX_TREE(aa_secids_map, GFP_ATOMIC);
static DEFINE_SPINLOCK(secid_lock);
static u32 alloced_secid = FREE_LIST_HEAD;
static u32 free_list = FREE_LIST_HEAD;
static unsigned long free_count;

/*
 * TODO: allow policy to reserve a secid range?
 * TODO: add secid pinning
 * TODO: use secid_update in label replace
 */

#define SECID_MAX U32_MAX

/* TODO: mark free list as exceptional */
static void *to_ptr(u32 secid)
{
        return (void *)
                ((((unsigned long) secid) << RADIX_TREE_EXCEPTIONAL_SHIFT));
}

static u32 to_secid(void *ptr)
{
        return (u32) (((unsigned long) ptr) >> RADIX_TREE_EXCEPTIONAL_SHIFT);
}
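
/*
 * Illustrative note (a sketch, not normative): free list links are stored
 * shifted left by RADIX_TREE_EXCEPTIONAL_SHIFT so they can later be tagged
 * as exceptional entries (see the TODOs above). Assuming the shift is 2,
 * a freed secid of 5 is stored in the tree as the pointer value 0x14, and
 * to_secid(to_ptr(5)) round-trips back to 5.
 */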

/* TODO: tag free_list entries to mark them as different */
static u32 __pop(struct aa_label *label)
{
        u32 secid = free_list;
        void __rcu **slot;
        void *entry;

        if (free_list == FREE_LIST_HEAD)
                return AA_SECID_INVALID;

        slot = radix_tree_lookup_slot(&aa_secids_map, secid);
        AA_BUG(!slot);
        entry = radix_tree_deref_slot_protected(slot, &secid_lock);
        free_list = to_secid(entry);
        radix_tree_replace_slot(&aa_secids_map, slot, label);
        free_count--;

        return secid;
}

static void __push(u32 secid)
{
        void __rcu **slot;

        slot = radix_tree_lookup_slot(&aa_secids_map, secid);
        AA_BUG(!slot);
        radix_tree_replace_slot(&aa_secids_map, slot, to_ptr(free_list));
        free_list = secid;
        free_count++;
}
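
/*
 * Worked example (illustrative only): starting from an empty free list
 * (free_list == FREE_LIST_HEAD), __push(5) followed by __push(9) leaves
 *
 *	free_list == 9
 *	slot 9 -> to_ptr(5)
 *	slot 5 -> to_ptr(FREE_LIST_HEAD)
 *
 * so a subsequent __pop() returns 9, installs the new label in slot 9,
 * and advances free_list to 5.
 */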

static struct aa_label *__secid_update(u32 secid, struct aa_label *label)
{
        struct aa_label *old;
        void __rcu **slot;

        slot = radix_tree_lookup_slot(&aa_secids_map, secid);
        AA_BUG(!slot);
        old = radix_tree_deref_slot_protected(slot, &secid_lock);
        radix_tree_replace_slot(&aa_secids_map, slot, label);

        return old;
}

/**
 * aa_secid_update - update a secid mapping to a new label
 * @secid: secid to update
 * @label: label the secid will now map to
 */
void aa_secid_update(u32 secid, struct aa_label *label)
{
        struct aa_label *old;
        unsigned long flags;

        spin_lock_irqsave(&secid_lock, flags);
        /* the old label is not put here: secids do not pin labels */
        old = __secid_update(secid, label);
        spin_unlock_irqrestore(&secid_lock, flags);
}
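
/*
 * Sketch of the intended use per the TODO above (hypothetical, not taken
 * from label.c): on replacement the new label inherits the replaced
 * label's secid, e.g.
 *
 *	new->secid = old->secid;
 *	aa_secid_update(old->secid, new);
 */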

/**
 * aa_secid_to_label - look up the label a secid currently maps to
 * @secid: secid to look up
 *
 * Returns: the label mapped to @secid, or NULL if none. The reference
 * count is not incremented, as secids do not pin labels. See
 * aa_label_to_secid for the inverse mapping.
 */
struct aa_label *aa_secid_to_label(u32 secid)
{
        struct aa_label *label;

        rcu_read_lock();
        label = radix_tree_lookup(&aa_secids_map, secid);
        rcu_read_unlock();

        return label;
}

int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
        /* TODO: cache secctx and ref count so we don't have to recreate */
        struct aa_label *label = aa_secid_to_label(secid);
        int len;

        AA_BUG(!secdata);
        AA_BUG(!seclen);

        if (!label)
                return -EINVAL;

        if (secdata)
                len = aa_label_asxprint(secdata, root_ns, label,
                                        FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
                                        FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT,
                                        GFP_ATOMIC);
        else
                len = aa_label_snxprint(NULL, 0, root_ns, label,
                                        FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
                                        FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT);
        if (len < 0)
                return -ENOMEM;

        *seclen = len;

        return 0;
}
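
/*
 * Usage sketch (hypothetical caller, via the generic LSM interface): the
 * secctx string allocated here must be released with the matching hook,
 * e.g.
 *
 *	char *ctx;
 *	u32 len;
 *
 *	if (!security_secid_to_secctx(secid, &ctx, &len)) {
 *		... use ctx/len ...
 *		security_release_secctx(ctx, len);
 *	}
 */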

int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
        struct aa_label *label;

        label = aa_label_strn_parse(&root_ns->unconfined->label, secdata,
                                    seclen, GFP_KERNEL, false, false);
        if (IS_ERR(label))
                return PTR_ERR(label);

        *secid = label->secid;

        return 0;
}

void apparmor_release_secctx(char *secdata, u32 seclen)
{
        kfree(secdata);
}

/**
 * aa_alloc_secid - allocate a new secid for a profile
 * @label: the label the new secid will map to
 * @gfp: memory allocation flags
 *
 * Returns: the new secid, or AA_SECID_INVALID on failure
 */
u32 aa_alloc_secid(struct aa_label *label, gfp_t gfp)
{
        unsigned long flags;
        u32 secid;
        bool preload;
        int res;

        /* racey, but at worst causes new allocation instead of reuse */
        if (free_list == FREE_LIST_HEAD) {
retry:
                /*
                 * set after the label, not at declaration: the goto below
                 * re-enters here, and a skipped initializer would leave
                 * preload with an indeterminate value
                 */
                preload = false;
                if (gfpflags_allow_blocking(gfp) && !radix_tree_preload(gfp))
                        preload = true;
                spin_lock_irqsave(&secid_lock, flags);
                if (alloced_secid != SECID_MAX) {
                        secid = ++alloced_secid;
                        res = radix_tree_insert(&aa_secids_map, secid, label);
                        AA_BUG(res == -EEXIST);
                } else {
                        secid = AA_SECID_INVALID;
                }
                spin_unlock_irqrestore(&secid_lock, flags);
                if (preload)
                        radix_tree_preload_end();
        } else {
                spin_lock_irqsave(&secid_lock, flags);
                /* remove entry from free list */
                secid = __pop(label);
                if (secid == AA_SECID_INVALID) {
                        /* lost the race for the last free entry; allocate */
                        spin_unlock_irqrestore(&secid_lock, flags);
                        goto retry;
                }
                spin_unlock_irqrestore(&secid_lock, flags);
        }

        return secid;
}

/**
 * aa_free_secid - free a secid
 * @secid: secid to free
 */
void aa_free_secid(u32 secid)
{
        unsigned long flags;

        spin_lock_irqsave(&secid_lock, flags);
        __push(secid);
        spin_unlock_irqrestore(&secid_lock, flags);
}
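
/*
 * Usage sketch (hypothetical, for illustration): a label obtains its
 * secid at creation time and returns it when the label is destroyed,
 * e.g.
 *
 *	label->secid = aa_alloc_secid(label, GFP_KERNEL);
 *	if (label->secid == AA_SECID_INVALID)
 *		goto fail;
 *	...
 *	aa_free_secid(label->secid);
 */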