apparmor: Use a memory pool instead of per-CPU caches
The get_buffers() macro may provide one or two buffers to the caller. Those buffers are pre-allocated on init for each CPU. By default this allocates 2 * 2 * MAX_PATH * POSSIBLE_CPU, which equals 64KiB on a system with 4 CPUs, 1MiB with 64 CPUs, and so on.

Replace the per-CPU buffers with a common memory pool which is shared across all CPUs. The pool grows on demand and never shrinks. The pool starts with two (UP) or four (SMP) elements. By using this pool it is possible to request a buffer while keeping preemption enabled, which avoids the hack in profile_transition().

It has been pointed out by Tetsuo Handa that GFP_KERNEL allocations for small amounts of memory do not fail. In order not to retry endlessly, __GFP_RETRY_MAYFAIL is passed (so the memory allocation is not repeated until success) and the allocation is retried once, in the hope that a buffer has been returned to the pool in the meantime. Since NULL is now possible, all allocation paths check the buffer pointer and return -ENOMEM on failure.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: John Johansen <john.johansen@canonical.com>
commit df323337e5
parent bf1d2ee7bc
committed by John Johansen
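As a rough illustration of the scheme described in the message above (a sketch, not the literal patch), a pool-backed aa_get_buffer()/aa_put_buffer() pair could be structured as follows. The names aa_buf_pool, aa_buf_pool_lock, union pool_buffer and PATH_BUF_SIZE are assumptions made for this sketch only:

/* Sketch only: a shared, grow-on-demand buffer pool protected by a spinlock.
 * Names here are illustrative, not necessarily those used in the patch.
 */
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define PATH_BUF_SIZE	(2 * PATH_MAX)	/* assumed per-buffer size */

union pool_buffer {
	struct list_head list;		/* linkage while sitting in the pool */
	char buffer[1];			/* storage handed out to callers */
};

static LIST_HEAD(aa_buf_pool);
static DEFINE_SPINLOCK(aa_buf_pool_lock);

char *aa_get_buffer(void)
{
	union pool_buffer *buf;
	bool try_again = true;
	gfp_t flags = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

retry:
	/* Fast path: reuse an element already in the pool. */
	spin_lock(&aa_buf_pool_lock);
	if (!list_empty(&aa_buf_pool)) {
		buf = list_first_entry(&aa_buf_pool, union pool_buffer, list);
		list_del(&buf->list);
		spin_unlock(&aa_buf_pool_lock);
		return &buf->buffer[0];
	}
	spin_unlock(&aa_buf_pool_lock);

	/* Pool empty: grow it. __GFP_RETRY_MAYFAIL keeps the allocator from
	 * retrying until success, so this allocation can fail.
	 */
	buf = kmalloc(PATH_BUF_SIZE, flags);
	if (!buf) {
		if (try_again) {
			/* Retry once; a buffer may have been returned meanwhile. */
			try_again = false;
			goto retry;
		}
		return NULL;	/* callers must check and return -ENOMEM */
	}
	return &buf->buffer[0];
}

void aa_put_buffer(char *buf)
{
	union pool_buffer *b;

	if (!buf)
		return;
	b = container_of(buf, union pool_buffer, buffer[0]);

	/* Return the element to the pool; the pool never shrinks. */
	spin_lock(&aa_buf_pool_lock);
	list_add(&b->list, &aa_buf_pool);
	spin_unlock(&aa_buf_pool_lock);
}

Because the pool is protected by a spinlock rather than tied to a CPU, a buffer can be requested and held with preemption enabled, which is what removes the need for the per-CPU get_buffers()/put_buffers() machinery deleted in the diff below.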
@@ -15,7 +15,6 @@
 #ifndef __AA_PATH_H
 #define __AA_PATH_H
 
-
 enum path_flags {
 	PATH_IS_DIR = 0x1,		/* path is a directory */
 	PATH_CONNECT_PATH = 0x4,	/* connect disconnected paths to / */
@@ -30,51 +29,7 @@ int aa_path_name(const struct path *path, int flags, char *buffer,
 		 const char **name, const char **info,
 		 const char *disconnected);
 
-#define MAX_PATH_BUFFERS 2
-
-/* Per cpu buffers used during mediation */
-/* preallocated buffers to use during path lookups */
-struct aa_buffers {
-	char *buf[MAX_PATH_BUFFERS];
-};
-
-#include <linux/percpu.h>
-#include <linux/preempt.h>
-
-DECLARE_PER_CPU(struct aa_buffers, aa_buffers);
-
-#define ASSIGN(FN, A, X, N) ((X) = FN(A, N))
-#define EVAL1(FN, A, X) ASSIGN(FN, A, X, 0) /*X = FN(0)*/
-#define EVAL2(FN, A, X, Y...)	\
-	do { ASSIGN(FN, A, X, 1); EVAL1(FN, A, Y); } while (0)
-#define EVAL(FN, A, X...) CONCATENATE(EVAL, COUNT_ARGS(X))(FN, A, X)
-
-#define for_each_cpu_buffer(I) for ((I) = 0; (I) < MAX_PATH_BUFFERS; (I)++)
-
-#ifdef CONFIG_DEBUG_PREEMPT
-#define AA_BUG_PREEMPT_ENABLED(X) AA_BUG(preempt_count() <= 0, X)
-#else
-#define AA_BUG_PREEMPT_ENABLED(X) /* nop */
-#endif
-
-#define __get_buffer(C, N) ({					\
-	AA_BUG_PREEMPT_ENABLED("__get_buffer without preempt disabled"); \
-	(C)->buf[(N)]; })
-
-#define __get_buffers(C, X...)	EVAL(__get_buffer, C, X)
-
-#define __put_buffers(X, Y...) ((void)&(X))
-
-#define get_buffers(X...)	\
-do {				\
-	struct aa_buffers *__cpu_var = get_cpu_ptr(&aa_buffers);	\
-	__get_buffers(__cpu_var, X);	\
-} while (0)
-
-#define put_buffers(X, Y...)	\
-do {				\
-	__put_buffers(X, Y);	\
-	put_cpu_ptr(&aa_buffers);	\
-} while (0)
+char *aa_get_buffer(void);
+void aa_put_buffer(char *buf);
 
 #endif /* __AA_PATH_H */
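Since aa_get_buffer() can now return NULL, call sites follow a check-and-bail pattern. A hypothetical caller (the function name and its body are illustrative, not taken from the patch; only aa_get_buffer(), aa_put_buffer() and aa_path_name() come from the header above) might look like:

/* Hypothetical call site, for illustration only. */
static int example_mediation_path(const struct path *path, int flags,
				  const char **name, const char **info)
{
	char *buffer;
	int error;

	buffer = aa_get_buffer();
	if (!buffer)
		return -ENOMEM;	/* allocation may fail now; report it */

	/* Preemption stays enabled while the buffer is held. */
	error = aa_path_name(path, flags, buffer, name, info, NULL);

	aa_put_buffer(buffer);
	return error;
}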