Merge branch 'akpm' (patches from Andrew)

Merge final set of updates from Andrew Morton:

 - a series to make IMA play better across kexec

 - a handful of random fixes

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  printk: fix typo in CONSOLE_LOGLEVEL_DEFAULT help text
  ratelimit: fix WARN_ON_RATELIMIT return value
  kcov: make kcov work properly with KASLR enabled
  arm64: setup: introduce kaslr_offset()
  mm: fadvise: avoid expensive remote LRU cache draining after FADV_DONTNEED
  ima: platform-independent hash value
  ima: define a canonical binary_runtime_measurements list format
  ima: support restoring multiple template formats
  ima: store the builtin/custom template definitions in a list
  ima: on soft reboot, save the measurement list
  powerpc: ima: send the kexec buffer to the next kernel
  ima: maintain memory size needed for serializing the measurement list
  ima: permit duplicate measurement list entries
  ima: on soft reboot, restore the measurement list
  powerpc: ima: get the kexec buffer passed by the previous kernel
Linus Torvalds 2016-12-20 15:24:32 -08:00
commit 3eb86259ec
28 changed files with 941 additions and 46 deletions


@ -1441,6 +1441,10 @@
The builtin appraise policy appraises all files
owned by uid=0.
ima_canonical_fmt [IMA]
Use the canonical format for the binary runtime
measurements, instead of host native format.
ima_hash= [IMA]
Format: { md5 | sha1 | rmd160 | sha256 | sha384
| sha512 | ... }

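Usage sketch: the parameter takes no value and is simply appended to the kernel command line; the other parameters shown here are illustrative only:

    root=/dev/sda2 ro ima_canonical_fmt ima_hash=sha256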

@ -5,6 +5,9 @@
config KEXEC_CORE
bool
config HAVE_IMA_KEXEC
bool
config OPROFILE
tristate "OProfile system profiling"
depends on PROFILING


@ -165,6 +165,11 @@ extern u64 kimage_vaddr;
/* the offset between the kernel virtual and physical mappings */
extern u64 kimage_voffset;
static inline unsigned long kaslr_offset(void)
{
return kimage_vaddr - KIMAGE_VADDR;
}
/*
* Allow all memory at the discovery stage. We will clip it later.
*/


@ -338,11 +338,11 @@ subsys_initcall(topology_init);
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
void *p)
{
u64 const kaslr_offset = kimage_vaddr - KIMAGE_VADDR;
const unsigned long offset = kaslr_offset();
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset > 0) {
pr_emerg("Kernel Offset: 0x%llx from 0x%lx\n",
kaslr_offset, KIMAGE_VADDR);
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
offset, KIMAGE_VADDR);
} else {
pr_emerg("Kernel Offset: disabled\n");
}


@ -469,6 +469,7 @@ config KEXEC
config KEXEC_FILE
bool "kexec file based system call"
select KEXEC_CORE
select HAVE_IMA_KEXEC
select BUILD_BIN2C
depends on PPC64
depends on CRYPTO=y


@ -0,0 +1,29 @@
#ifndef _ASM_POWERPC_IMA_H
#define _ASM_POWERPC_IMA_H
struct kimage;
int ima_get_kexec_buffer(void **addr, size_t *size);
int ima_free_kexec_buffer(void);
#ifdef CONFIG_IMA
void remove_ima_buffer(void *fdt, int chosen_node);
#else
static inline void remove_ima_buffer(void *fdt, int chosen_node) {}
#endif
#ifdef CONFIG_IMA_KEXEC
int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr,
size_t size);
int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node);
#else
static inline int setup_ima_buffer(const struct kimage *image, void *fdt,
int chosen_node)
{
remove_ima_buffer(fdt, chosen_node);
return 0;
}
#endif /* CONFIG_IMA_KEXEC */
#endif /* _ASM_POWERPC_IMA_H */


@ -94,11 +94,22 @@ static inline bool kdump_in_progress(void)
#ifdef CONFIG_KEXEC_FILE
extern struct kexec_file_ops kexec_elf64_ops;
#ifdef CONFIG_IMA_KEXEC
#define ARCH_HAS_KIMAGE_ARCH
struct kimage_arch {
phys_addr_t ima_buffer_addr;
size_t ima_buffer_size;
};
#endif
int setup_purgatory(struct kimage *image, const void *slave_code,
const void *fdt, unsigned long kernel_load_addr,
unsigned long fdt_load_addr);
int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
unsigned long initrd_len, const char *cmdline);
int setup_new_fdt(const struct kimage *image, void *fdt,
unsigned long initrd_load_addr, unsigned long initrd_len,
const char *cmdline);
int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size);
#endif /* CONFIG_KEXEC_FILE */
#else /* !CONFIG_KEXEC_CORE */


@ -112,6 +112,10 @@ obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o crash.o \
machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file_$(BITS).o kexec_elf_$(BITS).o
ifeq ($(CONFIG_HAVE_IMA_KEXEC)$(CONFIG_IMA),yy)
obj-y += ima_kexec.o
endif
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o


@ -0,0 +1,223 @@
/*
* Copyright (C) 2016 IBM Corporation
*
* Authors:
* Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/of.h>
#include <linux/memblock.h>
#include <linux/libfdt.h>
static int get_addr_size_cells(int *addr_cells, int *size_cells)
{
struct device_node *root;
root = of_find_node_by_path("/");
if (!root)
return -EINVAL;
*addr_cells = of_n_addr_cells(root);
*size_cells = of_n_size_cells(root);
of_node_put(root);
return 0;
}
static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr,
size_t *size)
{
int ret, addr_cells, size_cells;
ret = get_addr_size_cells(&addr_cells, &size_cells);
if (ret)
return ret;
if (len < 4 * (addr_cells + size_cells))
return -ENOENT;
*addr = of_read_number(prop, addr_cells);
*size = of_read_number(prop + 4 * addr_cells, size_cells);
return 0;
}
/**
* ima_get_kexec_buffer - get IMA buffer from the previous kernel
* @addr: On successful return, set to point to the buffer contents.
* @size: On successful return, set to the buffer size.
*
* Return: 0 on success, negative errno on error.
*/
int ima_get_kexec_buffer(void **addr, size_t *size)
{
int ret, len;
unsigned long tmp_addr;
size_t tmp_size;
const void *prop;
prop = of_get_property(of_chosen, "linux,ima-kexec-buffer", &len);
if (!prop)
return -ENOENT;
ret = do_get_kexec_buffer(prop, len, &tmp_addr, &tmp_size);
if (ret)
return ret;
*addr = __va(tmp_addr);
*size = tmp_size;
return 0;
}
/**
* ima_free_kexec_buffer - free memory used by the IMA buffer
*/
int ima_free_kexec_buffer(void)
{
int ret;
unsigned long addr;
size_t size;
struct property *prop;
prop = of_find_property(of_chosen, "linux,ima-kexec-buffer", NULL);
if (!prop)
return -ENOENT;
ret = do_get_kexec_buffer(prop->value, prop->length, &addr, &size);
if (ret)
return ret;
ret = of_remove_property(of_chosen, prop);
if (ret)
return ret;
return memblock_free(addr, size);
}
/**
* remove_ima_buffer - remove the IMA buffer property and reservation from @fdt
*
* The IMA measurement buffer is of no use to a subsequent kernel, so we always
* remove it from the device tree.
*/
void remove_ima_buffer(void *fdt, int chosen_node)
{
int ret, len;
unsigned long addr;
size_t size;
const void *prop;
prop = fdt_getprop(fdt, chosen_node, "linux,ima-kexec-buffer", &len);
if (!prop)
return;
ret = do_get_kexec_buffer(prop, len, &addr, &size);
fdt_delprop(fdt, chosen_node, "linux,ima-kexec-buffer");
if (ret)
return;
ret = delete_fdt_mem_rsv(fdt, addr, size);
if (!ret)
pr_debug("Removed old IMA buffer reservation.\n");
}
#ifdef CONFIG_IMA_KEXEC
/**
* arch_ima_add_kexec_buffer - do arch-specific steps to add the IMA buffer
*
* Architectures should use this function to pass on the IMA buffer
* information to the next kernel.
*
* Return: 0 on success, negative errno on error.
*/
int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr,
size_t size)
{
image->arch.ima_buffer_addr = load_addr;
image->arch.ima_buffer_size = size;
return 0;
}
static int write_number(void *p, u64 value, int cells)
{
if (cells == 1) {
u32 tmp;
if (value > U32_MAX)
return -EINVAL;
tmp = cpu_to_be32(value);
memcpy(p, &tmp, sizeof(tmp));
} else if (cells == 2) {
u64 tmp;
tmp = cpu_to_be64(value);
memcpy(p, &tmp, sizeof(tmp));
} else
return -EINVAL;
return 0;
}
/**
* setup_ima_buffer - add IMA buffer information to the fdt
* @image: kexec image being loaded.
* @fdt: Flattened device tree for the next kernel.
* @chosen_node: Offset to the chosen node.
*
* Return: 0 on success, or negative errno on error.
*/
int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node)
{
int ret, addr_cells, size_cells, entry_size;
u8 value[16];
remove_ima_buffer(fdt, chosen_node);
if (!image->arch.ima_buffer_size)
return 0;
ret = get_addr_size_cells(&addr_cells, &size_cells);
if (ret)
return ret;
entry_size = 4 * (addr_cells + size_cells);
if (entry_size > sizeof(value))
return -EINVAL;
ret = write_number(value, image->arch.ima_buffer_addr, addr_cells);
if (ret)
return ret;
ret = write_number(value + 4 * addr_cells, image->arch.ima_buffer_size,
size_cells);
if (ret)
return ret;
ret = fdt_setprop(fdt, chosen_node, "linux,ima-kexec-buffer", value,
entry_size);
if (ret < 0)
return -EINVAL;
ret = fdt_add_mem_rsv(fdt, image->arch.ima_buffer_addr,
image->arch.ima_buffer_size);
if (ret)
return -EINVAL;
pr_debug("IMA buffer at 0x%llx, size = 0x%zx\n",
image->arch.ima_buffer_addr, image->arch.ima_buffer_size);
return 0;
}
#endif /* CONFIG_IMA_KEXEC */

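For illustration, a /chosen fragment of the kind setup_ima_buffer() emits and do_get_kexec_buffer() parses, assuming #address-cells = <2> and #size-cells = <2>; the address and size values are made up:

    chosen {
            linux,ima-kexec-buffer = <0x0 0x3f000000 0x0 0x8000>;
    };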

@ -627,7 +627,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
goto out;
}
ret = setup_new_fdt(fdt, initrd_load_addr, initrd_len, cmdline);
ret = setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline);
if (ret)
goto out;


@ -27,6 +27,7 @@
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <asm/ima.h>
#define SLAVE_CODE_SIZE 256
@ -180,7 +181,7 @@ int setup_purgatory(struct kimage *image, const void *slave_code,
*
* Return: 0 on success, or negative errno on error.
*/
static int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size)
int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size)
{
int i, ret, num_rsvs = fdt_num_mem_rsv(fdt);
@ -209,6 +210,7 @@ static int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size
/*
* setup_new_fdt - modify /chosen and memory reservation for the next kernel
* @image: kexec image being loaded.
* @fdt: Flattened device tree for the next kernel.
* @initrd_load_addr: Address where the next initrd will be loaded.
* @initrd_len: Size of the next initrd, or 0 if there will be none.
@ -217,8 +219,9 @@ static int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size
*
* Return: 0 on success, or negative errno on error.
*/
int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
unsigned long initrd_len, const char *cmdline)
int setup_new_fdt(const struct kimage *image, void *fdt,
unsigned long initrd_load_addr, unsigned long initrd_len,
const char *cmdline)
{
int ret, chosen_node;
const void *prop;
@ -328,6 +331,12 @@ int setup_new_fdt(void *fdt, unsigned long initrd_load_addr,
}
}
ret = setup_ima_buffer(image, fdt, chosen_node);
if (ret) {
pr_err("Error setting up the new device tree.\n");
return ret;
}
ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0);
if (ret) {
pr_err("Error setting up the new device tree.\n");


@ -11,6 +11,7 @@
#define _LINUX_IMA_H
#include <linux/fs.h>
#include <linux/kexec.h>
struct linux_binprm;
#ifdef CONFIG_IMA
@ -23,6 +24,10 @@ extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
extern void ima_post_path_mknod(struct dentry *dentry);
#ifdef CONFIG_IMA_KEXEC
extern void ima_add_kexec_buffer(struct kimage *image);
#endif
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
{
@ -62,6 +67,13 @@ static inline void ima_post_path_mknod(struct dentry *dentry)
#endif /* CONFIG_IMA */
#ifndef CONFIG_IMA_KEXEC
struct kimage;
static inline void ima_add_kexec_buffer(struct kimage *image)
{}
#endif
#ifdef CONFIG_IMA_APPRAISE
extern void ima_inode_post_setattr(struct dentry *dentry);
extern int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,


@ -77,8 +77,11 @@ extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
#ifdef CONFIG_PRINTK
#define WARN_ON_RATELIMIT(condition, state) \
WARN_ON((condition) && __ratelimit(state))
#define WARN_ON_RATELIMIT(condition, state) ({ \
bool __rtn_cond = !!(condition); \
WARN_ON(__rtn_cond && __ratelimit(state)); \
__rtn_cond; \
})
#define WARN_RATELIMIT(condition, format, ...) \
({ \

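The point of the fix is that the macro now evaluates to the condition itself rather than to WARN_ON()'s rate-limited result, so callers can still branch on it when the warning is suppressed. A hypothetical caller (my_rs and MAX_LEN are illustrative names):

    if (WARN_ON_RATELIMIT(len > MAX_LEN, &my_rs))
            return -EINVAL;    /* taken whenever len > MAX_LEN, even if no warning was printed */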

@ -19,6 +19,7 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>
/*
* kcov descriptor (one per opened debugfs file).
@ -73,6 +74,11 @@ void notrace __sanitizer_cov_trace_pc(void)
if (mode == KCOV_MODE_TRACE) {
unsigned long *area;
unsigned long pos;
unsigned long ip = _RET_IP_;
#ifdef CONFIG_RANDOMIZE_BASE
ip -= kaslr_offset();
#endif
/*
* There is some code that runs in interrupts but for which
@ -86,7 +92,7 @@ void notrace __sanitizer_cov_trace_pc(void)
/* The first word is number of subsequent PCs. */
pos = READ_ONCE(area[0]) + 1;
if (likely(pos < t->kcov_size)) {
area[pos] = _RET_IP_;
area[pos] = ip;
WRITE_ONCE(area[0], pos);
}
}


@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/ima.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/syscalls.h>
@ -132,6 +133,9 @@ kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
return ret;
image->kernel_buf_len = size;
/* IMA needs to pass the measurement list to the next kernel. */
ima_add_kexec_buffer(image);
/* Call arch image probe handlers */
ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
image->kernel_buf_len);


@ -26,7 +26,7 @@ config CONSOLE_LOGLEVEL_DEFAULT
the kernel bootargs. loglevel=<x> continues to override whatever
value is specified here as well.
Note: This does not affect the log level of un-prefixed prink()
Note: This does not affect the log level of un-prefixed printk()
usage in the kernel. That is controlled by the MESSAGE_LOGLEVEL_DEFAULT
option.


@ -139,7 +139,20 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
}
if (end_index >= start_index) {
unsigned long count = invalidate_mapping_pages(mapping,
unsigned long count;
/*
* It's common to FADV_DONTNEED right after
* the read or write that instantiates the
* pages, in which case there will be some
* sitting on the local LRU cache. Try to
* avoid the expensive remote drain and the
* second cache tree walk below by flushing
* them out right away.
*/
lru_add_drain();
count = invalidate_mapping_pages(mapping,
start_index, end_index);
/*


@ -27,6 +27,18 @@ config IMA
to learn more about IMA.
If unsure, say N.
config IMA_KEXEC
bool "Enable carrying the IMA measurement list across a soft boot"
depends on IMA && TCG_TPM && HAVE_IMA_KEXEC
default n
help
TPM PCRs are only reset on a hard reboot. In order to validate
a TPM's quote after a soft boot, the IMA measurement list of the
running kernel must be saved and restored on boot.
Depending on the IMA policy, the measurement list can grow to
be very large.
config IMA_MEASURE_PCR_IDX
int
depends on IMA

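A hypothetical ppc64 configuration fragment that satisfies the new IMA_KEXEC option's dependencies (values shown for illustration only):

    CONFIG_KEXEC_FILE=y    # selects HAVE_IMA_KEXEC on powerpc
    CONFIG_TCG_TPM=y
    CONFIG_IMA=y
    CONFIG_IMA_KEXEC=y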

@ -8,4 +8,5 @@ obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
ima_policy.o ima_template.o ima_template_lib.o
ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
ima-$(CONFIG_HAVE_IMA_KEXEC) += ima_kexec.o
obj-$(CONFIG_IMA_BLACKLIST_KEYRING) += ima_mok.o


@ -28,6 +28,10 @@
#include "../integrity.h"
#ifdef CONFIG_HAVE_IMA_KEXEC
#include <asm/ima.h>
#endif
enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
@ -81,6 +85,7 @@ struct ima_template_field {
/* IMA template descriptor definition */
struct ima_template_desc {
struct list_head list;
char *name;
char *fmt;
int num_fields;
@ -102,6 +107,27 @@ struct ima_queue_entry {
};
extern struct list_head ima_measurements; /* list of all measurements */
/* Some details preceding the binary serialized measurement list */
struct ima_kexec_hdr {
u16 version;
u16 _reserved0;
u32 _reserved1;
u64 buffer_size;
u64 count;
};
#ifdef CONFIG_HAVE_IMA_KEXEC
void ima_load_kexec_buffer(void);
#else
static inline void ima_load_kexec_buffer(void) {}
#endif /* CONFIG_HAVE_IMA_KEXEC */
/*
* The default binary_runtime_measurements list format is defined as the
* platform native format. The canonical format is defined as little-endian.
*/
extern bool ima_canonical_fmt;
/* Internal IMA function definitions */
int ima_init(void);
int ima_fs_init(void);
@ -122,7 +148,12 @@ int ima_init_crypto(void);
void ima_putc(struct seq_file *m, void *data, int datalen);
void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
struct ima_template_desc *ima_template_desc_current(void);
int ima_restore_measurement_entry(struct ima_template_entry *entry);
int ima_restore_measurement_list(loff_t bufsize, void *buf);
int ima_measurements_show(struct seq_file *m, void *v);
unsigned long ima_get_binary_runtime_size(void);
int ima_init_template(void);
void ima_init_template_list(void);
/*
* used to protect h_table and sha_table


@ -477,11 +477,13 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
u8 *data_to_hash = field_data[i].data;
u32 datalen = field_data[i].len;
u32 datalen_to_hash =
!ima_canonical_fmt ? datalen : cpu_to_le32(datalen);
if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
rc = crypto_shash_update(shash,
(const u8 *) &field_data[i].len,
sizeof(field_data[i].len));
(const u8 *) &datalen_to_hash,
sizeof(datalen_to_hash));
if (rc)
break;
} else if (strcmp(td->fields[i]->field_id, "n") == 0) {


@ -28,6 +28,16 @@
static DEFINE_MUTEX(ima_write_mutex);
bool ima_canonical_fmt;
static int __init default_canonical_fmt_setup(char *str)
{
#ifdef __BIG_ENDIAN
ima_canonical_fmt = 1;
#endif
return 1;
}
__setup("ima_canonical_fmt", default_canonical_fmt_setup);
static int valid_policy = 1;
#define TMPBUFLEN 12
static ssize_t ima_show_htable_value(char __user *buf, size_t count,
@ -116,13 +126,13 @@ void ima_putc(struct seq_file *m, void *data, int datalen)
* [eventdata length]
* eventdata[n]=template specific data
*/
static int ima_measurements_show(struct seq_file *m, void *v)
int ima_measurements_show(struct seq_file *m, void *v)
{
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
char *template_name;
int namelen;
u32 pcr, namelen, template_data_len; /* temporary fields */
bool is_ima_template = false;
int i;
@ -139,25 +149,29 @@ static int ima_measurements_show(struct seq_file *m, void *v)
* PCR used defaults to the same (config option) in
* little-endian format, unless set in policy
*/
ima_putc(m, &e->pcr, sizeof(e->pcr));
pcr = !ima_canonical_fmt ? e->pcr : cpu_to_le32(e->pcr);
ima_putc(m, &pcr, sizeof(e->pcr));
/* 2nd: template digest */
ima_putc(m, e->digest, TPM_DIGEST_SIZE);
/* 3rd: template name size */
namelen = strlen(template_name);
namelen = !ima_canonical_fmt ? strlen(template_name) :
cpu_to_le32(strlen(template_name));
ima_putc(m, &namelen, sizeof(namelen));
/* 4th: template name */
ima_putc(m, template_name, namelen);
ima_putc(m, template_name, strlen(template_name));
/* 5th: template length (except for 'ima' template) */
if (strcmp(template_name, IMA_TEMPLATE_IMA_NAME) == 0)
is_ima_template = true;
if (!is_ima_template)
ima_putc(m, &e->template_data_len,
sizeof(e->template_data_len));
if (!is_ima_template) {
template_data_len = !ima_canonical_fmt ? e->template_data_len :
cpu_to_le32(e->template_data_len);
ima_putc(m, &template_data_len, sizeof(e->template_data_len));
}
/* 6th: template specific data */
for (i = 0; i < e->template_desc->num_fields; i++) {


@ -129,6 +129,8 @@ int __init ima_init(void)
if (rc != 0)
return rc;
ima_load_kexec_buffer();
rc = ima_add_boot_aggregate(); /* boot aggregate must be first entry */
if (rc != 0)
return rc;


@ -0,0 +1,168 @@
/*
* Copyright (C) 2016 IBM Corporation
*
* Authors:
* Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
* Mimi Zohar <zohar@linux.vnet.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/kexec.h>
#include "ima.h"
#ifdef CONFIG_IMA_KEXEC
static int ima_dump_measurement_list(unsigned long *buffer_size, void **buffer,
unsigned long segment_size)
{
struct ima_queue_entry *qe;
struct seq_file file;
struct ima_kexec_hdr khdr;
int ret = 0;
/* segment size can't change between kexec load and execute */
file.buf = vmalloc(segment_size);
if (!file.buf) {
ret = -ENOMEM;
goto out;
}
file.size = segment_size;
file.read_pos = 0;
file.count = sizeof(khdr); /* reserved space */
memset(&khdr, 0, sizeof(khdr));
khdr.version = 1;
list_for_each_entry_rcu(qe, &ima_measurements, later) {
if (file.count < file.size) {
khdr.count++;
ima_measurements_show(&file, qe);
} else {
ret = -EINVAL;
break;
}
}
if (ret < 0)
goto out;
/*
* fill in reserved space with some buffer details
* (eg. version, buffer size, number of measurements)
*/
khdr.buffer_size = file.count;
if (ima_canonical_fmt) {
khdr.version = cpu_to_le16(khdr.version);
khdr.count = cpu_to_le64(khdr.count);
khdr.buffer_size = cpu_to_le64(khdr.buffer_size);
}
memcpy(file.buf, &khdr, sizeof(khdr));
print_hex_dump(KERN_DEBUG, "ima dump: ", DUMP_PREFIX_NONE,
16, 1, file.buf,
file.count < 100 ? file.count : 100, true);
*buffer_size = file.count;
*buffer = file.buf;
out:
if (ret == -EINVAL)
vfree(file.buf);
return ret;
}
/*
* Called during kexec_file_load so that IMA can add a segment to the kexec
* image for the measurement list for the next kernel.
*
* This function assumes that kexec_mutex is held.
*/
void ima_add_kexec_buffer(struct kimage *image)
{
struct kexec_buf kbuf = { .image = image, .buf_align = PAGE_SIZE,
.buf_min = 0, .buf_max = ULONG_MAX,
.top_down = true };
unsigned long binary_runtime_size;
/* use more understandable variable names than defined in kbuf */
void *kexec_buffer = NULL;
size_t kexec_buffer_size;
size_t kexec_segment_size;
int ret;
/*
* Reserve an extra half page of memory for additional measurements
* added during the kexec load.
*/
binary_runtime_size = ima_get_binary_runtime_size();
if (binary_runtime_size >= ULONG_MAX - PAGE_SIZE)
kexec_segment_size = ULONG_MAX;
else
kexec_segment_size = ALIGN(ima_get_binary_runtime_size() +
PAGE_SIZE / 2, PAGE_SIZE);
if ((kexec_segment_size == ULONG_MAX) ||
((kexec_segment_size >> PAGE_SHIFT) > totalram_pages / 2)) {
pr_err("Binary measurement list too large.\n");
return;
}
ima_dump_measurement_list(&kexec_buffer_size, &kexec_buffer,
kexec_segment_size);
if (!kexec_buffer) {
pr_err("Not enough memory for the kexec measurement buffer.\n");
return;
}
kbuf.buffer = kexec_buffer;
kbuf.bufsz = kexec_buffer_size;
kbuf.memsz = kexec_segment_size;
ret = kexec_add_buffer(&kbuf);
if (ret) {
pr_err("Error passing over kexec measurement buffer.\n");
return;
}
ret = arch_ima_add_kexec_buffer(image, kbuf.mem, kexec_segment_size);
if (ret) {
pr_err("Error passing over kexec measurement buffer.\n");
return;
}
pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
kbuf.mem);
}
#endif /* IMA_KEXEC */
/*
* Restore the measurement list from the previous kernel.
*/
void ima_load_kexec_buffer(void)
{
void *kexec_buffer = NULL;
size_t kexec_buffer_size = 0;
int rc;
rc = ima_get_kexec_buffer(&kexec_buffer, &kexec_buffer_size);
switch (rc) {
case 0:
rc = ima_restore_measurement_list(kexec_buffer_size,
kexec_buffer);
if (rc != 0)
pr_err("Failed to restore the measurement list: %d\n",
rc);
ima_free_kexec_buffer();
break;
case -ENOTSUPP:
pr_debug("Restoring the measurement list not supported\n");
break;
case -ENOENT:
pr_debug("No measurement list to restore\n");
break;
default:
pr_debug("Error restoring the measurement list: %d\n", rc);
}
}


@ -418,6 +418,7 @@ static int __init init_ima(void)
{
int error;
ima_init_template_list();
hash_setup(CONFIG_IMA_DEFAULT_HASH);
error = ima_init();
if (!error) {


@ -29,6 +29,11 @@
#define AUDIT_CAUSE_LEN_MAX 32
LIST_HEAD(ima_measurements); /* list of all measurements */
#ifdef CONFIG_IMA_KEXEC
static unsigned long binary_runtime_size;
#else
static unsigned long binary_runtime_size = ULONG_MAX;
#endif
/* key: inode (before secure-hashing a file) */
struct ima_h_table ima_htable = {
@ -64,12 +69,32 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
return ret;
}
/*
* Calculate the memory required for serializing a single
* binary_runtime_measurement list entry, which contains a
* couple of variable length fields (e.g template name and data).
*/
static int get_binary_runtime_size(struct ima_template_entry *entry)
{
int size = 0;
size += sizeof(u32); /* pcr */
size += sizeof(entry->digest);
size += sizeof(int); /* template name size field */
size += strlen(entry->template_desc->name) + 1;
size += sizeof(entry->template_data_len);
size += entry->template_data_len;
return size;
}
/* ima_add_template_entry helper function:
* - Add template entry to measurement list and hash table.
* - Add template entry to the measurement list and hash table, for
* all entries except those carried across kexec.
*
* (Called with ima_extend_list_mutex held.)
*/
static int ima_add_digest_entry(struct ima_template_entry *entry)
static int ima_add_digest_entry(struct ima_template_entry *entry,
bool update_htable)
{
struct ima_queue_entry *qe;
unsigned int key;
@ -85,11 +110,34 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
list_add_tail_rcu(&qe->later, &ima_measurements);
atomic_long_inc(&ima_htable.len);
key = ima_hash_key(entry->digest);
hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
if (update_htable) {
key = ima_hash_key(entry->digest);
hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
}
if (binary_runtime_size != ULONG_MAX) {
int size;
size = get_binary_runtime_size(entry);
binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
binary_runtime_size + size : ULONG_MAX;
}
return 0;
}
/*
* Return the amount of memory required for serializing the
* entire binary_runtime_measurement list, including the ima_kexec_hdr
* structure.
*/
unsigned long ima_get_binary_runtime_size(void)
{
if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr)))
return ULONG_MAX;
else
return binary_runtime_size + sizeof(struct ima_kexec_hdr);
};
static int ima_pcr_extend(const u8 *hash, int pcr)
{
int result = 0;
@ -103,8 +151,13 @@ static int ima_pcr_extend(const u8 *hash, int pcr)
return result;
}
/* Add template entry to the measurement list and hash table,
* and extend the pcr.
/*
* Add template entry to the measurement list and hash table, and
* extend the pcr.
*
* On systems which support carrying the IMA measurement list across
* kexec, maintain the total memory size required for serializing the
* binary_runtime_measurements.
*/
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
const char *op, struct inode *inode,
@ -126,7 +179,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
}
result = ima_add_digest_entry(entry);
result = ima_add_digest_entry(entry, 1);
if (result < 0) {
audit_cause = "ENOMEM";
audit_info = 0;
@ -149,3 +202,13 @@ out:
op, audit_cause, result, audit_info);
return result;
}
int ima_restore_measurement_entry(struct ima_template_entry *entry)
{
int result = 0;
mutex_lock(&ima_extend_list_mutex);
result = ima_add_digest_entry(entry, 0);
mutex_unlock(&ima_extend_list_mutex);
return result;
}


@ -15,16 +15,20 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/rculist.h>
#include "ima.h"
#include "ima_template_lib.h"
static struct ima_template_desc defined_templates[] = {
static struct ima_template_desc builtin_templates[] = {
{.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
{.name = "ima-ng", .fmt = "d-ng|n-ng"},
{.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
{.name = "", .fmt = ""}, /* placeholder for a custom format */
};
static LIST_HEAD(defined_templates);
static DEFINE_SPINLOCK(template_list);
static struct ima_template_field supported_fields[] = {
{.field_id = "d", .field_init = ima_eventdigest_init,
.field_show = ima_show_template_digest},
@ -37,6 +41,7 @@ static struct ima_template_field supported_fields[] = {
{.field_id = "sig", .field_init = ima_eventsig_init,
.field_show = ima_show_template_sig},
};
#define MAX_TEMPLATE_NAME_LEN 15
static struct ima_template_desc *ima_template;
static struct ima_template_desc *lookup_template_desc(const char *name);
@ -52,6 +57,8 @@ static int __init ima_template_setup(char *str)
if (ima_template)
return 1;
ima_init_template_list();
/*
* Verify that a template with the supplied name exists.
* If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
@ -80,7 +87,7 @@ __setup("ima_template=", ima_template_setup);
static int __init ima_template_fmt_setup(char *str)
{
int num_templates = ARRAY_SIZE(defined_templates);
int num_templates = ARRAY_SIZE(builtin_templates);
if (ima_template)
return 1;
@ -91,22 +98,28 @@ static int __init ima_template_fmt_setup(char *str)
return 1;
}
defined_templates[num_templates - 1].fmt = str;
ima_template = defined_templates + num_templates - 1;
builtin_templates[num_templates - 1].fmt = str;
ima_template = builtin_templates + num_templates - 1;
return 1;
}
__setup("ima_template_fmt=", ima_template_fmt_setup);
static struct ima_template_desc *lookup_template_desc(const char *name)
{
int i;
struct ima_template_desc *template_desc;
int found = 0;
for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
if (strcmp(defined_templates[i].name, name) == 0)
return defined_templates + i;
rcu_read_lock();
list_for_each_entry_rcu(template_desc, &defined_templates, list) {
if ((strcmp(template_desc->name, name) == 0) ||
(strcmp(template_desc->fmt, name) == 0)) {
found = 1;
break;
}
}
return NULL;
rcu_read_unlock();
return found ? template_desc : NULL;
}
static struct ima_template_field *lookup_template_field(const char *field_id)
@ -142,9 +155,14 @@ static int template_desc_init_fields(const char *template_fmt,
{
const char *template_fmt_ptr;
struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
int template_num_fields = template_fmt_size(template_fmt);
int template_num_fields;
int i, len;
if (num_fields && *num_fields > 0) /* already initialized? */
return 0;
template_num_fields = template_fmt_size(template_fmt);
if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) {
pr_err("format string '%s' contains too many fields\n",
template_fmt);
@ -182,11 +200,28 @@ static int template_desc_init_fields(const char *template_fmt,
return 0;
}
void ima_init_template_list(void)
{
int i;
if (!list_empty(&defined_templates))
return;
spin_lock(&template_list);
for (i = 0; i < ARRAY_SIZE(builtin_templates); i++) {
list_add_tail_rcu(&builtin_templates[i].list,
&defined_templates);
}
spin_unlock(&template_list);
}
struct ima_template_desc *ima_template_desc_current(void)
{
if (!ima_template)
if (!ima_template) {
ima_init_template_list();
ima_template =
lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
}
return ima_template;
}
@ -205,3 +240,239 @@ int __init ima_init_template(void)
return result;
}
static struct ima_template_desc *restore_template_fmt(char *template_name)
{
struct ima_template_desc *template_desc = NULL;
int ret;
ret = template_desc_init_fields(template_name, NULL, NULL);
if (ret < 0) {
pr_err("attempting to initialize the template \"%s\" failed\n",
template_name);
goto out;
}
template_desc = kzalloc(sizeof(*template_desc), GFP_KERNEL);
if (!template_desc)
goto out;
template_desc->name = "";
template_desc->fmt = kstrdup(template_name, GFP_KERNEL);
if (!template_desc->fmt)
goto out;
spin_lock(&template_list);
list_add_tail_rcu(&template_desc->list, &defined_templates);
spin_unlock(&template_list);
out:
return template_desc;
}
static int ima_restore_template_data(struct ima_template_desc *template_desc,
void *template_data,
int template_data_size,
struct ima_template_entry **entry)
{
struct binary_field_data {
u32 len;
u8 data[0];
} __packed;
struct binary_field_data *field_data;
int offset = 0;
int ret = 0;
int i;
*entry = kzalloc(sizeof(**entry) +
template_desc->num_fields * sizeof(struct ima_field_data),
GFP_NOFS);
if (!*entry)
return -ENOMEM;
(*entry)->template_desc = template_desc;
for (i = 0; i < template_desc->num_fields; i++) {
field_data = template_data + offset;
/* Each field of the template data is prefixed with a length. */
if (offset > (template_data_size - sizeof(*field_data))) {
pr_err("Restoring the template field failed\n");
ret = -EINVAL;
break;
}
offset += sizeof(*field_data);
if (ima_canonical_fmt)
field_data->len = le32_to_cpu(field_data->len);
if (offset > (template_data_size - field_data->len)) {
pr_err("Restoring the template field data failed\n");
ret = -EINVAL;
break;
}
offset += field_data->len;
(*entry)->template_data[i].len = field_data->len;
(*entry)->template_data_len += sizeof(field_data->len);
(*entry)->template_data[i].data =
kzalloc(field_data->len + 1, GFP_KERNEL);
if (!(*entry)->template_data[i].data) {
ret = -ENOMEM;
break;
}
memcpy((*entry)->template_data[i].data, field_data->data,
field_data->len);
(*entry)->template_data_len += field_data->len;
}
if (ret < 0) {
ima_free_template_entry(*entry);
*entry = NULL;
}
return ret;
}
/* Restore the serialized binary measurement list without extending PCRs. */
int ima_restore_measurement_list(loff_t size, void *buf)
{
struct binary_hdr_v1 {
u32 pcr;
u8 digest[TPM_DIGEST_SIZE];
u32 template_name_len;
char template_name[0];
} __packed;
char template_name[MAX_TEMPLATE_NAME_LEN];
struct binary_data_v1 {
u32 template_data_size;
char template_data[0];
} __packed;
struct ima_kexec_hdr *khdr = buf;
struct binary_hdr_v1 *hdr_v1;
struct binary_data_v1 *data_v1;
void *bufp = buf + sizeof(*khdr);
void *bufendp;
struct ima_template_entry *entry;
struct ima_template_desc *template_desc;
unsigned long count = 0;
int ret = 0;
if (!buf || size < sizeof(*khdr))
return 0;
if (ima_canonical_fmt) {
khdr->version = le16_to_cpu(khdr->version);
khdr->count = le64_to_cpu(khdr->count);
khdr->buffer_size = le64_to_cpu(khdr->buffer_size);
}
if (khdr->version != 1) {
pr_err("attempting to restore a incompatible measurement list");
return -EINVAL;
}
if (khdr->count > ULONG_MAX - 1) {
pr_err("attempting to restore too many measurements");
return -EINVAL;
}
/*
* ima kexec buffer prefix: version, buffer size, count
* v1 format: pcr, digest, template-name-len, template-name,
* template-data-size, template-data
*/
bufendp = buf + khdr->buffer_size;
while ((bufp < bufendp) && (count++ < khdr->count)) {
hdr_v1 = bufp;
if (bufp > (bufendp - sizeof(*hdr_v1))) {
pr_err("attempting to restore partial measurement\n");
ret = -EINVAL;
break;
}
bufp += sizeof(*hdr_v1);
if (ima_canonical_fmt)
hdr_v1->template_name_len =
le32_to_cpu(hdr_v1->template_name_len);
if ((hdr_v1->template_name_len >= MAX_TEMPLATE_NAME_LEN) ||
(bufp > (bufendp - hdr_v1->template_name_len))) {
pr_err("attempting to restore a template name \
that is too long\n");
ret = -EINVAL;
break;
}
data_v1 = bufp += (u_int8_t)hdr_v1->template_name_len;
/* template name is not null terminated */
memcpy(template_name, hdr_v1->template_name,
hdr_v1->template_name_len);
template_name[hdr_v1->template_name_len] = 0;
if (strcmp(template_name, "ima") == 0) {
pr_err("attempting to restore an unsupported \
template \"%s\" failed\n", template_name);
ret = -EINVAL;
break;
}
template_desc = lookup_template_desc(template_name);
if (!template_desc) {
template_desc = restore_template_fmt(template_name);
if (!template_desc)
break;
}
/*
* Only the running system's template format is initialized
* on boot. As needed, initialize the other template formats.
*/
ret = template_desc_init_fields(template_desc->fmt,
&(template_desc->fields),
&(template_desc->num_fields));
if (ret < 0) {
pr_err("attempting to restore the template fmt \"%s\" \
failed\n", template_desc->fmt);
ret = -EINVAL;
break;
}
if (bufp > (bufendp - sizeof(data_v1->template_data_size))) {
pr_err("restoring the template data size failed\n");
ret = -EINVAL;
break;
}
bufp += (u_int8_t) sizeof(data_v1->template_data_size);
if (ima_canonical_fmt)
data_v1->template_data_size =
le32_to_cpu(data_v1->template_data_size);
if (bufp > (bufendp - data_v1->template_data_size)) {
pr_err("restoring the template data failed\n");
ret = -EINVAL;
break;
}
bufp += data_v1->template_data_size;
ret = ima_restore_template_data(template_desc,
data_v1->template_data,
data_v1->template_data_size,
&entry);
if (ret < 0)
break;
memcpy(entry->digest, hdr_v1->digest, TPM_DIGEST_SIZE);
entry->pcr =
!ima_canonical_fmt ? hdr_v1->pcr : le32_to_cpu(hdr_v1->pcr);
ret = ima_restore_measurement_entry(entry);
if (ret < 0)
break;
}
return ret;
}


@ -103,8 +103,11 @@ static void ima_show_template_data_binary(struct seq_file *m,
u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ?
strlen(field_data->data) : field_data->len;
if (show != IMA_SHOW_BINARY_NO_FIELD_LEN)
ima_putc(m, &len, sizeof(len));
if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) {
u32 field_len = !ima_canonical_fmt ? len : cpu_to_le32(len);
ima_putc(m, &field_len, sizeof(field_len));
}
if (!len)
return;