2019-05-29 14:17:56 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2012-03-08 01:34:32 +00:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2012 Google, Inc.
|
|
|
|
*/
|
|
|
|
|
2018-10-26 08:14:01 +00:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
2014-06-06 21:37:31 +00:00
|
|
|
|
2012-03-08 01:34:34 +00:00
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/err.h>
|
2012-03-08 01:34:32 +00:00
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/io.h>
|
2016-09-01 15:13:46 +00:00
|
|
|
#include <linux/kernel.h>
|
2012-03-08 01:34:34 +00:00
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/memblock.h>
|
2016-09-01 15:13:46 +00:00
|
|
|
#include <linux/pstore_ram.h>
|
2012-03-08 01:34:33 +00:00
|
|
|
#include <linux/rslib.h>
|
2012-03-08 01:34:32 +00:00
|
|
|
#include <linux/slab.h>
|
2016-09-01 15:13:46 +00:00
|
|
|
#include <linux/uaccess.h>
|
2012-03-08 01:34:34 +00:00
|
|
|
#include <linux/vmalloc.h>
|
staging: android: persistent_ram: Make it possible to use memory outside of bootmem
This includes devices' memory (e.g. framebuffers or memory mapped
EEPROMs on a local bus), as well as the normal RAM that we don't use
for the main memory.
For the normal (but unused) ram we could use kmaps, but this assumes
highmem support, so we don't bother and just use the memory via
ioremap.
As a side effect, the following hack is possible: when used together
with pstore_ram (new ramoops) module, we can limit the normal RAM region
with mem= and then point ramoops to use the rest of the memory, e.g.
mem=128M ramoops.mem_address=0x8000000
Sure, we could just reserve the region with memblock_reserve() early in
the arch/ code, and then register a pstore_ram platform device pointing
to the reserved region. It's still a viable option if platform wants
to do so.
Also, we might want to use IO accessors in case of a real device,
but for now we don't bother (the old ramoops wasn't using it either, so
at least we don't make things worse).
Signed-off-by: Anton Vorontsov <anton.vorontsov@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2012-05-12 00:17:54 +00:00
|
|
|
#include <asm/page.h>
|
2012-03-08 01:34:32 +00:00
|
|
|
|
2018-11-01 22:11:47 +00:00
|
|
|
/**
|
|
|
|
* struct persistent_ram_buffer - persistent circular RAM buffer
|
|
|
|
*
|
|
|
|
* @sig:
|
|
|
|
* signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
|
|
|
|
* @start:
|
|
|
|
* offset into @data where the beginning of the stored bytes begin
|
|
|
|
* @size:
|
|
|
|
* number of valid bytes stored in @data
|
|
|
|
*/
|
2012-03-08 01:34:32 +00:00
|
|
|
struct persistent_ram_buffer {
|
|
|
|
uint32_t sig;
|
2012-03-08 01:34:35 +00:00
|
|
|
atomic_t start;
|
|
|
|
atomic_t size;
|
2012-03-08 01:34:32 +00:00
|
|
|
uint8_t data[0];
|
|
|
|
};
|
|
|
|
|
|
|
|
#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */
|
|
|
|
|
2012-03-08 01:34:35 +00:00
|
|
|
static inline size_t buffer_size(struct persistent_ram_zone *prz)
|
|
|
|
{
|
|
|
|
return atomic_read(&prz->buffer->size);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline size_t buffer_start(struct persistent_ram_zone *prz)
|
|
|
|
{
|
|
|
|
return atomic_read(&prz->buffer->start);
|
|
|
|
}
|
|
|
|
|
2013-04-09 01:23:33 +00:00
|
|
|
/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
	int old;
	int new;
	unsigned long flags = 0;

	/*
	 * Locking is optional: zones written from contexts that cannot
	 * take locks (e.g. panic/NMI paths) set PRZ_FLAG_NO_LOCK and
	 * rely on being single-writer instead.
	 */
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->start);
	new = old + a;
	/* Wrap around the circular buffer; loop in case a > buffer_size. */
	while (unlikely(new >= prz->buffer_size))
		new -= prz->buffer_size;
	atomic_set(&prz->buffer->start, new);

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);

	return old;
}
|
|
|
|
|
|
|
|
/* increase the size counter until it hits the max size */
static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
	size_t old;
	size_t new;
	unsigned long flags = 0;

	/* Same optional-locking scheme as buffer_start_add(). */
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->size);
	/* Already full: the size counter saturates at buffer_size. */
	if (old == prz->buffer_size)
		goto exit;

	new = old + a;
	if (new > prz->buffer_size)
		new = prz->buffer_size;
	atomic_set(&prz->buffer->size, new);

exit:
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}
|
|
|
|
|
2012-03-08 01:34:36 +00:00
|
|
|
/*
 * Compute Reed-Solomon parity for @len bytes of @data and store the
 * resulting parity symbols (as bytes) into @ecc.
 */
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
	uint8_t *data, size_t len, uint8_t *ecc)
{
	int n;

	/* Clear the parity workspace before encoding into it. */
	memset(prz->ecc_info.par, 0,
	       prz->ecc_info.ecc_size * sizeof(prz->ecc_info.par[0]));
	encode_rs8(prz->rs_decoder, data, len, prz->ecc_info.par, 0);

	/* Narrow each parity symbol from the workspace into the byte array. */
	for (n = 0; n < prz->ecc_info.ecc_size; n++)
		ecc[n] = prz->ecc_info.par[n];
}
|
|
|
|
|
|
|
|
/*
 * Verify/correct @len bytes of @data against the stored parity @ecc.
 * Returns the number of corrected symbols, or a negative value when the
 * block is uncorrectable (decode_rs8() semantics).
 */
static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
	void *data, size_t len, uint8_t *ecc)
{
	int n;

	/* Widen the stored parity bytes into the decoder workspace. */
	for (n = 0; n < prz->ecc_info.ecc_size; n++)
		prz->ecc_info.par[n] = ecc[n];

	return decode_rs8(prz->rs_decoder, data, prz->ecc_info.par, len,
			  NULL, 0, NULL, 0, NULL);
}
|
|
|
|
|
2012-03-08 01:34:36 +00:00
|
|
|
/*
 * Re-encode the ECC parity for every ECC block touched by a write of
 * @count bytes at offset @start in the data area.
 */
static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
	unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *buffer_end = buffer->data + prz->buffer_size;
	uint8_t *block;
	uint8_t *par;
	int ecc_block_size = prz->ecc_info.block_size;
	int ecc_size = prz->ecc_info.ecc_size;
	int size = ecc_block_size;

	/* ECC disabled for this zone: nothing to maintain. */
	if (!ecc_size)
		return;

	/*
	 * Round down to the start of the containing ECC block.
	 * NOTE(review): the mask assumes ecc_block_size is a power of
	 * two — confirm against how block_size is chosen at init.
	 */
	block = buffer->data + (start & ~(ecc_block_size - 1));
	par = prz->par_buffer + (start / ecc_block_size) * ecc_size;

	do {
		/* The last block may be short; clamp to the buffer end. */
		if (block + ecc_block_size > buffer_end)
			size = buffer_end - block;
		persistent_ram_encode_rs8(prz, block, size, par);
		block += ecc_block_size;
		par += ecc_size;
	} while (block < buffer->data + start + count);
}
|
|
|
|
|
2012-03-08 01:34:33 +00:00
|
|
|
static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
|
2012-03-08 01:34:32 +00:00
|
|
|
{
|
|
|
|
struct persistent_ram_buffer *buffer = prz->buffer;
|
|
|
|
|
2012-05-22 23:33:23 +00:00
|
|
|
if (!prz->ecc_info.ecc_size)
|
2012-03-08 01:34:33 +00:00
|
|
|
return;
|
|
|
|
|
2012-03-08 01:34:32 +00:00
|
|
|
persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
|
|
|
|
prz->par_header);
|
|
|
|
}
|
|
|
|
|
2012-03-08 01:34:33 +00:00
|
|
|
/*
 * Run ECC verification/correction over the data left behind by the
 * previous boot, accumulating statistics in corrected_bytes/bad_blocks.
 */
static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *block;
	uint8_t *par;

	if (!prz->ecc_info.ecc_size)
		return;

	block = buffer->data;
	par = prz->par_buffer;
	/* Walk only the valid bytes, one ECC block at a time. */
	while (block < buffer->data + buffer_size(prz)) {
		int numerr;
		int size = prz->ecc_info.block_size;
		/* Final block may be short. */
		if (block + size > buffer->data + prz->buffer_size)
			size = buffer->data + prz->buffer_size - block;
		numerr = persistent_ram_decode_rs8(prz, block, size, par);
		if (numerr > 0) {
			pr_devel("error in block %p, %d\n", block, numerr);
			prz->corrected_bytes += numerr;
		} else if (numerr < 0) {
			pr_devel("uncorrectable error in block %p\n", block);
			prz->bad_blocks++;
		}
		block += prz->ecc_info.block_size;
		par += prz->ecc_info.ecc_size;
	}
}
|
|
|
|
|
2012-07-10 00:03:19 +00:00
|
|
|
/*
 * Set up Reed-Solomon ECC for a zone: carve the parity area out of the
 * tail of the buffer, build the RS codec, allocate the parity workspace,
 * and verify the persistent header from the previous boot.
 *
 * Returns 0 on success (or when ECC is disabled), -EINVAL/-ENOMEM on
 * failure.  On failure the caller is expected to tear the zone down.
 */
static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
				   struct persistent_ram_ecc_info *ecc_info)
{
	int numerr;
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ecc_blocks;
	size_t ecc_total;

	/* ECC not requested: leave the zone unprotected. */
	if (!ecc_info || !ecc_info->ecc_size)
		return 0;

	/* Fill in defaults for any unspecified parameters. */
	prz->ecc_info.block_size = ecc_info->block_size ?: 128;
	prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
	prz->ecc_info.symsize = ecc_info->symsize ?: 8;
	prz->ecc_info.poly = ecc_info->poly ?: 0x11d;

	/*
	 * Each data block of block_size bytes needs ecc_size parity bytes,
	 * plus one extra parity record for the header.  All parity lives
	 * at the end of the buffer, shrinking the usable data area.
	 */
	ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
				  prz->ecc_info.block_size +
				  prz->ecc_info.ecc_size);
	ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
	if (ecc_total >= prz->buffer_size) {
		pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
		       __func__, prz->ecc_info.ecc_size,
		       ecc_total, prz->buffer_size);
		return -EINVAL;
	}

	prz->buffer_size -= ecc_total;
	prz->par_buffer = buffer->data + prz->buffer_size;
	prz->par_header = prz->par_buffer +
			  ecc_blocks * prz->ecc_info.ecc_size;

	/*
	 * first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
				  0, 1, prz->ecc_info.ecc_size);
	if (prz->rs_decoder == NULL) {
		pr_info("init_rs failed\n");
		return -EINVAL;
	}

	/* allocate workspace instead of using stack VLA */
	prz->ecc_info.par = kmalloc_array(prz->ecc_info.ecc_size,
					  sizeof(*prz->ecc_info.par),
					  GFP_KERNEL);
	if (!prz->ecc_info.par) {
		pr_err("cannot allocate ECC parity workspace\n");
		return -ENOMEM;
	}

	prz->corrected_bytes = 0;
	prz->bad_blocks = 0;

	/* Check the header carried over from the previous boot. */
	numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
					   prz->par_header);
	if (numerr > 0) {
		pr_info("error in header, %d\n", numerr);
		prz->corrected_bytes += numerr;
	} else if (numerr < 0) {
		pr_info("uncorrectable error in header\n");
		prz->bad_blocks++;
	}

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Format a human-readable ECC status summary into @str (at most @len
 * bytes).  Returns 0 when ECC is disabled, otherwise the snprintf()
 * result for the summary line.
 */
ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
	char *str, size_t len)
{
	if (!prz->ecc_info.ecc_size)
		return 0;

	if (prz->corrected_bytes || prz->bad_blocks)
		return snprintf(str, len,
			"\n%d Corrected bytes, %d unrecoverable blocks\n",
			prz->corrected_bytes, prz->bad_blocks);

	return snprintf(str, len, "\nNo errors detected\n");
}
|
|
|
|
|
2012-03-08 01:34:36 +00:00
|
|
|
/* Copy @count bytes into the (possibly IO-mapped) ring at @start, then
 * re-encode the ECC parity covering the touched region.
 */
static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
	const void *s, unsigned int start, unsigned int count)
{
	memcpy_toio(prz->buffer->data + start, s, count);
	persistent_ram_update_ecc(prz, start, count);
}
|
|
|
|
|
2016-09-01 15:13:46 +00:00
|
|
|
/*
 * Like persistent_ram_update() but copying from userspace.  Returns
 * -EFAULT if the copy faults; ECC is refreshed either way, matching the
 * original behavior.
 */
static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int start, unsigned int count)
{
	int ret = 0;

	if (unlikely(__copy_from_user(prz->buffer->data + start, s, count)))
		ret = -EFAULT;

	persistent_ram_update_ecc(prz, start, count);
	return ret;
}
|
|
|
|
|
2012-05-26 13:07:49 +00:00
|
|
|
/*
 * Snapshot the previous boot's log out of persistent RAM into a normal
 * kernel buffer (prz->old_log), un-rotating the circular layout so the
 * copy is linear.  Runs ECC correction over the old data first.
 */
void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t size = buffer_size(prz);
	size_t start = buffer_start(prz);

	/* Nothing recorded by the previous boot. */
	if (!size)
		return;

	if (!prz->old_log) {
		/* Correct bit errors before copying the data out. */
		persistent_ram_ecc_old(prz);
		prz->old_log = kmalloc(size, GFP_KERNEL);
	}
	if (!prz->old_log) {
		pr_err("failed to allocate buffer\n");
		return;
	}

	prz->old_log_size = size;
	/* Linearize: tail (start..end) first, then the wrapped head. */
	memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
	memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
}
|
|
|
|
|
2012-03-08 01:34:36 +00:00
|
|
|
/*
 * Append @count bytes to the circular buffer, overwriting the oldest
 * data when full.  Returns @count (the full requested length, even when
 * only the tail of an oversized record was kept).
 */
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
	const void *s, unsigned int count)
{
	int rem;
	int c = count;
	size_t start;

	/* Record larger than the whole ring: keep only its tail. */
	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	/* Grow the valid-byte count before advancing the start pointer. */
	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	/* The write may wrap; split it at the end of the buffer. */
	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		persistent_ram_update(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	persistent_ram_update(prz, s, start, c);

	/* Header (start/size) changed, so its ECC must be refreshed too. */
	persistent_ram_update_header_ecc(prz);

	return count;
}
|
|
|
|
|
2016-09-01 15:13:46 +00:00
|
|
|
/*
 * Userspace variant of persistent_ram_write().  Returns @count on
 * success or -EFAULT if the user range is invalid or a copy faults.
 */
int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int count)
{
	int rem, ret = 0, c = count;
	size_t start;

	/* Validate the whole user range up front. */
	if (unlikely(!access_ok(s, count)))
		return -EFAULT;
	/* Oversized record: keep only its tail, as in the kernel variant. */
	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	/* Split a wrapping write at the end of the buffer. */
	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		ret = persistent_ram_update_user(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	/* Skip the second copy if the first half already faulted. */
	if (likely(!ret))
		ret = persistent_ram_update_user(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return unlikely(ret) ? ret : count;
}
|
|
|
|
|
2012-03-08 01:34:32 +00:00
|
|
|
size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
|
|
|
|
{
|
|
|
|
return prz->old_log_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
void *persistent_ram_old(struct persistent_ram_zone *prz)
|
|
|
|
{
|
|
|
|
return prz->old_log;
|
|
|
|
}
|
|
|
|
|
|
|
|
void persistent_ram_free_old(struct persistent_ram_zone *prz)
|
|
|
|
{
|
|
|
|
kfree(prz->old_log);
|
|
|
|
prz->old_log = NULL;
|
|
|
|
prz->old_log_size = 0;
|
|
|
|
}
|
|
|
|
|
2012-05-26 13:07:51 +00:00
|
|
|
/* Reset the zone to empty and re-seal the header with fresh ECC. */
void persistent_ram_zap(struct persistent_ram_zone *prz)
{
	atomic_set(&prz->buffer->start, 0);
	atomic_set(&prz->buffer->size, 0);
	/* Header contents changed, so its parity must be rewritten. */
	persistent_ram_update_header_ecc(prz);
}
|
|
|
|
|
2014-09-16 20:50:01 +00:00
|
|
|
/*
 * Map a physical range that lies in kernel-managed RAM (pfn_valid) by
 * building a page array and vmap()ing it.  @memtype selects uncached
 * vs. write-combined mappings.  Returns the byte-granular virtual
 * address of @start, or NULL on failure.
 */
static void *persistent_ram_vmap(phys_addr_t start, size_t size,
		unsigned int memtype)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
	void *vaddr;

	/* Round down to a page boundary; account for the intra-page offset. */
	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

	if (memtype)
		prot = pgprot_noncached(PAGE_KERNEL);
	else
		prot = pgprot_writecombine(PAGE_KERNEL);

	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err("%s: Failed to allocate array for %u pages\n",
		       __func__, page_count);
		return NULL;
	}

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;
		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}
	vaddr = vmap(pages, page_count, VM_MAP, prot);
	/* vmap() took its own references; the temporary array can go. */
	kfree(pages);

	/*
	 * Since vmap() uses page granularity, we must add the offset
	 * into the page here, to get the byte granularity address
	 * into the mapping to represent the actual "start" location.
	 */
	return vaddr + offset_in_page(start);
}
|
|
|
|
|
2014-09-16 20:50:01 +00:00
|
|
|
/*
 * Map a physical range that is NOT kernel-managed RAM (device memory or
 * RAM hidden from the allocator) via request_mem_region() + ioremap().
 * @label names the region in /proc/iomem ("ramoops" if NULL).
 * Returns the mapped virtual address, or NULL on failure.
 */
static void *persistent_ram_iomap(phys_addr_t start, size_t size,
		unsigned int memtype, char *label)
{
	void *va;

	if (!request_mem_region(start, size, label ?: "ramoops")) {
		pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
			label ?: "ramoops",
			(unsigned long long)size, (unsigned long long)start);
		return NULL;
	}

	/* Uncached for true device memory, write-combined otherwise. */
	if (memtype)
		va = ioremap(start, size);
	else
		va = ioremap_wc(start, size);

	/*
	 * Since request_mem_region() and ioremap() are byte-granularity
	 * there is no need handle anything special like we do when the
	 * vmap() case in persistent_ram_vmap() above.
	 */
	return va;
}
|
|
|
|
|
2012-05-12 00:17:43 +00:00
|
|
|
/*
 * Map the zone's backing store and point prz->buffer at the persistent
 * header.  Chooses vmap() for kernel-managed RAM and ioremap() for
 * everything else.  Returns 0 on success, -ENOMEM on mapping failure.
 */
static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
		struct persistent_ram_zone *prz, int memtype)
{
	prz->paddr = start;
	prz->size = size;

	/* pfn_valid distinguishes normal RAM from device/hidden memory. */
	if (pfn_valid(start >> PAGE_SHIFT))
		prz->vaddr = persistent_ram_vmap(start, size, memtype);
	else
		prz->vaddr = persistent_ram_iomap(start, size, memtype,
						  prz->label);

	if (!prz->vaddr) {
		pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
			(unsigned long long)size, (unsigned long long)start);
		return -ENOMEM;
	}

	/* The header lives at the front; the rest is the data area. */
	prz->buffer = prz->vaddr;
	prz->buffer_size = size - sizeof(struct persistent_ram_buffer);

	return 0;
}
|
|
|
|
|
2012-12-21 23:02:05 +00:00
|
|
|
/*
 * Validate (or reset) the zone's control header after it has been mapped.
 *
 * Sets up ECC first, then checks the on-memory signature: a matching
 * signature with sane start/size offsets means old records survived a
 * reboot and are preserved via persistent_ram_save_old(); anything else
 * (or the PRZ_FLAG_ZAP_OLD request from the caller) wipes the buffer.
 * Returns 0 on success or the ECC init error.
 */
static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
				    struct persistent_ram_ecc_info *ecc_info)
{
	bool wipe = !!(prz->flags & PRZ_FLAG_ZAP_OLD);
	int err;

	err = persistent_ram_init_ecc(prz, ecc_info);
	if (err) {
		pr_warn("ECC failed %s\n", prz->label);
		return err;
	}

	/* The caller's tag is folded into the signature stored in RAM. */
	sig ^= PERSISTENT_RAM_SIG;

	if (prz->buffer->sig != sig) {
		pr_debug("no valid data in buffer (sig = 0x%08x)\n",
			 prz->buffer->sig);
		prz->buffer->sig = sig;
		wipe = true;
	} else if (buffer_size(prz) == 0) {
		/* Valid but empty: nothing to save and nothing to wipe. */
		pr_debug("found existing empty buffer\n");
		return 0;
	} else if (buffer_size(prz) > prz->buffer_size ||
		   buffer_start(prz) > buffer_size(prz)) {
		/* Header offsets are out of range: treat as corrupted. */
		pr_info("found existing invalid buffer, size %zu, start %zu\n",
			buffer_size(prz), buffer_start(prz));
		wipe = true;
	} else {
		pr_debug("found existing buffer, size %zu, start %zu\n",
			 buffer_size(prz), buffer_start(prz));
		persistent_ram_save_old(prz);
	}

	/* Reset missing, invalid, or single-use memory area. */
	if (wipe)
		persistent_ram_zap(prz);

	return 0;
}
|
|
|
|
|
2012-05-12 00:18:05 +00:00
|
|
|
void persistent_ram_free(struct persistent_ram_zone *prz)
|
|
|
|
{
|
2012-06-19 02:15:52 +00:00
|
|
|
if (!prz)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (prz->vaddr) {
|
|
|
|
if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
|
2018-09-12 03:36:34 +00:00
|
|
|
/* We must vunmap() at page-granularity. */
|
|
|
|
vunmap(prz->vaddr - offset_in_page(prz->paddr));
|
2012-06-19 02:15:52 +00:00
|
|
|
} else {
|
|
|
|
iounmap(prz->vaddr);
|
|
|
|
release_mem_region(prz->paddr, prz->size);
|
|
|
|
}
|
|
|
|
prz->vaddr = NULL;
|
2012-05-12 00:18:05 +00:00
|
|
|
}
|
2018-03-07 20:18:33 +00:00
|
|
|
if (prz->rs_decoder) {
|
|
|
|
free_rs(prz->rs_decoder);
|
|
|
|
prz->rs_decoder = NULL;
|
|
|
|
}
|
|
|
|
kfree(prz->ecc_info.par);
|
|
|
|
prz->ecc_info.par = NULL;
|
|
|
|
|
2012-05-12 00:18:05 +00:00
|
|
|
persistent_ram_free_old(prz);
|
2018-10-18 00:20:35 +00:00
|
|
|
kfree(prz->label);
|
2012-05-12 00:18:05 +00:00
|
|
|
kfree(prz);
|
|
|
|
}
|
|
|
|
|
2012-12-21 23:02:05 +00:00
|
|
|
/*
 * persistent_ram_new - allocate, map, and initialize a persistent RAM zone
 * @start: physical base address of the region
 * @size: region size in bytes (header + data + ECC)
 * @sig: caller tag XOR'd into the buffer signature
 * @ecc_info: ECC layout parameters
 * @memtype: mapping type passed through to the buffer-map helper
 * @flags: PRZ_FLAG_* behavior bits
 * @label: human-readable name, duplicated into the zone
 *
 * Returns the new zone, or an ERR_PTR() on failure. On any error the
 * partially built zone is released via persistent_ram_free().
 */
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
			u32 sig, struct persistent_ram_ecc_info *ecc_info,
			unsigned int memtype, u32 flags, char *label)
{
	struct persistent_ram_zone *prz;
	int rc = -ENOMEM;

	prz = kzalloc(sizeof(*prz), GFP_KERNEL);
	if (!prz) {
		pr_err("failed to allocate persistent ram zone\n");
		goto err;
	}

	/* Initialize general buffer state. */
	raw_spin_lock_init(&prz->buffer_lock);
	prz->flags = flags;
	prz->label = kstrdup(label, GFP_KERNEL);

	rc = persistent_ram_buffer_map(start, size, prz, memtype);
	if (rc)
		goto err;

	rc = persistent_ram_post_init(prz, sig, ecc_info);
	if (rc)
		goto err;

	pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n",
		 prz->label, prz->size, (unsigned long long)prz->paddr,
		 sizeof(*prz->buffer), prz->buffer_size,
		 prz->size - sizeof(*prz->buffer) - prz->buffer_size,
		 prz->ecc_info.ecc_size, prz->ecc_info.block_size);

	return prz;

err:
	persistent_ram_free(prz);
	return ERR_PTR(rc);
}
|