commit 68baf692c4
Historically struct device_node references were tracked using a kref embedded as a struct field. Commit 75b57ecf9d ("of: Make device nodes kobjects so they show up in sysfs") (Mar 2014) refactored device_nodes to be kobjects such that the device tree could be more simply exposed to userspace using sysfs.

Commit 0829f6d1f6 ("of: device_node kobject lifecycle fixes") (Mar 2014) followed up these changes to better control the kobject lifecycle, and in particular the reference counting via of_node_get(), of_node_put(), and of_node_init(). A result of this second commit was that it introduced an of_node_put() call when a dynamic node is detached, in of_node_remove(), that removes the initial kobj reference created by of_node_init(). Traditionally, as the original dynamic device node user, the pseries code had assumed responsibility for releasing this final reference in its platform-specific DLPAR detach code.

This patch fixes a refcount underflow introduced by commit 0829f6d1f6 and recently exposed by the upstreaming of the refcount API.

With this patch, messages like the following are no longer seen in the kernel log after DLPAR remove operations of cpus and pci devices:

  rpadlpar_io: slot PHB 72 removed
  refcount_t: underflow; use-after-free.
  ------------[ cut here ]------------
  WARNING: CPU: 5 PID: 3335 at lib/refcount.c:128 refcount_sub_and_test+0xf4/0x110

Fixes: 0829f6d1f6 ("of: device_node kobject lifecycle fixes")
Cc: stable@vger.kernel.org # v3.15+
Signed-off-by: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
[mpe: Make change log commit references more verbose]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
601 lines | 13 KiB | C
/*
 * Support for dynamic reconfiguration for PCI, Memory, and CPU
 * Hotplug and Dynamic Logical Partitioning on RPA platforms.
 *
 * Copyright (C) 2009 Nathan Fontenot
 * Copyright (C) 2009 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	"dlpar: " fmt

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/of.h>

#include "of_helpers.h"
#include "pseries.h"

#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

static struct workqueue_struct *pseries_hp_wq;

struct pseries_hp_work {
	struct work_struct work;
	struct pseries_hp_errorlog *errlog;
	struct completion *hp_completion;
	int *rc;
};

struct cc_workarea {
	__be32	drc_index;
	__be32	zero;
	__be32	name_offset;
	__be32	prop_length;
	__be32	prop_offset;
};

void dlpar_free_cc_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
{
	struct property *prop;
	char *name;
	char *value;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	prop->name = kstrdup(name, GFP_KERNEL);

	prop->length = be32_to_cpu(ccwa->prop_length);
	value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
	prop->value = kmemdup(value, prop->length, GFP_KERNEL);
	if (!prop->value) {
		dlpar_free_cc_property(prop);
		return NULL;
	}

	return prop;
}

static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
					       const char *path)
{
	struct device_node *dn;
	char *name;

	/* If parent node path is "/" advance path to NULL terminator to
	 * prevent double leading slashs in full_name.
	 */
	if (!path[1])
		path++;

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn)
		return NULL;

	name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
	dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
	if (!dn->full_name) {
		kfree(dn);
		return NULL;
	}

	of_node_set_flag(dn, OF_DYNAMIC);
	of_node_init(dn);

	return dn;
}

static void dlpar_free_one_cc_node(struct device_node *dn)
{
	struct property *prop;

	while (dn->properties) {
		prop = dn->properties;
		dn->properties = prop->next;
		dlpar_free_cc_property(prop);
	}

	kfree(dn->full_name);
	kfree(dn);
}

void dlpar_free_cc_nodes(struct device_node *dn)
{
	if (dn->child)
		dlpar_free_cc_nodes(dn->child);

	if (dn->sibling)
		dlpar_free_cc_nodes(dn->sibling);

	dlpar_free_one_cc_node(dn);
}

#define COMPLETE	0
#define NEXT_SIBLING	1
#define NEXT_CHILD	2
#define NEXT_PROPERTY	3
#define PREV_PARENT	4
#define MORE_MEMORY	5
#define CALL_AGAIN	-2
#define ERR_CFG_USE	-9003

struct device_node *dlpar_configure_connector(__be32 drc_index,
					      struct device_node *parent)
{
	struct device_node *dn;
	struct device_node *first_dn = NULL;
	struct device_node *last_dn = NULL;
	struct property *property;
	struct property *last_property = NULL;
	struct cc_workarea *ccwa;
	char *data_buf;
	const char *parent_path = parent->full_name;
	int cc_token;
	int rc = -1;

	cc_token = rtas_token("ibm,configure-connector");
	if (cc_token == RTAS_UNKNOWN_SERVICE)
		return NULL;

	data_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!data_buf)
		return NULL;

	ccwa = (struct cc_workarea *)&data_buf[0];
	ccwa->drc_index = drc_index;
	ccwa->zero = 0;

	do {
		/* Since we release the rtas_data_buf lock between configure
		 * connector calls we want to re-populate the rtas_data_buffer
		 * with the contents of the previous call.
		 */
		spin_lock(&rtas_data_buf_lock);

		memcpy(rtas_data_buf, data_buf, RTAS_DATA_BUF_SIZE);
		rc = rtas_call(cc_token, 2, 1, NULL, rtas_data_buf, NULL);
		memcpy(data_buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

		spin_unlock(&rtas_data_buf_lock);

		switch (rc) {
		case COMPLETE:
			break;

		case NEXT_SIBLING:
			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			dn->parent = last_dn->parent;
			last_dn->sibling = dn;
			last_dn = dn;
			break;

		case NEXT_CHILD:
			if (first_dn)
				parent_path = last_dn->full_name;

			dn = dlpar_parse_cc_node(ccwa, parent_path);
			if (!dn)
				goto cc_error;

			if (!first_dn) {
				dn->parent = parent;
				first_dn = dn;
			} else {
				dn->parent = last_dn;
				if (last_dn)
					last_dn->child = dn;
			}

			last_dn = dn;
			break;

		case NEXT_PROPERTY:
			property = dlpar_parse_cc_property(ccwa);
			if (!property)
				goto cc_error;

			if (!last_dn->properties)
				last_dn->properties = property;
			else
				last_property->next = property;

			last_property = property;
			break;

		case PREV_PARENT:
			last_dn = last_dn->parent;
			parent_path = last_dn->parent->full_name;
			break;

		case CALL_AGAIN:
			break;

		case MORE_MEMORY:
		case ERR_CFG_USE:
		default:
			printk(KERN_ERR "Unexpected Error (%d) "
			       "returned from configure-connector\n", rc);
			goto cc_error;
		}
	} while (rc);

cc_error:
	kfree(data_buf);

	if (rc) {
		if (first_dn)
			dlpar_free_cc_nodes(first_dn);

		return NULL;
	}

	return first_dn;
}

int dlpar_attach_node(struct device_node *dn)
{
	int rc;

	dn->parent = pseries_of_derive_parent(dn->full_name);
	if (IS_ERR(dn->parent))
		return PTR_ERR(dn->parent);

	rc = of_attach_node(dn);
	if (rc) {
		printk(KERN_ERR "Failed to add device node %s\n",
		       dn->full_name);
		return rc;
	}

	of_node_put(dn->parent);
	return 0;
}

int dlpar_detach_node(struct device_node *dn)
{
	struct device_node *child;
	int rc;

	child = of_get_next_child(dn, NULL);
	while (child) {
		dlpar_detach_node(child);
		child = of_get_next_child(dn, child);
	}

	rc = of_detach_node(dn);
	if (rc)
		return rc;

	return 0;
}

#define DR_ENTITY_SENSE		9003
#define DR_ENTITY_PRESENT	1
#define DR_ENTITY_UNUSABLE	2
#define ALLOCATION_STATE	9003
#define ALLOC_UNUSABLE		0
#define ALLOC_USABLE		1
#define ISOLATION_STATE		9001
#define ISOLATE			0
#define UNISOLATE		1

int dlpar_acquire_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_UNUSABLE)
		return -1;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_USABLE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
	if (rc) {
		rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
		return rc;
	}

	return 0;
}

int dlpar_release_drc(u32 drc_index)
{
	int dr_status, rc;

	rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
		       DR_ENTITY_SENSE, drc_index);
	if (rc || dr_status != DR_ENTITY_PRESENT)
		return -1;

	rc = rtas_set_indicator(ISOLATION_STATE, drc_index, ISOLATE);
	if (rc)
		return rc;

	rc = rtas_set_indicator(ALLOCATION_STATE, drc_index, ALLOC_UNUSABLE);
	if (rc) {
		rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
		return rc;
	}

	return 0;
}

static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
	int rc;

	/* pseries error logs are in BE format, convert to cpu type */
	switch (hp_elog->id_type) {
	case PSERIES_HP_ELOG_ID_DRC_COUNT:
		hp_elog->_drc_u.drc_count =
				be32_to_cpu(hp_elog->_drc_u.drc_count);
		break;
	case PSERIES_HP_ELOG_ID_DRC_INDEX:
		hp_elog->_drc_u.drc_index =
				be32_to_cpu(hp_elog->_drc_u.drc_index);
		break;
	case PSERIES_HP_ELOG_ID_DRC_IC:
		hp_elog->_drc_u.ic.count =
				be32_to_cpu(hp_elog->_drc_u.ic.count);
		hp_elog->_drc_u.ic.index =
				be32_to_cpu(hp_elog->_drc_u.ic.index);
	}

	switch (hp_elog->resource) {
	case PSERIES_HP_ELOG_RESOURCE_MEM:
		rc = dlpar_memory(hp_elog);
		break;
	case PSERIES_HP_ELOG_RESOURCE_CPU:
		rc = dlpar_cpu(hp_elog);
		break;
	default:
		pr_warn_ratelimited("Invalid resource (%d) specified\n",
				    hp_elog->resource);
		rc = -EINVAL;
	}

	return rc;
}

static void pseries_hp_work_fn(struct work_struct *work)
{
	struct pseries_hp_work *hp_work =
			container_of(work, struct pseries_hp_work, work);

	if (hp_work->rc)
		*(hp_work->rc) = handle_dlpar_errorlog(hp_work->errlog);
	else
		handle_dlpar_errorlog(hp_work->errlog);

	if (hp_work->hp_completion)
		complete(hp_work->hp_completion);

	kfree(hp_work->errlog);
	kfree((void *)work);
}

void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
			 struct completion *hotplug_done, int *rc)
{
	struct pseries_hp_work *work;
	struct pseries_hp_errorlog *hp_errlog_copy;

	hp_errlog_copy = kmalloc(sizeof(struct pseries_hp_errorlog),
				 GFP_KERNEL);
	memcpy(hp_errlog_copy, hp_errlog, sizeof(struct pseries_hp_errorlog));

	work = kmalloc(sizeof(struct pseries_hp_work), GFP_KERNEL);
	if (work) {
		INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
		work->errlog = hp_errlog_copy;
		work->hp_completion = hotplug_done;
		work->rc = rc;
		queue_work(pseries_hp_wq, (struct work_struct *)work);
	} else {
		*rc = -ENOMEM;
		kfree(hp_errlog_copy);
		complete(hotplug_done);
	}
}

static int dlpar_parse_resource(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "memory")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
	} else if (sysfs_streq(arg, "cpu")) {
		hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
	} else {
		pr_err("Invalid resource specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_action(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "add")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
	} else if (sysfs_streq(arg, "remove")) {
		hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
	} else {
		pr_err("Invalid action specified.\n");
		return -EINVAL;
	}

	return 0;
}

static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
{
	char *arg;
	u32 count, index;

	arg = strsep(cmd, " ");
	if (!arg)
		return -EINVAL;

	if (sysfs_streq(arg, "indexed-count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_IC;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.ic.count = cpu_to_be32(count);
		hp_elog->_drc_u.ic.index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "index")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC Index specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &index)) {
			pr_err("Invalid DRC Index specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_index = cpu_to_be32(index);
	} else if (sysfs_streq(arg, "count")) {
		hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
		arg = strsep(cmd, " ");
		if (!arg) {
			pr_err("No DRC count specified.\n");
			return -EINVAL;
		}

		if (kstrtou32(arg, 0, &count)) {
			pr_err("Invalid DRC count specified.\n");
			return -EINVAL;
		}

		hp_elog->_drc_u.drc_count = cpu_to_be32(count);
	} else {
		pr_err("Invalid id_type specified.\n");
		return -EINVAL;
	}

	return 0;
}

static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
			   const char *buf, size_t count)
{
	struct pseries_hp_errorlog *hp_elog;
	struct completion hotplug_done;
	char *argbuf;
	char *args;
	int rc;

	args = argbuf = kstrdup(buf, GFP_KERNEL);
	hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
	if (!hp_elog || !argbuf) {
		pr_info("Could not allocate resources for DLPAR operation\n");
		kfree(argbuf);
		kfree(hp_elog);
		return -ENOMEM;
	}

	/*
	 * Parse out the request from the user, this will be in the form:
	 * <resource> <action> <id_type> <id>
	 */
	rc = dlpar_parse_resource(&args, hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_action(&args, hp_elog);
	if (rc)
		goto dlpar_store_out;

	rc = dlpar_parse_id_type(&args, hp_elog);
	if (rc)
		goto dlpar_store_out;

	init_completion(&hotplug_done);
	queue_hotplug_event(hp_elog, &hotplug_done, &rc);
	wait_for_completion(&hotplug_done);

dlpar_store_out:
	kfree(argbuf);
	kfree(hp_elog);

	if (rc)
		pr_err("Could not handle DLPAR request \"%s\"\n", buf);

	return rc ? rc : count;
}

static ssize_t dlpar_show(struct class *class, struct class_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n", "memory,cpu");
}

static CLASS_ATTR(dlpar, S_IWUSR | S_IRUSR, dlpar_show, dlpar_store);

static int __init pseries_dlpar_init(void)
{
	pseries_hp_wq = alloc_workqueue("pseries hotplug workqueue",
					WQ_UNBOUND, 1);
	return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
}
machine_device_initcall(pseries, pseries_dlpar_init);