2005-04-16 22:20:36 +00:00
|
|
|
/*
|
2009-06-16 08:30:22 +00:00
|
|
|
* driver for channel subsystem
|
2005-04-16 22:20:36 +00:00
|
|
|
*
|
2010-10-25 14:10:28 +00:00
|
|
|
* Copyright IBM Corp. 2002, 2010
|
2009-06-16 08:30:22 +00:00
|
|
|
*
|
|
|
|
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
|
|
|
|
* Cornelia Huck (cornelia.huck@de.ibm.com)
|
2005-04-16 22:20:36 +00:00
|
|
|
*/
|
2008-12-25 12:39:36 +00:00
|
|
|
|
|
|
|
#define KMSG_COMPONENT "cio"
|
|
|
|
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/list.h>
|
2007-10-12 14:11:20 +00:00
|
|
|
#include <linux/reboot.h>
|
2009-06-16 08:30:22 +00:00
|
|
|
#include <linux/suspend.h>
|
2010-02-26 21:37:25 +00:00
|
|
|
#include <linux/proc_fs.h>
|
2008-07-14 07:58:58 +00:00
|
|
|
#include <asm/isc.h>
|
2009-03-26 14:24:01 +00:00
|
|
|
#include <asm/crw.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
#include "css.h"
|
|
|
|
#include "cio.h"
|
|
|
|
#include "cio_debug.h"
|
|
|
|
#include "ioasm.h"
|
|
|
|
#include "chsc.h"
|
2006-06-29 12:57:03 +00:00
|
|
|
#include "device.h"
|
2007-04-27 14:01:34 +00:00
|
|
|
#include "idset.h"
|
2007-04-27 14:01:35 +00:00
|
|
|
#include "chp.h"
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
int css_init_done = 0;
|
2009-09-22 20:58:37 +00:00
|
|
|
int max_ssid;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-10-12 14:11:13 +00:00
|
|
|
struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-02-05 20:18:53 +00:00
|
|
|
/*
 * for_each_subchannel - invoke @fn for every possible subchannel id.
 *
 * Iterates over all subchannel numbers (0..__MAX_SUBCHANNEL) within all
 * subchannel sets (0..max_ssid) and calls @fn for each id. Iteration
 * stops as soon as @fn returns non-zero; that value is returned to the
 * caller. Returns -ENODEV only if @fn was never called (cannot normally
 * happen since at least one id always exists).
 */
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		/* Restart subchannel numbering for the next set. */
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
|
|
|
|
|
2008-01-26 13:10:48 +00:00
|
|
|
/*
 * Callback context for for_each_subchannel_staged(): carries the user
 * data pointer, the idset used to track which subchannels were already
 * visited as registered devices, and the two per-kind callbacks.
 */
struct cb_data {
	void *data;		/* opaque user data passed through to callbacks */
	struct idset *set;	/* ids not yet seen via the css bus */
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};
|
|
|
|
|
|
|
|
static int call_fn_known_sch(struct device *dev, void *data)
|
|
|
|
{
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
struct cb_data *cb = data;
|
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
idset_sch_del(cb->set, sch->schid);
|
|
|
|
if (cb->fn_known_sch)
|
|
|
|
rc = cb->fn_known_sch(sch, cb->data);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
|
|
|
|
{
|
|
|
|
struct cb_data *cb = data;
|
|
|
|
int rc = 0;
|
|
|
|
|
|
|
|
if (idset_sch_contains(cb->set, schid))
|
|
|
|
rc = cb->fn_unknown_sch(schid, cb->data);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2009-03-26 14:24:11 +00:00
|
|
|
/*
 * Fallback callback used when no idset could be allocated: for each id,
 * look up a registered subchannel and dispatch to the known handler
 * (dropping the lookup reference afterwards), otherwise dispatch to the
 * unknown handler.
 */
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		/* Release the reference taken by get_subchannel_by_schid(). */
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}
|
|
|
|
|
2008-01-26 13:10:48 +00:00
|
|
|
/*
 * for_each_subchannel_staged - two-phase iteration over subchannels.
 * @fn_known: called for each subchannel registered on the css bus
 * @fn_unknown: called for each remaining (unregistered) subchannel id
 * @data: opaque pointer passed through to both callbacks
 *
 * Phase 1 walks the css bus and calls @fn_known for every registered
 * subchannel, removing each visited id from a full idset. Phase 2 then
 * scans all remaining ids and calls @fn_unknown for them. If the idset
 * cannot be allocated, falls back to a brute-force per-id scan.
 * Stops and returns the first non-zero callback result.
 */
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	/* Start with every id marked as "not yet seen". */
	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}
|
|
|
|
|
2009-12-07 11:51:18 +00:00
|
|
|
static void css_sch_todo(struct work_struct *work);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Allocate and validate a subchannel structure for @schid.
 *
 * Memory is allocated with GFP_DMA since the structure is used for
 * channel I/O. Returns an ERR_PTR on allocation or validation failure;
 * on success the todo work item is initialized and the new subchannel
 * is returned.
 */
static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel (sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	INIT_WORK(&sch->todo_work, css_sch_todo);
	return sch;
}
|
|
|
|
|
|
|
|
/*
 * Device release callback for subchannels.
 *
 * The static console subchannel is never freed; for all others the
 * interruption parameter is cleared in the hardware before the lock
 * and structure are released.
 */
static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}
|
|
|
|
|
2007-07-27 10:29:10 +00:00
|
|
|
/*
 * Register @sch with the driver core under its bus id
 * "<cssid>.<ssid>.<sch_no>". Registration is serialized against
 * unregistration via sch->reg_mutex.
 */
static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}
|
|
|
|
|
2008-07-14 07:58:47 +00:00
|
|
|
/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 *
 * Safe to call even if @sch was never (or already un-) registered;
 * the reg_mutex serializes against concurrent registration.
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
|
2006-07-12 14:39:50 +00:00
|
|
|
|
2009-12-07 11:51:18 +00:00
|
|
|
/*
 * Workqueue handler performing deferred subchannel operations queued by
 * css_sched_sch_todo(). Reads and clears sch->todo under the subchannel
 * lock, performs the operation, then drops the device reference that
 * css_sched_sch_todo() took for the work item.
 */
static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	if (todo == SCH_TODO_UNREG)
		css_sch_device_unregister(sch);
	/* Release workqueue ref. */
	put_device(&sch->dev);
}
|
|
|
|
|
|
|
|
/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	/* Higher (or equal) priority operation already pending - done. */
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
|
|
|
|
|
2007-04-27 14:01:35 +00:00
|
|
|
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int mask;
|
|
|
|
|
|
|
|
memset(ssd, 0, sizeof(struct chsc_ssd_info));
|
|
|
|
ssd->path_mask = pmcw->pim;
|
|
|
|
for (i = 0; i < 8; i++) {
|
|
|
|
mask = 0x80 >> i;
|
|
|
|
if (pmcw->pim & mask) {
|
|
|
|
chp_id_init(&ssd->chpid[i]);
|
|
|
|
ssd->chpid[i].id = pmcw->chpid[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int mask;
|
|
|
|
|
|
|
|
for (i = 0; i < 8; i++) {
|
|
|
|
mask = 0x80 >> i;
|
|
|
|
if (ssd->path_mask & mask)
|
|
|
|
if (!chp_is_registered(ssd->chpid[i]))
|
|
|
|
chp_new(ssd->chpid[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Refresh the subchannel-description data of @sch.
 *
 * For the console subchannel, CHSC cannot be used (it runs too early
 * for memory allocation), so the info is derived from the pmcw instead.
 * For all other subchannels, CHSC is tried first with the pmcw as
 * fallback, and the referenced channel paths are registered.
 */
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}
|
|
|
|
|
2008-07-14 07:58:44 +00:00
|
|
|
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
|
|
|
|
return sprintf(buf, "%01x\n", sch->st);
|
|
|
|
}
|
|
|
|
|
|
|
|
static DEVICE_ATTR(type, 0444, type_show, NULL);
|
|
|
|
|
|
|
|
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
|
|
|
|
char *buf)
|
|
|
|
{
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
|
|
|
|
return sprintf(buf, "css:t%01X\n", sch->st);
|
|
|
|
}
|
|
|
|
|
|
|
|
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
|
|
|
|
|
|
|
|
/* Default sysfs attributes present on every subchannel device. */
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};
|
|
|
|
|
2007-04-27 14:01:35 +00:00
|
|
|
/*
 * Initialize the device-model fields of @sch and register it on the
 * css bus. The add uevent is suppressed until a driver has had a
 * chance to bind (see comment below); if no driver matches, the uevent
 * is generated immediately so a module can be autoloaded via modalias.
 */
static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}
|
|
|
|
|
2008-07-14 07:58:45 +00:00
|
|
|
/*
 * Allocate (or, for the console, fetch the static) subchannel for
 * @schid and register it with the css bus. On registration failure,
 * the reference is dropped only for non-console subchannels since the
 * console subchannel is statically allocated.
 */
int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			return PTR_ERR(sch);
	}
	ret = css_register_subchannel(sch);
	if (ret) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return ret;
}
|
|
|
|
|
2005-06-25 21:55:27 +00:00
|
|
|
static int
|
|
|
|
check_subchannel(struct device * dev, void * data)
|
|
|
|
{
|
|
|
|
struct subchannel *sch;
|
2006-01-06 08:19:21 +00:00
|
|
|
struct subchannel_id *schid = data;
|
2005-06-25 21:55:27 +00:00
|
|
|
|
|
|
|
sch = to_subchannel(dev);
|
2006-01-06 08:19:21 +00:00
|
|
|
return schid_equal(&sch->schid, schid);
|
2005-06-25 21:55:27 +00:00
|
|
|
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Look up the registered subchannel with id @schid.
 *
 * Returns the subchannel with an elevated device reference (caller
 * must put_device()), or NULL if no matching device is registered.
 */
struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
|
|
|
|
|
2008-01-26 13:10:45 +00:00
|
|
|
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 *
 * An I/O subchannel is valid only if its device number is valid (dnv);
 * a message subchannel only if the w bit is set. Returns 1 if valid,
 * 0 otherwise.
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
|
|
|
|
|
2006-09-20 14:00:01 +00:00
|
|
|
/*
 * Evaluate a subchannel id with no registered device behind it.
 *
 * Outside the slow path (@slow == 0) evaluation is deferred with
 * -EAGAIN. On the slow path the schib is read; unusable subchannels
 * are silently ignored, usable ones are probed and registered.
 */
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}
|
|
|
|
|
2008-07-14 07:58:45 +00:00
|
|
|
/*
 * Evaluate a subchannel that already has a registered device by
 * dispatching to the bound driver's sch_event callback. Logs a message
 * on any result other than success or deferral (-EAGAIN).
 */
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}
|
|
|
|
|
2007-04-27 14:01:34 +00:00
|
|
|
/*
 * Evaluate @schid, routing to the known- or new-subchannel handler
 * depending on whether a device is registered for it. A deferred
 * result (-EAGAIN) is re-scheduled onto the slow evaluation path.
 */
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		/* Drop the lookup reference. */
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}
|
|
|
|
|
2007-04-27 14:01:34 +00:00
|
|
|
/* Set of subchannel ids awaiting slow-path evaluation, and its lock. */
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
/* Waitqueue/flag used to wait for pending evaluations to complete. */
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

/*
 * Initialize the slow-path evaluation infrastructure. Returns -ENOMEM
 * if the idset cannot be allocated.
 */
static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}
|
|
|
|
|
2008-01-26 13:10:48 +00:00
|
|
|
/*
 * Slow-path handler for registered subchannels: atomically test and
 * remove the id from the slow set, then evaluate it. A deferred result
 * puts the id back on the slow path. Always returns 0 so iteration
 * continues.
 */
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Slow-path handler for unregistered subchannel ids: atomically test
 * and remove the id from the slow set, then evaluate it. -EAGAIN is
 * re-scheduled; -ENXIO/-ENOMEM/-EIO are propagated to abort the scan;
 * any other result is treated as success.
 */
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}
|
|
|
|
|
|
|
|
/*
 * Workqueue function draining the slow evaluation set. When the set is
 * empty afterwards, clears the "scheduled" flag and wakes waiters in
 * css_wait_for_slow_path().
 */
static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

/* Work item and workqueue used for all deferred cio processing. */
static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-04-27 14:01:34 +00:00
|
|
|
/*
 * Queue @schid for slow-path evaluation and kick the slow-path work.
 */
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
|
|
|
|
|
|
|
|
/*
 * Queue every possible subchannel id for slow-path evaluation and kick
 * the slow-path work.
 */
void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
|
|
|
|
|
2009-09-22 20:58:38 +00:00
|
|
|
static int __unset_registered(struct device *dev, void *data)
|
2008-04-17 05:45:59 +00:00
|
|
|
{
|
2009-09-22 20:58:38 +00:00
|
|
|
struct idset *set = data;
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
2006-06-29 12:57:03 +00:00
|
|
|
|
2009-09-22 20:58:38 +00:00
|
|
|
idset_sch_del(set, sch->schid);
|
|
|
|
return 0;
|
2009-03-26 14:24:20 +00:00
|
|
|
}
|
|
|
|
|
2010-10-25 14:10:23 +00:00
|
|
|
/*
 * Schedule slow-path evaluation for all subchannel ids that have no
 * registered device. Builds a full idset, subtracts the registered
 * ids, and merges the remainder into the slow set. Falls back to
 * evaluating everything if the temporary idset cannot be allocated.
 */
static void css_schedule_eval_all_unreg(void)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}
|
|
|
|
|
2009-09-22 20:58:38 +00:00
|
|
|
/* Wait until all work queued on the cio workqueue has completed. */
void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}
|
2006-06-29 12:57:03 +00:00
|
|
|
|
|
|
|
/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Called from the machine check handler for subchannel report words.
 *
 * On overflow, all subchannels must be re-evaluated. Otherwise the
 * affected subchannel id is decoded from the CRW pair (crw1, if
 * present, carries the ssid) and evaluated; a path-modified error
 * additionally triggers a refresh of the subchannel's ssd info.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		/* The second CRW's rsid carries the subchannel set id. */
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}
|
|
|
|
|
|
|
|
/*
 * Build the global path-group id for @css.
 *
 * With multiple-css support the extended-cssid format is used;
 * otherwise the CPU address (or 0 on non-SMP builds) forms the high
 * part. CPU id/model and the TOD-clock high word complete the pgid.
 */
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;

}
|
|
|
|
|
2006-01-06 08:19:26 +00:00
|
|
|
/*
 * Device release callback for a channel subsystem. Frees the pseudo
 * subchannel if it was allocated but never registered (a registered
 * one is released through its own device release path).
 */
static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}
|
|
|
|
|
2006-03-24 11:15:14 +00:00
|
|
|
/*
 * sysfs "cm_enable" read: report whether channel measurement is
 * enabled for this channel subsystem ("0" or "1").
 */
static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * sysfs "cm_enable" write: enable ("1") or disable ("0") channel
 * measurement via chsc_secm(). Writing the current state is a no-op;
 * any other value yields -EINVAL.
 */
static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
|
|
|
|
|
2007-02-05 20:18:53 +00:00
|
|
|
/*
 * Initialize channel subsystem @nr: allocate and set up its "defunct"
 * pseudo subchannel, initialize locks and identifiers, and generate
 * the global path-group id. Returns 0 on success, negative errno on
 * allocation/lock-creation failure.
 */
static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}
|
|
|
|
|
2007-10-12 14:11:20 +00:00
|
|
|
/*
 * Reboot notifier: disable channel measurement on every channel
 * subsystem before reboot. Returns NOTIFY_BAD if disabling fails for
 * any of them, NOTIFY_DONE otherwise.
 */
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};
|
|
|
|
|
2009-06-16 08:30:22 +00:00
|
|
|
/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 *
 * On suspend/hibernate prepare, channel measurements are switched off
 * for every css that has them enabled; on resume they are switched
 * back on and a reprobe is scheduled.  Returns NOTIFY_BAD if any
 * secm operation failed, NOTIFY_DONE otherwise.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* Going to sleep: disable measurements (secm off). */
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			if (__chsc_do_secm(css, 0))
				ret = NOTIFY_BAD;
			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		/* Waking up: re-enable measurements (secm on). */
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			if (__chsc_do_secm(css, 1))
				ret = NOTIFY_BAD;
			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}

	return ret;
}
|
|
|
|
/* Registered in css_bus_init() to run css_power_event() on PM events. */
static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 *
 * Initialization order: chsc facilities, slow-path evaluation set, CRW
 * handler, css bus, one channel_subsystem per CSSID (with its pseudo
 * subchannel and optional cm_enable attribute), then the reboot and PM
 * notifiers.  The error path unwinds in exactly the reverse order.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			/* setup_css failed before device init: plain kfree. */
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			/* After device_register failure, drop the reference. */
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_file:
	/* Undo the partially initialized css at index i ... */
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	/* ... then tear down all fully initialized ones below it. */
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}
|
|
|
|
|
2009-09-22 20:58:33 +00:00
|
|
|
/*
 * Tear down everything css_bus_init() set up, in reverse order:
 * per-css devices and attributes, the css bus, the CRW handler, the
 * slow-path idset, chsc state and the I/O interrupt subclass.
 * Called only from the channel_subsystem_init() error path.
 */
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device, &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}
|
|
|
|
|
|
|
|
/*
 * Top-level initcall: bring up the css bus, create the cio workqueue
 * and initialize I/O subchannel support.  Unwinds via goto cleanup on
 * failure.
 */
static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
|
|
|
|
subsys_initcall(channel_subsystem_init);
|
|
|
|
|
2009-09-22 20:58:35 +00:00
|
|
|
static int css_settle(struct device_driver *drv, void *unused)
|
|
|
|
{
|
|
|
|
struct css_driver *cssdrv = to_cssdriver(drv);
|
|
|
|
|
|
|
|
if (cssdrv->settle)
|
2010-02-26 21:37:27 +00:00
|
|
|
return cssdrv->settle();
|
2009-09-22 20:58:35 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-02-26 21:37:29 +00:00
|
|
|
/*
 * Wait until all outstanding subchannel work has finished: first the
 * subchannel evaluation, then the cio workqueue, then each registered
 * css driver's type-specific settle hook.
 *
 * Returns 0 on success, -EINTR if the wait for subchannel evaluation
 * was interrupted, or the first non-zero settle result.
 */
int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
|
|
|
|
|
|
|
|
|
2009-09-22 20:58:33 +00:00
|
|
|
/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	/* Block until evaluation and driver settling are complete. */
	css_complete_work();
	return 0;
}
|
|
|
|
subsys_initcall_sync(channel_subsystem_init_sync);
|
|
|
|
|
2010-04-22 15:17:04 +00:00
|
|
|
/*
 * Re-establish channel subsystem facilities after a resume: re-enable
 * the multiple subchannel sets facility and refresh the cached channel
 * path descriptors for every known channel path.
 */
void channel_subsystem_reinit(void)
{
	struct channel_path *chp;
	struct chp_id chpid;

	chsc_enable_facility(CHSC_SDA_OC_MSS);
	chp_id_for_each(&chpid) {
		chp = chpid_to_chp(chpid);
		/* Skip channel-path ids with no channel_path object. */
		if (!chp)
			continue;
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
	}
}
|
|
|
|
|
2010-02-26 21:37:25 +00:00
|
|
|
#ifdef CONFIG_PROC_FS
|
|
|
|
/*
 * Write handler for /proc/cio_settle: any write drains pending channel
 * report words and waits for all css work to complete.  The written
 * data itself is ignored; on success the full count is consumed.
 */
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int rc;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	rc = css_complete_work();
	if (rc)
		return rc;

	return count;
}
|
|
|
|
|
|
|
|
static const struct file_operations cio_settle_proc_fops = {
|
2010-05-17 08:00:07 +00:00
|
|
|
.open = nonseekable_open,
|
2010-02-26 21:37:25 +00:00
|
|
|
.write = cio_settle_write,
|
llseek: automatically add .llseek fop
All file_operations should get a .llseek operation so we can make
nonseekable_open the default for future file operations without a
.llseek pointer.
The three cases that we can automatically detect are no_llseek, seq_lseek
and default_llseek. For cases where we can we can automatically prove that
the file offset is always ignored, we use noop_llseek, which maintains
the current behavior of not returning an error from a seek.
New drivers should normally not use noop_llseek but instead use no_llseek
and call nonseekable_open at open time. Existing drivers can be converted
to do the same when the maintainer knows for certain that no user code
relies on calling seek on the device file.
The generated code is often incorrectly indented and right now contains
comments that clarify for each added line why a specific variant was
chosen. In the version that gets submitted upstream, the comments will
be gone and I will manually fix the indentation, because there does not
seem to be a way to do that using coccinelle.
Some amount of new code is currently sitting in linux-next that should get
the same modifications, which I will do at the end of the merge window.
Many thanks to Julia Lawall for helping me learn to write a semantic
patch that does all this.
===== begin semantic patch =====
// This adds an llseek= method to all file operations,
// as a preparation for making no_llseek the default.
//
// The rules are
// - use no_llseek explicitly if we do nonseekable_open
// - use seq_lseek for sequential files
// - use default_llseek if we know we access f_pos
// - use noop_llseek if we know we don't access f_pos,
// but we still want to allow users to call lseek
//
@ open1 exists @
identifier nested_open;
@@
nested_open(...)
{
<+...
nonseekable_open(...)
...+>
}
@ open exists@
identifier open_f;
identifier i, f;
identifier open1.nested_open;
@@
int open_f(struct inode *i, struct file *f)
{
<+...
(
nonseekable_open(...)
|
nested_open(...)
)
...+>
}
@ read disable optional_qualifier exists @
identifier read_f;
identifier f, p, s, off;
type ssize_t, size_t, loff_t;
expression E;
identifier func;
@@
ssize_t read_f(struct file *f, char *p, size_t s, loff_t *off)
{
<+...
(
*off = E
|
*off += E
|
func(..., off, ...)
|
E = *off
)
...+>
}
@ read_no_fpos disable optional_qualifier exists @
identifier read_f;
identifier f, p, s, off;
type ssize_t, size_t, loff_t;
@@
ssize_t read_f(struct file *f, char *p, size_t s, loff_t *off)
{
... when != off
}
@ write @
identifier write_f;
identifier f, p, s, off;
type ssize_t, size_t, loff_t;
expression E;
identifier func;
@@
ssize_t write_f(struct file *f, const char *p, size_t s, loff_t *off)
{
<+...
(
*off = E
|
*off += E
|
func(..., off, ...)
|
E = *off
)
...+>
}
@ write_no_fpos @
identifier write_f;
identifier f, p, s, off;
type ssize_t, size_t, loff_t;
@@
ssize_t write_f(struct file *f, const char *p, size_t s, loff_t *off)
{
... when != off
}
@ fops0 @
identifier fops;
@@
struct file_operations fops = {
...
};
@ has_llseek depends on fops0 @
identifier fops0.fops;
identifier llseek_f;
@@
struct file_operations fops = {
...
.llseek = llseek_f,
...
};
@ has_read depends on fops0 @
identifier fops0.fops;
identifier read_f;
@@
struct file_operations fops = {
...
.read = read_f,
...
};
@ has_write depends on fops0 @
identifier fops0.fops;
identifier write_f;
@@
struct file_operations fops = {
...
.write = write_f,
...
};
@ has_open depends on fops0 @
identifier fops0.fops;
identifier open_f;
@@
struct file_operations fops = {
...
.open = open_f,
...
};
// use no_llseek if we call nonseekable_open
////////////////////////////////////////////
@ nonseekable1 depends on !has_llseek && has_open @
identifier fops0.fops;
identifier nso ~= "nonseekable_open";
@@
struct file_operations fops = {
... .open = nso, ...
+.llseek = no_llseek, /* nonseekable */
};
@ nonseekable2 depends on !has_llseek @
identifier fops0.fops;
identifier open.open_f;
@@
struct file_operations fops = {
... .open = open_f, ...
+.llseek = no_llseek, /* open uses nonseekable */
};
// use seq_lseek for sequential files
/////////////////////////////////////
@ seq depends on !has_llseek @
identifier fops0.fops;
identifier sr ~= "seq_read";
@@
struct file_operations fops = {
... .read = sr, ...
+.llseek = seq_lseek, /* we have seq_read */
};
// use default_llseek if there is a readdir
///////////////////////////////////////////
@ fops1 depends on !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
identifier readdir_e;
@@
// any other fop is used that changes pos
struct file_operations fops = {
... .readdir = readdir_e, ...
+.llseek = default_llseek, /* readdir is present */
};
// use default_llseek if at least one of read/write touches f_pos
/////////////////////////////////////////////////////////////////
@ fops2 depends on !fops1 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
identifier read.read_f;
@@
// read fops use offset
struct file_operations fops = {
... .read = read_f, ...
+.llseek = default_llseek, /* read accesses f_pos */
};
@ fops3 depends on !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
identifier write.write_f;
@@
// write fops use offset
struct file_operations fops = {
... .write = write_f, ...
+ .llseek = default_llseek, /* write accesses f_pos */
};
// Use noop_llseek if neither read nor write accesses f_pos
///////////////////////////////////////////////////////////
@ fops4 depends on !fops1 && !fops2 && !fops3 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
identifier read_no_fpos.read_f;
identifier write_no_fpos.write_f;
@@
// write fops use offset
struct file_operations fops = {
...
.write = write_f,
.read = read_f,
...
+.llseek = noop_llseek, /* read and write both use no f_pos */
};
@ depends on has_write && !has_read && !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
identifier write_no_fpos.write_f;
@@
struct file_operations fops = {
... .write = write_f, ...
+.llseek = noop_llseek, /* write uses no f_pos */
};
@ depends on has_read && !has_write && !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
identifier read_no_fpos.read_f;
@@
struct file_operations fops = {
... .read = read_f, ...
+.llseek = noop_llseek, /* read uses no f_pos */
};
@ depends on !has_read && !has_write && !fops1 && !fops2 && !has_llseek && !nonseekable1 && !nonseekable2 && !seq @
identifier fops0.fops;
@@
struct file_operations fops = {
...
+.llseek = noop_llseek, /* no read or write fn */
};
===== End semantic patch =====
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Cc: Julia Lawall <julia@diku.dk>
Cc: Christoph Hellwig <hch@infradead.org>
2010-08-15 16:52:59 +00:00
|
|
|
.llseek = no_llseek,
|
2010-02-26 21:37:25 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Create the /proc/cio_settle entry (root-writable only).
 * Returns 0 on success, -ENOMEM if the proc entry cannot be created.
 */
static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL,
			    &cio_settle_proc_fops);
	return entry ? 0 : -ENOMEM;
}
|
|
|
|
device_initcall(cio_settle_init);
|
|
|
|
#endif /*CONFIG_PROC_FS*/
|
|
|
|
|
2006-12-08 14:54:28 +00:00
|
|
|
/* Return non-zero if @sch is its channel subsystem's pseudo subchannel. */
int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
|
|
|
|
|
2008-07-14 07:59:03 +00:00
|
|
|
static int css_bus_match(struct device *dev, struct device_driver *drv)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2008-01-26 13:10:38 +00:00
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
struct css_driver *driver = to_cssdriver(drv);
|
2008-07-14 07:59:03 +00:00
|
|
|
struct css_device_id *id;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-07-14 07:59:03 +00:00
|
|
|
for (id = driver->subchannel_type; id->match_flags; id++) {
|
|
|
|
if (sch->st == id->type)
|
|
|
|
return 1;
|
|
|
|
}
|
2005-04-16 22:20:36 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-01-26 13:10:40 +00:00
|
|
|
static int css_probe(struct device *dev)
|
2006-01-11 09:56:22 +00:00
|
|
|
{
|
|
|
|
struct subchannel *sch;
|
2008-01-26 13:10:40 +00:00
|
|
|
int ret;
|
2006-01-11 09:56:22 +00:00
|
|
|
|
|
|
|
sch = to_subchannel(dev);
|
2008-01-26 13:10:38 +00:00
|
|
|
sch->driver = to_cssdriver(dev->driver);
|
2008-01-26 13:10:40 +00:00
|
|
|
ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
|
|
|
|
if (ret)
|
|
|
|
sch->driver = NULL;
|
|
|
|
return ret;
|
2006-01-11 09:56:22 +00:00
|
|
|
}
|
|
|
|
|
2008-01-26 13:10:40 +00:00
|
|
|
static int css_remove(struct device *dev)
|
2006-01-11 09:56:22 +00:00
|
|
|
{
|
|
|
|
struct subchannel *sch;
|
2008-01-26 13:10:40 +00:00
|
|
|
int ret;
|
2006-01-11 09:56:22 +00:00
|
|
|
|
|
|
|
sch = to_subchannel(dev);
|
2008-01-26 13:10:40 +00:00
|
|
|
ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
|
|
|
|
sch->driver = NULL;
|
|
|
|
return ret;
|
2006-01-11 09:56:22 +00:00
|
|
|
}
|
|
|
|
|
2008-01-26 13:10:40 +00:00
|
|
|
static void css_shutdown(struct device *dev)
|
2006-01-11 09:56:22 +00:00
|
|
|
{
|
|
|
|
struct subchannel *sch;
|
|
|
|
|
|
|
|
sch = to_subchannel(dev);
|
2008-01-26 13:10:40 +00:00
|
|
|
if (sch->driver && sch->driver->shutdown)
|
2006-01-11 09:56:22 +00:00
|
|
|
sch->driver->shutdown(sch);
|
|
|
|
}
|
|
|
|
|
2008-07-14 07:58:44 +00:00
|
|
|
static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
|
|
|
|
{
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = add_uevent_var(env, "ST=%01X", sch->st);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2009-06-16 08:30:22 +00:00
|
|
|
static int css_pm_prepare(struct device *dev)
|
|
|
|
{
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
struct css_driver *drv;
|
|
|
|
|
|
|
|
if (mutex_is_locked(&sch->reg_mutex))
|
|
|
|
return -EAGAIN;
|
|
|
|
if (!sch->dev.driver)
|
|
|
|
return 0;
|
|
|
|
drv = to_cssdriver(sch->dev.driver);
|
|
|
|
/* Notify drivers that they may not register children. */
|
|
|
|
return drv->prepare ? drv->prepare(sch) : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void css_pm_complete(struct device *dev)
|
|
|
|
{
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
struct css_driver *drv;
|
|
|
|
|
|
|
|
if (!sch->dev.driver)
|
|
|
|
return;
|
|
|
|
drv = to_cssdriver(sch->dev.driver);
|
|
|
|
if (drv->complete)
|
|
|
|
drv->complete(sch);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int css_pm_freeze(struct device *dev)
|
|
|
|
{
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
struct css_driver *drv;
|
|
|
|
|
|
|
|
if (!sch->dev.driver)
|
|
|
|
return 0;
|
|
|
|
drv = to_cssdriver(sch->dev.driver);
|
|
|
|
return drv->freeze ? drv->freeze(sch) : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int css_pm_thaw(struct device *dev)
|
|
|
|
{
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
struct css_driver *drv;
|
|
|
|
|
|
|
|
if (!sch->dev.driver)
|
|
|
|
return 0;
|
|
|
|
drv = to_cssdriver(sch->dev.driver);
|
|
|
|
return drv->thaw ? drv->thaw(sch) : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int css_pm_restore(struct device *dev)
|
|
|
|
{
|
|
|
|
struct subchannel *sch = to_subchannel(dev);
|
|
|
|
struct css_driver *drv;
|
|
|
|
|
2010-10-25 14:10:33 +00:00
|
|
|
css_update_ssd_info(sch);
|
2009-06-16 08:30:22 +00:00
|
|
|
if (!sch->dev.driver)
|
|
|
|
return 0;
|
|
|
|
drv = to_cssdriver(sch->dev.driver);
|
|
|
|
return drv->restore ? drv->restore(sch) : 0;
|
|
|
|
}
|
|
|
|
|
2009-12-15 02:00:08 +00:00
|
|
|
/* Power-management operations for devices on the css bus. */
static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
/* The bus type all subchannel devices and css drivers attach to. */
struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};
|
|
|
|
|
2008-01-26 13:10:41 +00:00
|
|
|
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 *
 * Returns the result of driver_register(): 0 on success, a negative
 * errno otherwise.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
|
|
|
|
|
|
|
|
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
EXPORT_SYMBOL(css_bus_type);
|