[S390] cio: consolidate workqueues

We used to maintain two single-threaded workqueues for synchronization
and for triggering work from interrupt context. Since our latest cio
changes, only one of these workqueues is still in use. Get rid of the
unused workqueue, rename the remaining one to "cio_work_q", and move
its ownership to the channel subsystem driver.

Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
commit be5d3823f2 (parent 6f5d09a0e9)
Author:    Sebastian Ott <sebott@linux.vnet.ibm.com>
Date:      2010-02-26 22:37:24 +01:00
Committer: Martin Schwidefsky <schwidefsky@de.ibm.com>

4 changed files with 25 additions and 32 deletions
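
For reference, a minimal sketch of the workqueue lifecycle this patch
consolidates around, using the standard kernel workqueue API. The
example_* names are made up for illustration and are not part of the
patch:

	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static void example_func(struct work_struct *work)
	{
		/* Runs later, in process context, on example_wq's worker thread. */
	}
	static DECLARE_WORK(example_work, example_func);

	static int __init example_init(void)
	{
		/* One single-threaded queue, as cio_work_q is created below. */
		example_wq = create_singlethread_workqueue("example");
		if (!example_wq)
			return -ENOMEM;
		/* queue_work() may be called from interrupt context. */
		queue_work(example_wq, &example_work);
		/* flush_workqueue() blocks until all queued work has run. */
		flush_workqueue(example_wq);
		return 0;
	}

	static void __exit example_exit(void)
	{
		destroy_workqueue(example_wq);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

Because create_singlethread_workqueue() provides one dedicated worker
thread, items queued on cio_work_q run serially, which is why a single
queue can take over the duties of both old ones.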

diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c

@@ -232,7 +232,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
 	if (!get_device(&sch->dev))
 		return;
 	sch->todo = todo;
-	if (!queue_work(slow_path_wq, &sch->todo_work)) {
+	if (!queue_work(cio_work_q, &sch->todo_work)) {
 		/* Already queued, release workqueue ref. */
 		put_device(&sch->dev);
 	}
@@ -543,7 +543,7 @@ static void css_slow_path_func(struct work_struct *unused)
 }
 static DECLARE_WORK(slow_path_work, css_slow_path_func);
-struct workqueue_struct *slow_path_wq;
+struct workqueue_struct *cio_work_q;
 
 void css_schedule_eval(struct subchannel_id schid)
 {
@@ -552,7 +552,7 @@ void css_schedule_eval(struct subchannel_id schid)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_sch_add(slow_subchannel_set, schid);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
@@ -563,7 +563,7 @@ void css_schedule_eval_all(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_fill(slow_subchannel_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
@@ -594,14 +594,14 @@ void css_schedule_eval_all_unreg(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_add_set(slow_subchannel_set, unreg_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 	idset_free(unreg_set);
 }
 
 void css_wait_for_slow_path(void)
 {
-	flush_workqueue(slow_path_wq);
+	flush_workqueue(cio_work_q);
 }
 
 /* Schedule reprobing of all unregistered subchannels. */
@@ -992,12 +992,21 @@ static int __init channel_subsystem_init(void)
 	ret = css_bus_init();
 	if (ret)
 		return ret;
+	cio_work_q = create_singlethread_workqueue("cio");
+	if (!cio_work_q) {
+		ret = -ENOMEM;
+		goto out_bus;
+	}
 	ret = io_subchannel_init();
 	if (ret)
-		css_bus_cleanup();
+		goto out_wq;
 
 	return ret;
+out_wq:
+	destroy_workqueue(cio_work_q);
+out_bus:
+	css_bus_cleanup();
+	return ret;
 }
 subsys_initcall(channel_subsystem_init);
@@ -1020,6 +1029,7 @@ static int __init channel_subsystem_init_sync(void)
 	css_schedule_eval_all();
 	/* Wait for the evaluation of subchannels to finish. */
 	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
+	flush_workqueue(cio_work_q);
 	/* Wait for the subchannel type specific initialization to finish */
 	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
 }
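
One detail worth keeping in mind for the css_sched_sch_todo() hunk
above: queue_work() returns false when the work item is already
pending, in which case the device reference taken for the queued work
has to be dropped again. A minimal sketch of that reference-counting
idiom, with hypothetical example_* names that are not part of this
patch:

	#include <linux/device.h>
	#include <linux/workqueue.h>

	/*
	 * Hypothetical helper mirroring the idiom in css_sched_sch_todo():
	 * hold a device reference for as long as the work item is queued.
	 */
	static void example_sched_work(struct device *dev, struct work_struct *work,
				       struct workqueue_struct *wq)
	{
		/* Pin the device so it outlives the queued work. */
		if (!get_device(dev))
			return;
		if (!queue_work(wq, work)) {
			/* Already queued; release the reference we just took. */
			put_device(dev);
		}
		/* Otherwise the work function drops the reference when it runs. */
	}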

diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h

@@ -151,7 +151,7 @@ int sch_is_pseudo_sch(struct subchannel *);
 struct schib;
 int css_sch_is_valid(struct schib *);
 
-extern struct workqueue_struct *slow_path_wq;
+extern struct workqueue_struct *cio_work_q;
 void css_wait_for_slow_path(void);
 
 void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
 #endif

diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c

@@ -136,7 +136,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
 static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
 				   int);
 static void recovery_func(unsigned long data);
-struct workqueue_struct *ccw_device_work;
 wait_queue_head_t ccw_device_init_wq;
 atomic_t ccw_device_init_count;
@@ -163,7 +162,7 @@ static void io_subchannel_settle(void)
 {
 	wait_event(ccw_device_init_wq,
 		   atomic_read(&ccw_device_init_count) == 0);
-	flush_workqueue(ccw_device_work);
+	flush_workqueue(cio_work_q);
 }
 
 static struct css_driver io_subchannel_driver = {
@@ -188,27 +187,13 @@ int __init io_subchannel_init(void)
 	atomic_set(&ccw_device_init_count, 0);
 	setup_timer(&recovery_timer, recovery_func, 0);
 
-	ccw_device_work = create_singlethread_workqueue("cio");
-	if (!ccw_device_work)
-		return -ENOMEM;
-	slow_path_wq = create_singlethread_workqueue("kslowcrw");
-	if (!slow_path_wq) {
-		ret = -ENOMEM;
-		goto out_err;
-	}
-	if ((ret = bus_register (&ccw_bus_type)))
-		goto out_err;
+	ret = bus_register(&ccw_bus_type);
+	if (ret)
+		return ret;
 	ret = css_driver_register(&io_subchannel_driver);
 	if (ret)
-		goto out_err;
+		bus_unregister(&ccw_bus_type);
 
-	return 0;
-out_err:
-	if (ccw_device_work)
-		destroy_workqueue(ccw_device_work);
-	if (slow_path_wq)
-		destroy_workqueue(slow_path_wq);
 	return ret;
 }
@@ -2028,7 +2013,7 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
 	/* Get workqueue ref. */
 	if (!get_device(&cdev->dev))
 		return;
-	if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
+	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
 		/* Already queued, release workqueue ref. */
 		put_device(&cdev->dev);
 	}
@@ -2041,5 +2026,4 @@ EXPORT_SYMBOL(ccw_driver_register);
 EXPORT_SYMBOL(ccw_driver_unregister);
 EXPORT_SYMBOL(get_ccwdev_by_busid);
 EXPORT_SYMBOL(ccw_bus_type);
-EXPORT_SYMBOL(ccw_device_work);
 EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);

diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h

@@ -71,7 +71,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
 		cdev->private->state == DEV_STATE_BOXED);
 }
 
-extern struct workqueue_struct *ccw_device_work;
 extern wait_queue_head_t ccw_device_init_wq;
 extern atomic_t ccw_device_init_count;
 
 int __init io_subchannel_init(void);