[PATCH] sem2mutex: drivers: raw, connector, dcdbas, ppp_generic
Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.

Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 8ed965d612
parent 9cdf18279d
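The sem2mutex change is mechanical: each semaphore that was only ever used as a binary lock is re-declared with DEFINE_MUTEX() instead of DECLARE_MUTEX(), and its down()/up() calls become mutex_lock()/mutex_unlock() on the same lock. A minimal sketch of the before/after pattern, using a hypothetical foo_lock and function rather than anything from this patch:

    #include <linux/mutex.h>

    /*
     * Before the conversion this would have been a semaphore used as a
     * binary lock:
     *
     *     static DECLARE_MUTEX(foo_lock);
     *     down(&foo_lock);   ... critical section ...   up(&foo_lock);
     */
    static DEFINE_MUTEX(foo_lock);          /* hypothetical lock, for illustration */

    static int foo_update_state(void)
    {
            mutex_lock(&foo_lock);          /* was: down(&foo_lock) */
            /* ... touch the state that foo_lock protects ... */
            mutex_unlock(&foo_lock);        /* was: up(&foo_lock) */
            return 0;
    }

Using a real mutex documents the binary-lock intent and, unlike a semaphore, lets the kernel's mutex debugging and lock validation machinery check the code.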
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -19,6 +19,7 @@
 #include <linux/uio.h>
 #include <linux/cdev.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
 
@@ -29,7 +30,7 @@ struct raw_device_data {
 
 static struct class *raw_class;
 static struct raw_device_data raw_devices[MAX_RAW_MINORS];
-static DECLARE_MUTEX(raw_mutex);
+static DEFINE_MUTEX(raw_mutex);
 static struct file_operations raw_ctl_fops; /* forward declaration */
 
 /*
@@ -53,7 +54,7 @@ static int raw_open(struct inode *inode, struct file *filp)
 		return 0;
 	}
 
-	down(&raw_mutex);
+	mutex_lock(&raw_mutex);
 
 	/*
 	 * All we need to do on open is check that the device is bound.
@@ -78,7 +79,7 @@ static int raw_open(struct inode *inode, struct file *filp)
 	filp->f_dentry->d_inode->i_mapping =
 		bdev->bd_inode->i_mapping;
 	filp->private_data = bdev;
-	up(&raw_mutex);
+	mutex_unlock(&raw_mutex);
 	return 0;
 
 out2:
@@ -86,7 +87,7 @@ out2:
 out1:
 	blkdev_put(bdev);
 out:
-	up(&raw_mutex);
+	mutex_unlock(&raw_mutex);
 	return err;
 }
 
@@ -99,14 +100,14 @@ static int raw_release(struct inode *inode, struct file *filp)
 	const int minor= iminor(inode);
 	struct block_device *bdev;
 
-	down(&raw_mutex);
+	mutex_lock(&raw_mutex);
 	bdev = raw_devices[minor].binding;
 	if (--raw_devices[minor].inuse == 0) {
 		/* Here inode->i_mapping == bdev->bd_inode->i_mapping */
 		inode->i_mapping = &inode->i_data;
 		inode->i_mapping->backing_dev_info = &default_backing_dev_info;
 	}
-	up(&raw_mutex);
+	mutex_unlock(&raw_mutex);
 
 	bd_release(bdev);
 	blkdev_put(bdev);
@@ -187,9 +188,9 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
 			goto out;
 		}
 
-		down(&raw_mutex);
+		mutex_lock(&raw_mutex);
 		if (rawdev->inuse) {
-			up(&raw_mutex);
+			mutex_unlock(&raw_mutex);
 			err = -EBUSY;
 			goto out;
 		}
@@ -211,11 +212,11 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
 				bind_device(&rq);
 			}
 		}
-		up(&raw_mutex);
+		mutex_unlock(&raw_mutex);
 	} else {
 		struct block_device *bdev;
 
-		down(&raw_mutex);
+		mutex_lock(&raw_mutex);
 		bdev = rawdev->binding;
 		if (bdev) {
 			rq.block_major = MAJOR(bdev->bd_dev);
@@ -223,7 +224,7 @@ static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
 		} else {
 			rq.block_major = rq.block_minor = 0;
 		}
-		up(&raw_mutex);
+		mutex_unlock(&raw_mutex);
 		if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) {
 			err = -EFAULT;
 			goto out;
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -26,6 +26,7 @@
 #include <linux/netlink.h>
 #include <linux/moduleparam.h>
 #include <linux/connector.h>
+#include <linux/mutex.h>
 
 #include <net/sock.h>
 
@@ -41,7 +42,7 @@ module_param(cn_val, uint, 0);
 MODULE_PARM_DESC(cn_idx, "Connector's main device idx.");
 MODULE_PARM_DESC(cn_val, "Connector's main device val.");
 
-static DECLARE_MUTEX(notify_lock);
+static DEFINE_MUTEX(notify_lock);
 static LIST_HEAD(notify_list);
 
 static struct cn_dev cdev;
@@ -260,7 +261,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
 {
 	struct cn_ctl_entry *ent;
 
-	down(&notify_lock);
+	mutex_lock(&notify_lock);
 	list_for_each_entry(ent, &notify_list, notify_entry) {
 		int i;
 		struct cn_notify_req *req;
@@ -293,7 +294,7 @@ static void cn_notify(struct cb_id *id, u32 notify_event)
 			cn_netlink_send(&m, ctl->group, GFP_KERNEL);
 		}
 	}
-	up(&notify_lock);
+	mutex_unlock(&notify_lock);
 }
 
 /*
@@ -407,14 +408,14 @@ static void cn_callback(void *data)
 	if (ctl->group == 0) {
 		struct cn_ctl_entry *n;
 
-		down(&notify_lock);
+		mutex_lock(&notify_lock);
 		list_for_each_entry_safe(ent, n, &notify_list, notify_entry) {
 			if (cn_ctl_msg_equals(ent->msg, ctl)) {
 				list_del(&ent->notify_entry);
 				kfree(ent);
 			}
 		}
-		up(&notify_lock);
+		mutex_unlock(&notify_lock);
 
 		return;
 	}
@@ -429,9 +430,9 @@ static void cn_callback(void *data)
 
 	memcpy(ent->msg, ctl, size - sizeof(*ent));
 
-	down(&notify_lock);
+	mutex_lock(&notify_lock);
 	list_add(&ent->notify_entry, &notify_list);
-	up(&notify_lock);
+	mutex_unlock(&notify_lock);
 }
 
 static int __init cn_init(void)
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -33,6 +33,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
+#include <linux/mutex.h>
 #include <asm/io.h>
 #include <asm/semaphore.h>
 
@@ -48,7 +49,7 @@ static u8 *smi_data_buf;
 static dma_addr_t smi_data_buf_handle;
 static unsigned long smi_data_buf_size;
 static u32 smi_data_buf_phys_addr;
-static DECLARE_MUTEX(smi_data_lock);
+static DEFINE_MUTEX(smi_data_lock);
 
 static unsigned int host_control_action;
 static unsigned int host_control_smi_type;
@@ -139,9 +140,9 @@ static ssize_t smi_data_buf_size_store(struct device *dev,
 	buf_size = simple_strtoul(buf, NULL, 10);
 
 	/* make sure SMI data buffer is at least buf_size */
-	down(&smi_data_lock);
+	mutex_lock(&smi_data_lock);
 	ret = smi_data_buf_realloc(buf_size);
-	up(&smi_data_lock);
+	mutex_unlock(&smi_data_lock);
 	if (ret)
 		return ret;
 
@@ -154,7 +155,7 @@ static ssize_t smi_data_read(struct kobject *kobj, char *buf, loff_t pos,
 	size_t max_read;
 	ssize_t ret;
 
-	down(&smi_data_lock);
+	mutex_lock(&smi_data_lock);
 
 	if (pos >= smi_data_buf_size) {
 		ret = 0;
@@ -165,7 +166,7 @@ static ssize_t smi_data_read(struct kobject *kobj, char *buf, loff_t pos,
 	ret = min(max_read, count);
 	memcpy(buf, smi_data_buf + pos, ret);
 out:
-	up(&smi_data_lock);
+	mutex_unlock(&smi_data_lock);
 	return ret;
 }
 
@@ -174,7 +175,7 @@ static ssize_t smi_data_write(struct kobject *kobj, char *buf, loff_t pos,
 {
 	ssize_t ret;
 
-	down(&smi_data_lock);
+	mutex_lock(&smi_data_lock);
 
 	ret = smi_data_buf_realloc(pos + count);
 	if (ret)
@@ -183,7 +184,7 @@ static ssize_t smi_data_write(struct kobject *kobj, char *buf, loff_t pos,
 	memcpy(smi_data_buf + pos, buf, count);
 	ret = count;
 out:
-	up(&smi_data_lock);
+	mutex_unlock(&smi_data_lock);
 	return ret;
 }
 
@@ -201,9 +202,9 @@ static ssize_t host_control_action_store(struct device *dev,
 	ssize_t ret;
 
 	/* make sure buffer is available for host control command */
-	down(&smi_data_lock);
+	mutex_lock(&smi_data_lock);
 	ret = smi_data_buf_realloc(sizeof(struct apm_cmd));
-	up(&smi_data_lock);
+	mutex_unlock(&smi_data_lock);
 	if (ret)
 		return ret;
 
@@ -302,7 +303,7 @@ static ssize_t smi_request_store(struct device *dev,
 	unsigned long val = simple_strtoul(buf, NULL, 10);
 	ssize_t ret;
 
-	down(&smi_data_lock);
+	mutex_lock(&smi_data_lock);
 
 	if (smi_data_buf_size < sizeof(struct smi_cmd)) {
 		ret = -ENODEV;
@@ -334,7 +335,7 @@ static ssize_t smi_request_store(struct device *dev,
 	}
 
 out:
-	up(&smi_data_lock);
+	mutex_unlock(&smi_data_lock);
 	return ret;
 }
 
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
 #include <linux/rwsem.h>
 #include <linux/stddef.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
 #include <net/slhc_vj.h>
 #include <asm/atomic.h>
 
@@ -198,11 +199,11 @@ static unsigned int cardmap_find_first_free(struct cardmap *map);
 static void cardmap_destroy(struct cardmap **map);
 
 /*
- * all_ppp_sem protects the all_ppp_units mapping.
+ * all_ppp_mutex protects the all_ppp_units mapping.
  * It also ensures that finding a ppp unit in the all_ppp_units map
  * and updating its file.refcnt field is atomic.
  */
-static DECLARE_MUTEX(all_ppp_sem);
+static DEFINE_MUTEX(all_ppp_mutex);
 static struct cardmap *all_ppp_units;
 static atomic_t ppp_unit_count = ATOMIC_INIT(0);
 
@@ -804,7 +805,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
 		/* Attach to an existing ppp unit */
 		if (get_user(unit, p))
 			break;
-		down(&all_ppp_sem);
+		mutex_lock(&all_ppp_mutex);
 		err = -ENXIO;
 		ppp = ppp_find_unit(unit);
 		if (ppp != 0) {
@@ -812,7 +813,7 @@ static int ppp_unattached_ioctl(struct ppp_file *pf, struct file *file,
 			file->private_data = &ppp->file;
 			err = 0;
 		}
-		up(&all_ppp_sem);
+		mutex_unlock(&all_ppp_mutex);
 		break;
 
 	case PPPIOCATTCHAN:
@@ -2446,7 +2447,7 @@ ppp_create_interface(int unit, int *retp)
 	dev->do_ioctl = ppp_net_ioctl;
 
 	ret = -EEXIST;
-	down(&all_ppp_sem);
+	mutex_lock(&all_ppp_mutex);
 	if (unit < 0)
 		unit = cardmap_find_first_free(all_ppp_units);
 	else if (cardmap_get(all_ppp_units, unit) != NULL)
@@ -2465,12 +2466,12 @@ ppp_create_interface(int unit, int *retp)
 
 	atomic_inc(&ppp_unit_count);
 	cardmap_set(&all_ppp_units, unit, ppp);
-	up(&all_ppp_sem);
+	mutex_unlock(&all_ppp_mutex);
 	*retp = 0;
 	return ppp;
 
 out2:
-	up(&all_ppp_sem);
+	mutex_unlock(&all_ppp_mutex);
 	free_netdev(dev);
 out1:
 	kfree(ppp);
@@ -2500,7 +2501,7 @@ static void ppp_shutdown_interface(struct ppp *ppp)
 {
 	struct net_device *dev;
 
-	down(&all_ppp_sem);
+	mutex_lock(&all_ppp_mutex);
 	ppp_lock(ppp);
 	dev = ppp->dev;
 	ppp->dev = NULL;
@@ -2514,7 +2515,7 @@ static void ppp_shutdown_interface(struct ppp *ppp)
 	ppp->file.dead = 1;
 	ppp->owner = NULL;
 	wake_up_interruptible(&ppp->file.rwait);
-	up(&all_ppp_sem);
+	mutex_unlock(&all_ppp_mutex);
 }
 
 /*
@@ -2556,7 +2557,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
 
 /*
  * Locate an existing ppp unit.
- * The caller should have locked the all_ppp_sem.
+ * The caller should have locked the all_ppp_mutex.
 */
 static struct ppp *
 ppp_find_unit(int unit)
@@ -2601,7 +2602,7 @@ ppp_connect_channel(struct channel *pch, int unit)
 	int ret = -ENXIO;
 	int hdrlen;
 
-	down(&all_ppp_sem);
+	mutex_lock(&all_ppp_mutex);
 	ppp = ppp_find_unit(unit);
 	if (ppp == 0)
 		goto out;
@@ -2626,7 +2627,7 @@ ppp_connect_channel(struct channel *pch, int unit)
 outl:
 	write_unlock_bh(&pch->upl);
 out:
-	up(&all_ppp_sem);
+	mutex_unlock(&all_ppp_mutex);
 	return ret;
 }
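The comment updated in ppp_generic.c above spells out why this particular lock matters: looking a unit up in all_ppp_units and bumping its file.refcnt must happen without the map changing in between. A rough sketch of that lookup-and-take-reference pattern under a mutex, using simplified stand-in types and names rather than the driver's actual code:

    #include <linux/mutex.h>
    #include <asm/atomic.h>

    /* Illustrative stand-ins -- not the driver's real structures. */
    struct unit {
            atomic_t refcnt;
    };

    #define MAX_UNITS 8
    static struct unit *unit_map[MAX_UNITS];    /* plays the role of all_ppp_units */
    static DEFINE_MUTEX(unit_map_mutex);        /* plays the role of all_ppp_mutex */

    /* Simplified stand-in for ppp_find_unit(); caller must hold unit_map_mutex. */
    static struct unit *unit_map_find(int n)
    {
            return (n >= 0 && n < MAX_UNITS) ? unit_map[n] : NULL;
    }

    static struct unit *unit_get(int n)
    {
            struct unit *u;

            mutex_lock(&unit_map_mutex);
            u = unit_map_find(n);               /* find the unit in the map ...        */
            if (u)
                    atomic_inc(&u->refcnt);     /* ... and take a reference atomically */
            mutex_unlock(&unit_map_mutex);      /* with respect to map updates         */

            return u;
    }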