block: switch gendisk lookup to a simple xarray
Now that bdev_map is only used for finding gendisks, we can use a simple xarray instead of the regions tracking structure for it.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 76487f0241
commit e418de3abc
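Before the diff itself, here is a condensed, self-contained sketch of the scheme the patch switches to: every minor of a registered disk gets its own xarray slot keyed by its dev_t, so looking a device up becomes a single xa_load() plus an offset calculation for the partition number. The *_sketch names and the standalone layout below are illustrative only, not part of the patch; the real code is the blk_register_region()/blk_unregister_region()/get_gendisk() changes in block/genhd.c shown in the hunks that follow.

/*
 * Illustrative sketch only -- the _sketch helpers are not part of the
 * patch.  One xarray slot per minor, keyed by dev_t, all pointing at
 * the owning gendisk.
 */
#include <linux/kernel.h>
#include <linux/genhd.h>
#include <linux/mutex.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(bdev_map_sketch);
static DEFINE_MUTEX(bdev_map_sketch_lock);

/* Registration: populate one slot for every minor of the disk. */
static void map_disk_sketch(struct gendisk *disk)
{
        int i;

        mutex_lock(&bdev_map_sketch_lock);
        for (i = 0; i < disk->minors; i++)
                WARN_ON_ONCE(xa_insert(&bdev_map_sketch, disk_devt(disk) + i,
                                       disk, GFP_KERNEL));
        mutex_unlock(&bdev_map_sketch_lock);
}

/* Lookup: a single xa_load(); the partition index is the dev_t offset. */
static struct gendisk *find_disk_sketch(dev_t devt, int *partno)
{
        struct gendisk *disk;

        mutex_lock(&bdev_map_sketch_lock);
        disk = xa_load(&bdev_map_sketch, devt);
        if (disk)
                *partno = devt - disk_devt(disk);
        mutex_unlock(&bdev_map_sketch_lock);
        return disk;
}

/* Teardown: erase every minor's slot again. */
static void unmap_disk_sketch(struct gendisk *disk)
{
        int i;

        mutex_lock(&bdev_map_sketch_lock);
        for (i = 0; i < disk->minors; i++)
                xa_erase(&bdev_map_sketch, disk_devt(disk) + i);
        mutex_unlock(&bdev_map_sketch_lock);
}

Compared with the old bdev_map hash of ranges, a lookup no longer walks chains sorted by range or goes through a probe callback; the get_gendisk() hunk in the patch layers the request_module()-and-retry step and the fops module refcounting on top of this basic pattern.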
block/genhd.c | 206

block/genhd.c
@@ -27,15 +27,7 @@
 
 static struct kobject *block_depr;
 
-struct bdev_map {
-        struct bdev_map *next;
-        dev_t dev;
-        unsigned long range;
-        struct module *owner;
-        struct kobject *(*probe)(dev_t, int *, void *);
-        int (*lock)(dev_t, void *);
-        void *data;
-} *bdev_map[255];
+static DEFINE_XARRAY(bdev_map);
 static DEFINE_MUTEX(bdev_map_lock);
 
 /* for extended dynamic devt allocation, currently only one major is used */
@@ -649,85 +641,26 @@ static char *bdevt_str(dev_t devt, char *buf)
         return buf;
 }
 
-/*
- * Register device numbers dev..(dev+range-1)
- * range must be nonzero
- * The hash chain is sorted on range, so that subranges can override.
- */
-void blk_register_region(dev_t devt, unsigned long range, struct module *module,
-                         struct kobject *(*probe)(dev_t, int *, void *),
-                         int (*lock)(dev_t, void *), void *data)
+static void blk_register_region(struct gendisk *disk)
 {
-        unsigned n = MAJOR(devt + range - 1) - MAJOR(devt) + 1;
-        unsigned index = MAJOR(devt);
-        unsigned i;
-        struct bdev_map *p;
-
-        n = min(n, 255u);
-        p = kmalloc_array(n, sizeof(struct bdev_map), GFP_KERNEL);
-        if (p == NULL)
-                return;
-
-        for (i = 0; i < n; i++, p++) {
-                p->owner = module;
-                p->probe = probe;
-                p->lock = lock;
-                p->dev = devt;
-                p->range = range;
-                p->data = data;
-        }
+        int i;
 
         mutex_lock(&bdev_map_lock);
-        for (i = 0, p -= n; i < n; i++, p++, index++) {
-                struct bdev_map **s = &bdev_map[index % 255];
-                while (*s && (*s)->range < range)
-                        s = &(*s)->next;
-                p->next = *s;
-                *s = p;
+        for (i = 0; i < disk->minors; i++) {
+                if (xa_insert(&bdev_map, disk_devt(disk) + i, disk, GFP_KERNEL))
+                        WARN_ON_ONCE(1);
         }
         mutex_unlock(&bdev_map_lock);
 }
-EXPORT_SYMBOL(blk_register_region);
 
-void blk_unregister_region(dev_t devt, unsigned long range)
+static void blk_unregister_region(struct gendisk *disk)
 {
-        unsigned n = MAJOR(devt + range - 1) - MAJOR(devt) + 1;
-        unsigned index = MAJOR(devt);
-        unsigned i;
-        struct bdev_map *found = NULL;
+        int i;
 
         mutex_lock(&bdev_map_lock);
-        for (i = 0; i < min(n, 255u); i++, index++) {
-                struct bdev_map **s;
-                for (s = &bdev_map[index % 255]; *s; s = &(*s)->next) {
-                        struct bdev_map *p = *s;
-                        if (p->dev == devt && p->range == range) {
-                                *s = p->next;
-                                if (!found)
-                                        found = p;
-                                break;
-                        }
-                }
-        }
+        for (i = 0; i < disk->minors; i++)
+                xa_erase(&bdev_map, disk_devt(disk) + i);
         mutex_unlock(&bdev_map_lock);
-        kfree(found);
 }
-EXPORT_SYMBOL(blk_unregister_region);
-
-static struct kobject *exact_match(dev_t devt, int *partno, void *data)
-{
-        struct gendisk *p = data;
-
-        return &disk_to_dev(p)->kobj;
-}
-
-static int exact_lock(dev_t devt, void *data)
-{
-        struct gendisk *p = data;
-
-        if (!get_disk_and_module(p))
-                return -1;
-        return 0;
-}
 
 static void disk_scan_partitions(struct gendisk *disk)
@@ -873,8 +806,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
                 ret = bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
                 WARN_ON(ret);
                 bdi_set_owner(bdi, dev);
-                blk_register_region(disk_devt(disk), disk->minors, NULL,
-                                    exact_match, exact_lock, disk);
+                blk_register_region(disk);
         }
         register_disk(parent, disk, groups);
         if (register_queue)
@@ -987,7 +919,7 @@ void del_gendisk(struct gendisk *disk)
         blk_unregister_queue(disk);
 
         if (!(disk->flags & GENHD_FL_HIDDEN))
-                blk_unregister_region(disk_devt(disk), disk->minors);
+                blk_unregister_region(disk);
         /*
          * Remove gendisk pointer from idr so that it cannot be looked up
          * while RCU period before freeing gendisk is running to prevent
@@ -1053,54 +985,22 @@ static void request_gendisk_module(dev_t devt)
         request_module("block-major-%d", MAJOR(devt));
 }
 
-static struct gendisk *lookup_gendisk(dev_t dev, int *partno)
+static bool get_disk_and_module(struct gendisk *disk)
 {
-        struct kobject *kobj;
-        struct bdev_map *p;
-        unsigned long best = ~0UL;
+        struct module *owner;
 
-retry:
-        mutex_lock(&bdev_map_lock);
-        for (p = bdev_map[MAJOR(dev) % 255]; p; p = p->next) {
-                struct kobject *(*probe)(dev_t, int *, void *);
-                struct module *owner;
-                void *data;
-
-                if (p->dev > dev || p->dev + p->range - 1 < dev)
-                        continue;
-                if (p->range - 1 >= best)
-                        break;
-                if (!try_module_get(p->owner))
-                        continue;
-                owner = p->owner;
-                data = p->data;
-                probe = p->probe;
-                best = p->range - 1;
-                *partno = dev - p->dev;
-
-                if (!probe) {
-                        mutex_unlock(&bdev_map_lock);
-                        module_put(owner);
-                        request_gendisk_module(dev);
-                        goto retry;
-                }
-
-                if (p->lock && p->lock(dev, data) < 0) {
-                        module_put(owner);
-                        continue;
-                }
-                mutex_unlock(&bdev_map_lock);
-                kobj = probe(dev, partno, data);
-                /* Currently ->owner protects _only_ ->probe() itself. */
+        if (!disk->fops)
+                return false;
+        owner = disk->fops->owner;
+        if (owner && !try_module_get(owner))
+                return false;
+        if (!kobject_get_unless_zero(&disk_to_dev(disk)->kobj)) {
                 module_put(owner);
-                if (kobj)
-                        return dev_to_disk(kobj_to_dev(kobj));
-                goto retry;
+                return false;
         }
-        mutex_unlock(&bdev_map_lock);
-        return NULL;
-}
+        return true;
+
+}
 
 /**
  * get_gendisk - get partitioning information for a given device
@@ -1119,7 +1019,19 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
         might_sleep();
 
         if (MAJOR(devt) != BLOCK_EXT_MAJOR) {
-                disk = lookup_gendisk(devt, partno);
+                mutex_lock(&bdev_map_lock);
+                disk = xa_load(&bdev_map, devt);
+                if (!disk) {
+                        mutex_unlock(&bdev_map_lock);
+                        request_gendisk_module(devt);
+                        mutex_lock(&bdev_map_lock);
+                        disk = xa_load(&bdev_map, devt);
+                }
+                if (disk && !get_disk_and_module(disk))
+                        disk = NULL;
+                if (disk)
+                        *partno = devt - disk_devt(disk);
+                mutex_unlock(&bdev_map_lock);
         } else {
                 struct hd_struct *part;
 
@@ -1323,21 +1235,6 @@ static const struct seq_operations partitions_op = {
 };
 #endif
 
-static void bdev_map_init(void)
-{
-        struct bdev_map *base;
-        int i;
-
-        base = kzalloc(sizeof(*base), GFP_KERNEL);
-        if (!base)
-                panic("cannot allocate bdev_map");
-
-        base->dev = 1;
-        base->range = ~0 ;
-        for (i = 0; i < 255; i++)
-                bdev_map[i] = base;
-}
-
 static int __init genhd_device_init(void)
 {
         int error;
@@ -1346,7 +1243,6 @@ static int __init genhd_device_init(void)
         error = class_register(&block_class);
         if (unlikely(error))
                 return error;
-        bdev_map_init();
         blk_dev_init();
 
         register_blkdev(BLOCK_EXT_MAJOR, "blkext");
@@ -1895,35 +1791,6 @@ out_free_disk:
 }
 EXPORT_SYMBOL(__alloc_disk_node);
 
-/**
- * get_disk_and_module - increments the gendisk and gendisk fops module refcount
- * @disk: the struct gendisk to increment the refcount for
- *
- * This increments the refcount for the struct gendisk, and the gendisk's
- * fops module owner.
- *
- * Context: Any context.
- */
-struct kobject *get_disk_and_module(struct gendisk *disk)
-{
-        struct module *owner;
-        struct kobject *kobj;
-
-        if (!disk->fops)
-                return NULL;
-        owner = disk->fops->owner;
-        if (owner && !try_module_get(owner))
-                return NULL;
-        kobj = kobject_get_unless_zero(&disk_to_dev(disk)->kobj);
-        if (kobj == NULL) {
-                module_put(owner);
-                return NULL;
-        }
-        return kobj;
-
-}
-EXPORT_SYMBOL(get_disk_and_module);
-
 /**
  * put_disk - decrements the gendisk refcount
  * @disk: the struct gendisk to decrement the refcount for
@@ -1960,7 +1827,6 @@ void put_disk_and_module(struct gendisk *disk)
                 module_put(owner);
         }
 }
 EXPORT_SYMBOL(put_disk_and_module);
-
 static void set_disk_ro_uevent(struct gendisk *gd, int ro)
 {
include/linux/genhd.h

@@ -339,15 +339,8 @@ int blk_add_partitions(struct gendisk *disk, struct block_device *bdev);
 int blk_drop_partitions(struct block_device *bdev);
 
 extern struct gendisk *__alloc_disk_node(int minors, int node_id);
-extern struct kobject *get_disk_and_module(struct gendisk *disk);
 extern void put_disk(struct gendisk *disk);
 extern void put_disk_and_module(struct gendisk *disk);
-extern void blk_register_region(dev_t devt, unsigned long range,
-                        struct module *module,
-                        struct kobject *(*probe)(dev_t, int *, void *),
-                        int (*lock)(dev_t, void *),
-                        void *data);
-extern void blk_unregister_region(dev_t devt, unsigned long range);
 
 #define alloc_disk_node(minors, node_id) \
 ({ \