block: switch partition lookup to use struct block_device

Use struct block_device to look up partitions on a disk.  This removes
all usage of struct hd_struct from the I/O path.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Acked-by: Coly Li <colyli@suse.de>			[bcache]
Acked-by: Chao Yu <yuchao0@huawei.com>			[f2fs]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Christoph Hellwig 2020-11-24 09:36:54 +01:00 committed by Jens Axboe
parent cb8432d650
commit 8446fe9255
22 changed files with 122 additions and 137 deletions
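
For orientation, a minimal sketch (not part of the commit) of the lookup pattern the I/O path uses after this change, modeled on the new guard_bio_eod() in the first hunk below. The helper name bio_max_sector() is made up for illustration, and the snippet assumes the usual block-layer headers rather than being standalone code.

/*
 * Illustrative only: mirrors the new guard_bio_eod().  The partition table
 * now hands back a struct block_device, so callers read the size directly
 * instead of going through struct hd_struct and then part->bdev.
 */
static sector_t bio_max_sector(struct bio *bio)
{
	struct block_device *part;
	sector_t maxsector;

	rcu_read_lock();
	part = __disk_get_part(bio->bi_disk, bio->bi_partno);
	if (part)
		maxsector = bdev_nr_sectors(part);	/* partition size */
	else
		maxsector = get_capacity(bio->bi_disk);	/* whole disk */
	rcu_read_unlock();

	return maxsector;
}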


@@ -608,12 +608,12 @@ void bio_truncate(struct bio *bio, unsigned new_size)
 void guard_bio_eod(struct bio *bio)
 {
 	sector_t maxsector;
-	struct hd_struct *part;
+	struct block_device *part;
 
 	rcu_read_lock();
 	part = __disk_get_part(bio->bi_disk, bio->bi_partno);
 	if (part)
-		maxsector = bdev_nr_sectors(part->bdev);
+		maxsector = bdev_nr_sectors(part);
 	else
 		maxsector = get_capacity(bio->bi_disk);
 	rcu_read_unlock();


@@ -666,10 +666,9 @@ static int __init setup_fail_make_request(char *str)
 }
 __setup("fail_make_request=", setup_fail_make_request);
 
-static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
+static bool should_fail_request(struct block_device *part, unsigned int bytes)
 {
-	return part->bdev->bd_make_it_fail &&
-		should_fail(&fail_make_request, bytes);
+	return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
 }
 
 static int __init fail_make_request_debugfs(void)
@@ -684,7 +683,7 @@ late_initcall(fail_make_request_debugfs);
 
 #else /* CONFIG_FAIL_MAKE_REQUEST */
 
-static inline bool should_fail_request(struct hd_struct *part,
+static inline bool should_fail_request(struct block_device *part,
 					unsigned int bytes)
 {
 	return false;
@@ -692,11 +691,11 @@ static inline bool should_fail_request(struct hd_struct *part,
 
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
-static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
+static inline bool bio_check_ro(struct bio *bio, struct block_device *part)
 {
 	const int op = bio_op(bio);
 
-	if (part->bdev->bd_read_only && op_is_write(op)) {
+	if (part->bd_read_only && op_is_write(op)) {
 		char b[BDEVNAME_SIZE];
 
 		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
@@ -704,7 +703,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
 
 		WARN_ONCE(1,
 		       "Trying to write to read-only block-device %s (partno %d)\n",
-			bio_devname(bio, b), part->partno);
+			bio_devname(bio, b), part->bd_partno);
 		/* Older lvm-tools actually trigger this */
 		return false;
 	}
@@ -714,8 +713,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
 
 static noinline int should_fail_bio(struct bio *bio)
 {
-	if (should_fail_request(bio->bi_disk->part0->bd_part,
-			bio->bi_iter.bi_size))
+	if (should_fail_request(bio->bi_disk->part0, bio->bi_iter.bi_size))
 		return -EIO;
 	return 0;
 }
@@ -744,7 +742,7 @@ static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
  */
 static inline int blk_partition_remap(struct bio *bio)
 {
-	struct hd_struct *p;
+	struct block_device *p;
 	int ret = -EIO;
 
 	rcu_read_lock();
@@ -757,12 +755,12 @@ static inline int blk_partition_remap(struct bio *bio)
 		goto out;
 
 	if (bio_sectors(bio)) {
-		if (bio_check_eod(bio, bdev_nr_sectors(p->bdev)))
+		if (bio_check_eod(bio, bdev_nr_sectors(p)))
 			goto out;
-		bio->bi_iter.bi_sector += p->bdev->bd_start_sect;
-		trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
+		bio->bi_iter.bi_sector += p->bd_start_sect;
+		trace_block_bio_remap(bio->bi_disk->queue, bio, p->bd_dev,
 				      bio->bi_iter.bi_sector -
-				      p->bdev->bd_start_sect);
+				      p->bd_start_sect);
 	}
 	bio->bi_partno = 0;
 	ret = 0;
@@ -832,7 +830,7 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
 		if (unlikely(blk_partition_remap(bio)))
 			goto end_io;
 	} else {
-		if (unlikely(bio_check_ro(bio, bio->bi_disk->part0->bd_part)))
+		if (unlikely(bio_check_ro(bio, bio->bi_disk->part0)))
 			goto end_io;
 		if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
 			goto end_io;
@@ -1204,7 +1202,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 		return ret;
 
 	if (rq->rq_disk &&
-	    should_fail_request(rq->rq_disk->part0->bd_part, blk_rq_bytes(rq)))
+	    should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
 		return BLK_STS_IOERR;
 
 	if (blk_crypto_insert_cloned_request(rq))
@@ -1263,17 +1261,18 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
 
-static void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
+static void update_io_ticks(struct block_device *part, unsigned long now,
+		bool end)
 {
 	unsigned long stamp;
 again:
-	stamp = READ_ONCE(part->bdev->bd_stamp);
+	stamp = READ_ONCE(part->bd_stamp);
 	if (unlikely(stamp != now)) {
-		if (likely(cmpxchg(&part->bdev->bd_stamp, stamp, now) == stamp))
+		if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
 			__part_stat_add(part, io_ticks, end ? now - stamp : 1);
 	}
-	if (part->partno) {
-		part = part_to_disk(part)->part0->bd_part;
+	if (part->bd_partno) {
+		part = bdev_whole(part);
 		goto again;
 	}
 }
@@ -1282,11 +1281,9 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
 	if (req->part && blk_do_io_stat(req)) {
 		const int sgrp = op_stat_group(req_op(req));
-		struct hd_struct *part;
 
 		part_stat_lock();
-		part = req->part;
-		part_stat_add(part, sectors[sgrp], bytes >> 9);
+		part_stat_add(req->part, sectors[sgrp], bytes >> 9);
 		part_stat_unlock();
 	}
 }
@@ -1301,14 +1298,11 @@ void blk_account_io_done(struct request *req, u64 now)
 	if (req->part && blk_do_io_stat(req) &&
 	    !(req->rq_flags & RQF_FLUSH_SEQ)) {
 		const int sgrp = op_stat_group(req_op(req));
-		struct hd_struct *part;
 
 		part_stat_lock();
-		part = req->part;
-
-		update_io_ticks(part, jiffies, true);
-		part_stat_inc(part, ios[sgrp]);
-		part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
+		update_io_ticks(req->part, jiffies, true);
+		part_stat_inc(req->part, ios[sgrp]);
+		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
 		part_stat_unlock();
 	}
 }
@@ -1325,7 +1319,7 @@ void blk_account_io_start(struct request *rq)
 	part_stat_unlock();
 }
 
-static unsigned long __part_start_io_acct(struct hd_struct *part,
+static unsigned long __part_start_io_acct(struct block_device *part,
 					  unsigned int sectors, unsigned int op)
 {
 	const int sgrp = op_stat_group(op);
@@ -1341,7 +1335,7 @@ static unsigned long __part_start_io_acct(struct hd_struct *part,
 	return now;
 }
 
-unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
+unsigned long part_start_io_acct(struct gendisk *disk, struct block_device **part,
 				 struct bio *bio)
 {
 	*part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);
@@ -1353,11 +1347,11 @@ EXPORT_SYMBOL_GPL(part_start_io_acct);
 unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
 				 unsigned int op)
 {
-	return __part_start_io_acct(disk->part0->bd_part, sectors, op);
+	return __part_start_io_acct(disk->part0, sectors, op);
 }
 EXPORT_SYMBOL(disk_start_io_acct);
 
-static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
+static void __part_end_io_acct(struct block_device *part, unsigned int op,
 			       unsigned long start_time)
 {
 	const int sgrp = op_stat_group(op);
@@ -1371,7 +1365,7 @@ static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
 	part_stat_unlock();
 }
 
-void part_end_io_acct(struct hd_struct *part, struct bio *bio,
+void part_end_io_acct(struct block_device *part, struct bio *bio,
 		      unsigned long start_time)
 {
 	__part_end_io_acct(part, bio_op(bio), start_time);
@@ -1381,7 +1375,7 @@ EXPORT_SYMBOL_GPL(part_end_io_acct);
 
 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
 		      unsigned long start_time)
{
-	__part_end_io_acct(disk->part0->bd_part, op, start_time);
+	__part_end_io_acct(disk->part0, op, start_time);
 }
 EXPORT_SYMBOL(disk_end_io_acct);


@@ -139,7 +139,7 @@ static void blk_flush_queue_rq(struct request *rq, bool add_front)
 
 static void blk_account_io_flush(struct request *rq)
 {
-	struct hd_struct *part = rq->rq_disk->part0->bd_part;
+	struct block_device *part = rq->rq_disk->part0;
 
 	part_stat_lock();
 	part_stat_inc(part, ios[STAT_FLUSH]);


@@ -95,7 +95,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 }
 
 struct mq_inflight {
-	struct hd_struct *part;
+	struct block_device *part;
 	unsigned int inflight[2];
 };
@@ -111,7 +111,8 @@ static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
 	return true;
 }
 
-unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
+unsigned int blk_mq_in_flight(struct request_queue *q,
+		struct block_device *part)
 {
 	struct mq_inflight mi = { .part = part };
@@ -120,8 +121,8 @@ unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
 	return mi.inflight[0] + mi.inflight[1];
 }
 
-void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
 			 unsigned int inflight[2])
 {
 	struct mq_inflight mi = { .part = part };


@@ -182,9 +182,10 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
 	return hctx->nr_ctx && hctx->tags;
 }
 
-unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
-void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
-	unsigned int inflight[2]);
+unsigned int blk_mq_in_flight(struct request_queue *q,
+		struct block_device *part);
+void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
+		unsigned int inflight[2]);
 
 static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
 {


@@ -215,7 +215,7 @@ static inline void elevator_exit(struct request_queue *q,
 	__elevator_exit(q, e);
 }
 
-struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
+struct block_device *__disk_get_part(struct gendisk *disk, int partno);
 
 ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
 		char *buf);
@@ -348,7 +348,7 @@ void blk_queue_free_zone_bitmaps(struct request_queue *q);
 static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
 #endif
 
-struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);
+struct block_device *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);
 
 int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
 void blk_free_devt(dev_t devt);


@@ -126,7 +126,7 @@ static void part_stat_read_all(struct hd_struct *part, struct disk_stats *stat)
 	}
 }
 
-static unsigned int part_in_flight(struct hd_struct *part)
+static unsigned int part_in_flight(struct block_device *part)
 {
 	unsigned int inflight = 0;
 	int cpu;
@@ -141,7 +141,8 @@ static unsigned int part_in_flight(struct hd_struct *part)
 	return inflight;
 }
 
-static void part_in_flight_rw(struct hd_struct *part, unsigned int inflight[2])
+static void part_in_flight_rw(struct block_device *part,
+		unsigned int inflight[2])
 {
 	int cpu;
@@ -157,7 +158,7 @@ static void part_in_flight_rw(struct hd_struct *part, unsigned int inflight[2])
 		inflight[1] = 0;
 }
 
-struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
+struct block_device *__disk_get_part(struct gendisk *disk, int partno)
 {
 	struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);
@@ -182,15 +183,21 @@ struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
  */
 struct hd_struct *disk_get_part(struct gendisk *disk, int partno)
 {
+	struct block_device *bdev;
 	struct hd_struct *part;
 
 	rcu_read_lock();
-	part = __disk_get_part(disk, partno);
-	if (part)
-		get_device(part_to_dev(part));
+	bdev = __disk_get_part(disk, partno);
+	if (!bdev)
+		goto fail;
+	part = bdev->bd_part;
+	if (!kobject_get_unless_zero(&part_to_dev(part)->kobj))
+		goto fail;
 	rcu_read_unlock();
-
 	return part;
+fail:
+	rcu_read_unlock();
+	return NULL;
 }
 
 /**
@@ -264,19 +271,19 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter)
 
 	/* iterate to the next partition */
 	for (; piter->idx != end; piter->idx += inc) {
-		struct hd_struct *part;
+		struct block_device *part;
 
 		part = rcu_dereference(ptbl->part[piter->idx]);
 		if (!part)
 			continue;
-		if (!bdev_nr_sectors(part->bdev) &&
+		if (!bdev_nr_sectors(part) &&
 		    !(piter->flags & DISK_PITER_INCL_EMPTY) &&
 		    !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 &&
 		      piter->idx == 0))
 			continue;
 
-		get_device(part_to_dev(part));
-		piter->part = part;
+		get_device(part_to_dev(part->bd_part));
+		piter->part = part->bd_part;
 		piter->idx += inc;
 		break;
 	}
@@ -303,10 +310,10 @@ void disk_part_iter_exit(struct disk_part_iter *piter)
 }
 EXPORT_SYMBOL_GPL(disk_part_iter_exit);
 
-static inline int sector_in_part(struct hd_struct *part, sector_t sector)
+static inline int sector_in_part(struct block_device *part, sector_t sector)
 {
-	return part->bdev->bd_start_sect <= sector &&
-		sector < part->bdev->bd_start_sect + bdev_nr_sectors(part->bdev);
+	return part->bd_start_sect <= sector &&
+		sector < part->bd_start_sect + bdev_nr_sectors(part);
 }
 
 /**
@@ -324,10 +331,10 @@ static inline int sector_in_part(struct hd_struct *part, sector_t sector)
  * Found partition on success, part0 is returned if no partition matches
 * or the matched partition is being deleted.
 */
-struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
+struct block_device *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
 {
 	struct disk_part_tbl *ptbl;
-	struct hd_struct *part;
+	struct block_device *part;
 	int i;
 
 	rcu_read_lock();
@@ -346,7 +353,7 @@ struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
 		}
 	}
 
-	part = disk->part0->bd_part;
+	part = disk->part0;
 out_unlock:
 	rcu_read_unlock();
 	return part;
@@ -882,7 +889,7 @@ void del_gendisk(struct gendisk *disk)
 	kobject_put(disk->part0->bd_holder_dir);
 	kobject_put(disk->slave_dir);
 
-	part_stat_set_all(disk->part0->bd_part, 0);
+	part_stat_set_all(disk->part0, 0);
 	disk->part0->bd_stamp = 0;
 	if (!sysfs_deprecated)
 		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
@@ -1189,9 +1196,9 @@ ssize_t part_stat_show(struct device *dev,
 	part_stat_read_all(p, &stat);
 	if (queue_is_mq(q))
-		inflight = blk_mq_in_flight(q, p);
+		inflight = blk_mq_in_flight(q, p->bdev);
 	else
-		inflight = part_in_flight(p);
+		inflight = part_in_flight(p->bdev);
 
 	return sprintf(buf,
 		"%8lu %8lu %8llu %8u "
@@ -1231,9 +1238,9 @@ ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
 	unsigned int inflight[2];
 
 	if (queue_is_mq(q))
-		blk_mq_in_flight_rw(q, p, inflight);
+		blk_mq_in_flight_rw(q, p->bdev, inflight);
 	else
-		part_in_flight_rw(p, inflight);
+		part_in_flight_rw(p->bdev, inflight);
 
 	return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
 }
@@ -1506,9 +1513,9 @@ static int diskstats_show(struct seq_file *seqf, void *v)
 	while ((hd = disk_part_iter_next(&piter))) {
 		part_stat_read_all(hd, &stat);
 		if (queue_is_mq(gp->queue))
-			inflight = blk_mq_in_flight(gp->queue, hd);
+			inflight = blk_mq_in_flight(gp->queue, hd->bdev);
 		else
-			inflight = part_in_flight(hd);
+			inflight = part_in_flight(hd->bdev);
 
 		seq_printf(seqf, "%4d %7d %s "
 			   "%lu %lu %lu %u "
@@ -1626,7 +1633,7 @@ struct gendisk *__alloc_disk_node(int minors, int node_id)
 			goto out_bdput;
 
 	ptbl = rcu_dereference_protected(disk->part_tbl, 1);
-	rcu_assign_pointer(ptbl->part[0], disk->part0->bd_part);
+	rcu_assign_pointer(ptbl->part[0], disk->part0);
 
 	disk->minors = minors;
 	rand_initialize_disk(disk);


@@ -298,12 +298,9 @@ void delete_partition(struct hd_struct *part)
 	struct disk_part_tbl *ptbl =
 		rcu_dereference_protected(disk->part_tbl, 1);
 
-	/*
-	 * ->part_tbl is referenced in this part's release handler, so
-	 * we have to hold the disk device
-	 */
 	rcu_assign_pointer(ptbl->part[part->partno], NULL);
 	rcu_assign_pointer(ptbl->last_lookup, NULL);
 	kobject_put(part->bdev->bd_holder_dir);
 	device_del(part_to_dev(part));
@@ -421,7 +418,7 @@ static struct hd_struct *add_partition(struct gendisk *disk, int partno,
 
 	/* everything is up and running, commence */
 	bdev_add(bdev, devt);
-	rcu_assign_pointer(ptbl->part[partno], p);
+	rcu_assign_pointer(ptbl->part[partno], bdev);
 
 	/* suppress uevent if the disk suppresses it */
 	if (!dev_get_uevent_suppress(ddev))


@@ -2802,7 +2802,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
 	if (c_min_rate == 0)
 		return false;
 
-	curr_events = (int)part_stat_read_accum(disk->part0->bd_part, sectors) -
+	curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
 			atomic_read(&device->rs_sect_ev);
 
 	if (atomic_read(&device->ap_actlog_cnt)


@@ -1679,7 +1679,7 @@ void drbd_rs_controller_reset(struct drbd_device *device)
 	atomic_set(&device->rs_sect_ev, 0);
 	device->rs_in_flight = 0;
 	device->rs_last_events =
-		(int)part_stat_read_accum(disk->part0->bd_part, sectors);
+		(int)part_stat_read_accum(disk->part0, sectors);
 
 	/* Updating the RCU protected object in place is necessary since
 	   this function gets called from atomic context.


@@ -1687,7 +1687,7 @@ static void zram_reset_device(struct zram *zram)
 
 	zram->disksize = 0;
 	set_capacity_and_notify(zram->disk, 0);
-	part_stat_set_all(zram->disk->part0->bd_part, 0);
+	part_stat_set_all(zram->disk->part0, 0);
 
 	up_write(&zram->init_lock);
 	/* I/O operation under all of CPU are done so let's free */


@@ -475,7 +475,7 @@ struct search {
 	unsigned int read_dirty_data:1;
 	unsigned int cache_missed:1;
 
-	struct hd_struct *part;
+	struct block_device *part;
 	unsigned long start_time;
 
 	struct btree_op op;
@@ -1073,7 +1073,7 @@ struct detached_dev_io_private {
 	unsigned long start_time;
 	bio_end_io_t *bi_end_io;
 	void *bi_private;
-	struct hd_struct *part;
+	struct block_device *part;
 };
 
 static void detached_dev_end_io(struct bio *bio)


@@ -1607,7 +1607,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 			 * (by eliminating DM's splitting and just using bio_split)
 			 */
 			part_stat_lock();
-			__dm_part_stat_sub(dm_disk(md)->part0->bd_part,
+			__dm_part_stat_sub(dm_disk(md)->part0,
 					   sectors[op_stat_group(bio_op(bio))], ci.sector_count);
 			part_stat_unlock();
@@ -2242,7 +2242,7 @@ EXPORT_SYMBOL_GPL(dm_put);
 static bool md_in_flight_bios(struct mapped_device *md)
 {
 	int cpu;
-	struct hd_struct *part = dm_disk(md)->part0->bd_part;
+	struct block_device *part = dm_disk(md)->part0;
 	long sum = 0;
 
 	for_each_possible_cpu(cpu) {


@@ -464,7 +464,7 @@ struct md_io {
 	bio_end_io_t *orig_bi_end_io;
 	void *orig_bi_private;
 	unsigned long start_time;
-	struct hd_struct *part;
+	struct block_device *part;
 };
 
 static void md_end_io(struct bio *bio)
@@ -8441,7 +8441,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
 	rcu_read_lock();
 	rdev_for_each_rcu(rdev, mddev) {
 		struct gendisk *disk = rdev->bdev->bd_disk;
-		curr_events = (int)part_stat_read_accum(disk->part0->bd_part, sectors) -
+		curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
 			      atomic_read(&disk->sync_io);
 		/* sync IO will cause sync_io to increase before the disk_stats
 		 * as sync_io is counted when a request starts, and


@@ -89,12 +89,12 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
 	if (!ns->bdev)
 		goto out;
 
-	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
-	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
-		sectors[READ]), 1000);
-	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
-	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
-		sectors[WRITE]), 1000);
+	host_reads = part_stat_read(ns->bdev, ios[READ]);
+	data_units_read =
+		DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[READ]), 1000);
+	host_writes = part_stat_read(ns->bdev, ios[WRITE]);
+	data_units_written =
+		DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[WRITE]), 1000);
 
 	put_unaligned_le64(host_reads, &slog->host_reads[0]);
 	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -120,12 +120,12 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
 		/* we don't have the right data for file backed ns */
 		if (!ns->bdev)
 			continue;
-		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
+		host_reads += part_stat_read(ns->bdev, ios[READ]);
 		data_units_read += DIV_ROUND_UP(
-			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
-		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+			part_stat_read(ns->bdev, sectors[READ]), 1000);
+		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
 		data_units_written += DIV_ROUND_UP(
-			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
+			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
 	}
 
 	put_unaligned_le64(host_reads, &slog->host_reads[0]);


@@ -4048,9 +4048,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	sbi->s_sb = sb;
 	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
 	sbi->s_sb_block = sb_block;
-	if (sb->s_bdev->bd_part)
-		sbi->s_sectors_written_start =
-			part_stat_read(sb->s_bdev->bd_part, sectors[STAT_WRITE]);
+	sbi->s_sectors_written_start =
+		part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
 
 	/* Cleanup superblock name */
 	strreplace(sb->s_id, '/', '!');
@@ -5509,15 +5508,10 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 	 */
 	if (!(sb->s_flags & SB_RDONLY))
 		ext4_update_tstamp(es, s_wtime);
-	if (sb->s_bdev->bd_part)
-		es->s_kbytes_written =
-			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
-			    ((part_stat_read(sb->s_bdev->bd_part,
-				sectors[STAT_WRITE]) -
-			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
-	else
-		es->s_kbytes_written =
-			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
+	es->s_kbytes_written =
+		cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
+		    ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
+		      EXT4_SB(sb)->s_sectors_written_start) >> 1));
 	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
 		ext4_free_blocks_count_set(es,
 			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(


@@ -62,11 +62,8 @@ static ssize_t session_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
 {
 	struct super_block *sb = sbi->s_buddy_cache->i_sb;
 
-	if (!sb->s_bdev->bd_part)
-		return snprintf(buf, PAGE_SIZE, "0\n");
 	return snprintf(buf, PAGE_SIZE, "%lu\n",
-			(part_stat_read(sb->s_bdev->bd_part,
-					sectors[STAT_WRITE]) -
+			(part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
 			 sbi->s_sectors_written_start) >> 1);
 }
@@ -74,12 +71,9 @@ static ssize_t lifetime_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
 {
 	struct super_block *sb = sbi->s_buddy_cache->i_sb;
 
-	if (!sb->s_bdev->bd_part)
-		return snprintf(buf, PAGE_SIZE, "0\n");
 	return snprintf(buf, PAGE_SIZE, "%llu\n",
 			(unsigned long long)(sbi->s_kbytes_written +
-			((part_stat_read(sb->s_bdev->bd_part,
-					 sectors[STAT_WRITE]) -
+			((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
 			  EXT4_SB(sb)->s_sectors_written_start) >> 1)));
 }


@@ -1675,7 +1675,7 @@ static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
 * and the return value is in kbytes. s is of struct f2fs_sb_info.
 */
 #define BD_PART_WRITTEN(s)						\
-	(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[STAT_WRITE]) -	\
+	(((u64)part_stat_read((s)->sb->s_bdev, sectors[STAT_WRITE]) -	\
 		(s)->sectors_written_start) >> 1)
 
 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)


@@ -3700,10 +3700,8 @@ try_onemore:
 	}
 
 	/* For write statistics */
-	if (sb->s_bdev->bd_part)
-		sbi->sectors_written_start =
-			(u64)part_stat_read(sb->s_bdev->bd_part,
-					    sectors[STAT_WRITE]);
+	sbi->sectors_written_start =
+		(u64)part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
 
 	/* Read accumulated write IO statistics if exists */
 	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);


@@ -191,7 +191,7 @@ struct request {
 	};
 
 	struct gendisk *rq_disk;
-	struct hd_struct *part;
+	struct block_device *part;
 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
 	/* Time that the first bio started allocating this request. */
 	u64 alloc_time_ns;
@@ -1943,9 +1943,9 @@ unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
 		unsigned long start_time);
 
-unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
-				 struct bio *bio);
-void part_end_io_acct(struct hd_struct *part, struct bio *bio,
+unsigned long part_start_io_acct(struct gendisk *disk,
+		struct block_device **part, struct bio *bio);
+void part_end_io_acct(struct block_device *part, struct bio *bio,
 		unsigned long start_time);
 
 /**


@@ -131,8 +131,8 @@ enum {
 struct disk_part_tbl {
 	struct rcu_head rcu_head;
 	int len;
-	struct hd_struct __rcu *last_lookup;
-	struct hd_struct __rcu *part[];
+	struct block_device __rcu *last_lookup;
+	struct block_device __rcu *part[];
 };
 
 struct disk_events;


@@ -25,26 +25,26 @@ struct disk_stats {
 #define part_stat_unlock()	preempt_enable()
 
 #define part_stat_get_cpu(part, field, cpu)				\
-	(per_cpu_ptr((part)->bdev->bd_stats, (cpu))->field)
+	(per_cpu_ptr((part)->bd_stats, (cpu))->field)
 
 #define part_stat_get(part, field)					\
 	part_stat_get_cpu(part, field, smp_processor_id())
 
 #define part_stat_read(part, field)					\
 ({									\
-	typeof((part)->bdev->bd_stats->field) res = 0;			\
+	typeof((part)->bd_stats->field) res = 0;			\
 	unsigned int _cpu;						\
 	for_each_possible_cpu(_cpu)					\
-		res += per_cpu_ptr((part)->bdev->bd_stats, _cpu)->field; \
+		res += per_cpu_ptr((part)->bd_stats, _cpu)->field;	\
 	res;								\
 })
 
-static inline void part_stat_set_all(struct hd_struct *part, int value)
+static inline void part_stat_set_all(struct block_device *part, int value)
 {
 	int i;
 
 	for_each_possible_cpu(i)
-		memset(per_cpu_ptr(part->bdev->bd_stats, i), value,
+		memset(per_cpu_ptr(part->bd_stats, i), value,
 				sizeof(struct disk_stats));
 }
@@ -54,13 +54,12 @@ static inline void part_stat_set_all(struct hd_struct *part, int value)
 	 part_stat_read(part, field[STAT_DISCARD]))
 
 #define __part_stat_add(part, field, addnd)				\
-	__this_cpu_add((part)->bdev->bd_stats->field, addnd)
+	__this_cpu_add((part)->bd_stats->field, addnd)
 
 #define part_stat_add(part, field, addnd)	do {			\
 	__part_stat_add((part), field, addnd);				\
-	if ((part)->partno)						\
-		__part_stat_add(part_to_disk((part))->part0->bd_part,	\
-				field, addnd);				\
+	if ((part)->bd_partno)						\
+		__part_stat_add(bdev_whole(part), field, addnd);	\
 } while (0)
 
 #define part_stat_dec(part, field)	\