jfs: Remove use of folio error flag

Store the blk_status per folio (if we can have multiple metapages per
folio) instead of setting the folio error flag.  This will allow us to
reclaim a precious folio flag shortly.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>
This commit is contained in:
Matthew Wilcox (Oracle) 2024-04-20 03:50:00 +01:00 committed by Dave Kleikamp
parent 3fefd9b594
commit ee6817e72d

View File

@ -76,6 +76,7 @@ static mempool_t *metapage_mempool;
/*
 * struct meta_anchor - per-folio bookkeeping when a folio holds
 * multiple metapages (MPS_PER_PAGE > 1).
 *
 * @mp_count: number of live metapages attached to the folio
 * @io_count: outstanding bios against the folio; the completion
 *            handler runs when this drops to zero (see dec_io())
 * @status:   first non-OK blk_status_t seen across the folio's bios,
 *            replacing the old per-folio error flag
 * @mp:       the metapages backed by this folio
 */
struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	blk_status_t status;
	struct metapage *mp[MPS_PER_PAGE];
};
@ -138,12 +139,16 @@ static inline void inc_io(struct folio *folio)
atomic_inc(&anchor->io_count); atomic_inc(&anchor->io_count);
} }
/*
 * dec_io - drop one outstanding I/O reference on @folio.
 * @folio:   folio whose ->private holds the struct meta_anchor
 * @status:  completion status of the bio being retired
 * @handler: completion callback, invoked once the last bio finishes
 *
 * The first non-OK status is latched in the anchor so that a later
 * successful bio cannot hide an earlier failure; the accumulated
 * status is what the handler finally sees.
 */
static inline void dec_io(struct folio *folio, blk_status_t status,
		void (*handler)(struct folio *, blk_status_t))
{
	struct meta_anchor *anchor = folio->private;

	/* Remember only the first error; do not overwrite it. */
	if (anchor->status == BLK_STS_OK)
		anchor->status = status;

	if (atomic_dec_and_test(&anchor->io_count))
		handler(folio, anchor->status);
}
#else #else
@ -168,7 +173,7 @@ static inline void remove_metapage(struct folio *folio, struct metapage *mp)
} }
/*
 * Single metapage per folio: no anchor and no refcount, so inc_io is a
 * no-op and dec_io forwards the status straight to the handler.
 */
#define inc_io(folio) do {} while(0)
#define dec_io(folio, status, handler) handler(folio, status)
#endif
@ -258,23 +263,20 @@ static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
return lblock; return lblock;
} }
/*
 * last_read_complete - called once every read bio for @folio has finished.
 * @folio:  the folio that was read
 * @status: accumulated blk_status_t from dec_io() (first error wins)
 *
 * Logs a failure and then marks the folio uptodate iff the whole read
 * succeeded, unlocking it via folio_end_read().
 */
static void last_read_complete(struct folio *folio, blk_status_t status)
{
	if (status != BLK_STS_OK)
		/*
		 * blk_status_t is __bitwise; convert it to an errno for
		 * printing, matching what last_write_complete() reports.
		 */
		printk(KERN_ERR "Read error %d at %#llx\n",
		       blk_status_to_errno(status),
		       (unsigned long long)folio_pos(folio));

	folio_end_read(folio, status == BLK_STS_OK);
}
static void metapage_read_end_io(struct bio *bio) static void metapage_read_end_io(struct bio *bio)
{ {
struct folio *folio = bio->bi_private; struct folio *folio = bio->bi_private;
if (bio->bi_status) { dec_io(folio, bio->bi_status, last_read_complete);
printk(KERN_ERR "metapage_read_end_io: I/O error\n");
folio_set_error(folio);
}
dec_io(folio, last_read_complete);
bio_put(bio); bio_put(bio);
} }
@ -300,11 +302,17 @@ static void remove_from_logsync(struct metapage *mp)
LOGSYNC_UNLOCK(log, flags); LOGSYNC_UNLOCK(log, flags);
} }
static void last_write_complete(struct folio *folio) static void last_write_complete(struct folio *folio, blk_status_t status)
{ {
struct metapage *mp; struct metapage *mp;
unsigned int offset; unsigned int offset;
if (status) {
int err = blk_status_to_errno(status);
printk(KERN_ERR "metapage_write_end_io: I/O error\n");
mapping_set_error(folio->mapping, err);
}
for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
mp = folio_to_mp(folio, offset); mp = folio_to_mp(folio, offset);
if (mp && test_bit(META_io, &mp->flag)) { if (mp && test_bit(META_io, &mp->flag)) {
@ -326,12 +334,7 @@ static void metapage_write_end_io(struct bio *bio)
BUG_ON(!folio->private); BUG_ON(!folio->private);
if (bio->bi_status) { dec_io(folio, bio->bi_status, last_write_complete);
int err = blk_status_to_errno(bio->bi_status);
printk(KERN_ERR "metapage_write_end_io: I/O error\n");
mapping_set_error(folio->mapping, err);
}
dec_io(folio, last_write_complete);
bio_put(bio); bio_put(bio);
} }
@ -454,10 +457,10 @@ dump_bio:
4, bio, sizeof(*bio), 0); 4, bio, sizeof(*bio), 0);
bio_put(bio); bio_put(bio);
folio_unlock(folio); folio_unlock(folio);
dec_io(folio, last_write_complete); dec_io(folio, BLK_STS_OK, last_write_complete);
err_out: err_out:
while (bad_blocks--) while (bad_blocks--)
dec_io(folio, last_write_complete); dec_io(folio, BLK_STS_OK, last_write_complete);
return -EIO; return -EIO;
} }