bcachefs: Handle partial pages in seek data/hole

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet authored 2019-07-30 13:49:17 -04:00, committed by Kent Overstreet
parent d1542e0362
commit 543ef2ebcd
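
Note: the new folio_data_offset() and __page_hole_offset() helpers below return a byte offset within the page by scanning per-sector state, starting from the 512-byte sector containing the requested offset (offset >> 9) and converting the first matching sector index back to bytes (i << 9). A standalone sketch of that arithmetic, not taken from this commit (PAGE_SIZE/PAGE_SECTORS are redefined locally and the enum and data_offset() helper are simplified stand-ins for struct bch_page_state):

#include <stdio.h>

#define PAGE_SIZE	4096
#define PAGE_SECTORS	(PAGE_SIZE >> 9)	/* 8 x 512-byte sectors per 4k page */

/* Simplified stand-in for the per-sector state kept in struct bch_page_state. */
enum sector_state { SECTOR_UNALLOCATED, SECTOR_DIRTY };

/*
 * Same shape as the new folio_data_offset(): scan from the sector containing
 * @offset (offset >> 9) and return the byte offset of the first data sector
 * (i << 9), or -1 if the rest of the page is a hole.
 */
static int data_offset(const enum sector_state *s, unsigned offset)
{
	for (unsigned i = offset >> 9; i < PAGE_SECTORS; i++)
		if (s[i] >= SECTOR_DIRTY)
			return i << 9;
	return -1;
}

int main(void)
{
	enum sector_state s[PAGE_SECTORS] = { 0 };

	s[6] = SECTOR_DIRTY;	/* data only in bytes 3072..3583 of this page */

	/* A seek starting at byte 3000 of the page lands in sector 5... */
	printf("%d\n", data_offset(s, 3000));	/* ...and reports data at 3072 */
	return 0;
}

The hole-side helper in the patch is symmetric: it looks for the first sector whose state is below SECTOR_DIRTY, and returns offset 0 when the page has no bch_page_state attached at all.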


@@ -2833,22 +2833,20 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
 
 /* fseek: */
 
-static bool folio_is_data(struct folio *folio)
+static int folio_data_offset(struct folio *folio, unsigned offset)
 {
 	struct bch_page_state *s = bch2_page_state(&folio->page);
 	unsigned i;
 
-	if (!s)
-		return false;
+	if (s)
+		for (i = offset >> 9; i < PAGE_SECTORS; i++)
+			if (s->s[i].state >= SECTOR_DIRTY)
+				return i << 9;
 
-	for (i = 0; i < PAGE_SECTORS; i++)
-		if (s->s[i].state >= SECTOR_DIRTY)
-			return true;
-
-	return false;
+	return -1;
 }
 
-static loff_t bch2_next_pagecache_data(struct inode *vinode,
+static loff_t bch2_seek_pagecache_data(struct inode *vinode,
 				       loff_t start_offset,
 				       loff_t end_offset)
 {
@@ -2857,6 +2855,8 @@ static loff_t bch2_next_pagecache_data(struct inode *vinode,
 	pgoff_t end_index = end_offset >> PAGE_SHIFT;
 	pgoff_t index = start_index;
 	unsigned i;
+	loff_t ret;
+	int offset;
 
 	folio_batch_init(&fbatch);
 
@@ -2866,14 +2866,17 @@ static loff_t bch2_next_pagecache_data(struct inode *vinode,
 			struct folio *folio = fbatch.folios[i];
 
 			folio_lock(folio);
-			if (folio_is_data(folio)) {
-				end_offset =
-					min(end_offset,
-					max(start_offset,
-					    ((loff_t) folio->index) << PAGE_SHIFT));
+			offset = folio_data_offset(folio,
+					folio->index == start_index
+					? start_offset & (PAGE_SIZE - 1)
+					: 0);
+			if (offset >= 0) {
+				ret = clamp(((loff_t) folio->index << PAGE_SHIFT) +
+					    offset,
+					    start_offset, end_offset);
 				folio_unlock(folio);
 				folio_batch_release(&fbatch);
-				return end_offset;
+				return ret;
 			}
 			folio_unlock(folio);
 		}
@@ -2916,7 +2919,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
 		return ret;
 
 	if (next_data > offset)
-		next_data = bch2_next_pagecache_data(&inode->v,
+		next_data = bch2_seek_pagecache_data(&inode->v,
 			offset, next_data);
 
 	if (next_data >= isize)
@@ -2925,34 +2928,56 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
 	return vfs_setpos(file, next_data, MAX_LFS_FILESIZE);
 }
 
-static bool page_slot_is_data(struct address_space *mapping, pgoff_t index)
+static int __page_hole_offset(struct page *page, unsigned offset)
+{
+	struct bch_page_state *s = bch2_page_state(page);
+	unsigned i;
+
+	if (!s)
+		return 0;
+
+	for (i = offset >> 9; i < PAGE_SECTORS; i++)
+		if (s->s[i].state < SECTOR_DIRTY)
+			return i << 9;
+
+	return -1;
+}
+
+static loff_t page_hole_offset(struct address_space *mapping, loff_t offset)
 {
+	pgoff_t index = offset >> PAGE_SHIFT;
 	struct page *page;
-	bool ret;
+	int pg_offset;
+	loff_t ret = -1;
 
 	page = find_lock_page(mapping, index);
 	if (!page)
-		return false;
+		return offset;
+
+	pg_offset = __page_hole_offset(page, offset & (PAGE_SIZE - 1));
+	if (pg_offset >= 0)
+		ret = ((loff_t) index << PAGE_SHIFT) + pg_offset;
 
-	ret = folio_is_data(page_folio(page));
 	unlock_page(page);
 
 	return ret;
 }
 
-static loff_t bch2_next_pagecache_hole(struct inode *vinode,
+static loff_t bch2_seek_pagecache_hole(struct inode *vinode,
 				       loff_t start_offset,
 				       loff_t end_offset)
 {
 	struct address_space *mapping = vinode->i_mapping;
-	pgoff_t index;
+	loff_t offset = start_offset, hole;
 
-	for (index = start_offset >> PAGE_SHIFT;
-	     index < end_offset >> PAGE_SHIFT;
-	     index++)
-		if (!page_slot_is_data(mapping, index))
-			end_offset = max(start_offset,
-					 ((loff_t) index) << PAGE_SHIFT);
+	while (offset < end_offset) {
+		hole = page_hole_offset(mapping, offset);
+		if (hole >= 0 && hole <= end_offset)
+			return max(start_offset, hole);
+
+		offset += PAGE_SIZE;
+		offset &= PAGE_MASK;
+	}
 
 	return end_offset;
 }
@@ -2977,11 +3002,11 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
 			   POS(inode->v.i_ino, offset >> 9),
 			   BTREE_ITER_SLOTS, k, ret) {
 		if (k.k->p.inode != inode->v.i_ino) {
-			next_hole = bch2_next_pagecache_hole(&inode->v,
+			next_hole = bch2_seek_pagecache_hole(&inode->v,
 					offset, MAX_LFS_FILESIZE);
 			break;
 		} else if (!bkey_extent_is_data(k.k)) {
-			next_hole = bch2_next_pagecache_hole(&inode->v,
+			next_hole = bch2_seek_pagecache_hole(&inode->v,
 					max(offset, bkey_start_offset(k.k) << 9),
 					k.k->p.offset << 9);
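
For reference, the user-visible interface this touches is lseek(2) with SEEK_DATA/SEEK_HOLE: with per-sector page state the returned position may now fall inside a partially dirty page rather than on a page boundary. A minimal user-space check, not part of this commit (the file path is an arbitrary example):

#define _GNU_SOURCE			/* SEEK_DATA / SEEK_HOLE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "testfile";
	int fd = open(path, O_RDONLY);
	off_t data, hole;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* First data byte at or after offset 0; ENXIO means the file is all hole. */
	data = lseek(fd, 0, SEEK_DATA);
	if (data < 0) {
		perror("lseek(SEEK_DATA)");
		close(fd);
		return 1;
	}

	/* First hole at or after that data (end of file counts as a hole). */
	hole = lseek(fd, data, SEEK_HOLE);

	printf("data at %lld, next hole at %lld\n",
	       (long long) data, (long long) hole);
	close(fd);
	return 0;
}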