for-5.15-rc7-tag

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE8rQSAMVO+zA4DBdWxWXV+ddtWDsFAmF72q0ACgkQxWXV+ddt
 WDvFOxAAkcryx2FP5aqaoMzBKfoCtMFHO3uAvm+rsMcglWe5kaXhBnHa2HPzoyEh
 YqEx2TeXMTuA2I15bU8KV1RMhQzzRjC4NhdRqY6uaKAcKgON6sJlK5qsq2BnB+V3
 nrue1jppM2Vv8wNzjMNeVETQNC7pmg29yQP/fvWaB36Yar2tyfyWDF11e42HR7cU
 yLQUedg30WEayz3Mp6MTBF36h09WXQrZSs7Iwk1JMQbpxWcpn2CjXrO+vIZOMdvH
 XZZsxBTNB8GJIaJlXssgsq3OP2wspK1lrVHNfi5PYtcZEaFrhkPaVB6enDfd41YV
 zXwj1dnemCni9fh88gZprel9bLyB37dSVfIqq2Ly3hQbSAN4dmHIpxGwPSRIr+Hl
 Bn3UfClHpAftbpd/Y77U7GgcYnkuRo3Bd4mGTF3ZuPDLVrf/QX5BlfGa2dmJYoml
 NfBit7Ha4UrxLW6C8RC6fyEbLQxpNYFY55Ra0Tj0BBO/uhWiqtQGZwC/qbyPKfzN
 YZFcPR6iTILoCHXNan3iZIuLeASMT0djgAtunXXf/BuFnxGfnOuqL3bKt2vojh3+
 rsqpeIxSP/VklKv4JcP3axeLmUK6cA8/9dV2ES0M0Fc0o341jfh+AoVw0GleFeus
 gXlDFPRJeE8yyXmjKyW4shctOczqoeMIq3umebXPP9R4jd/LU/g=
 =YWGa
 -----END PGP SIGNATURE-----

Merge tag 'for-5.15-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "Last minute fixes for crash on 32bit architectures when compression is
  in use. It's a regression introduced in 5.15-rc and I'd really like
  not let this into the final release, fixes via stable trees would add
  unnecessary delay.

  The problem is that on 32-bit architectures with highmem enabled, the
  pages used for compression may need to be kmapped, while the patches
  removed that because we no longer use GFP_HIGHMEM allocations. However,
  pages that don't come from the local allocation may still be located in
  highmem. Despite being 32-bit, there are enough such ARM machines in
  use that this is not a marginal issue.

  I did full reverts of the patches one by one instead of one big
  revert. There's one exception, the "lzo" revert: there was an
  intermediate patch touching the same code to make it compatible with
  subpage, and I can't revert that one as well, so the revert in lzo.c
  was done manually. Qu Wenruo has worked on that with me and verified
  the changes"

* tag 'for-5.15-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  Revert "btrfs: compression: drop kmap/kunmap from lzo"
  Revert "btrfs: compression: drop kmap/kunmap from zlib"
  Revert "btrfs: compression: drop kmap/kunmap from zstd"
  Revert "btrfs: compression: drop kmap/kunmap from generic helpers"
Commit fd919bbd33 by Linus Torvalds, 2021-10-29 10:46:59 -07:00
5 changed files with 72 additions and 33 deletions

--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -172,9 +172,10 @@ static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
 		/* Hash through the page sector by sector */
 		for (pg_offset = 0; pg_offset < bytes_left;
 		     pg_offset += sectorsize) {
-			kaddr = page_address(page);
+			kaddr = kmap_atomic(page);
 			crypto_shash_digest(shash, kaddr + pg_offset,
 					    sectorsize, csum);
+			kunmap_atomic(kaddr);
 
 			if (memcmp(&csum, cb_sum, csum_size) != 0) {
 				btrfs_print_data_csum_error(inode, disk_start,

--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -287,8 +287,9 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
 			cur_size = min_t(unsigned long, compressed_size,
 					 PAGE_SIZE);
 
-			kaddr = page_address(cpage);
+			kaddr = kmap_atomic(cpage);
 			write_extent_buffer(leaf, kaddr, ptr, cur_size);
+			kunmap_atomic(kaddr);
 
 			i++;
 			ptr += cur_size;

--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -141,7 +141,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 	*total_in = 0;
 
 	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-	data_in = page_address(in_page);
+	data_in = kmap(in_page);
 
 	/*
 	 * store the size of all chunks of compressed data in
@@ -152,7 +152,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 		ret = -ENOMEM;
 		goto out;
 	}
-	cpage_out = page_address(out_page);
+	cpage_out = kmap(out_page);
 	out_offset = LZO_LEN;
 	tot_out = LZO_LEN;
 	pages[0] = out_page;
@@ -210,6 +210,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 			if (out_len == 0 && tot_in >= len)
 				break;
 
+			kunmap(out_page);
 			if (nr_pages == nr_dest_pages) {
 				out_page = NULL;
 				ret = -E2BIG;
@@ -221,7 +222,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 				ret = -ENOMEM;
 				goto out;
 			}
-			cpage_out = page_address(out_page);
+			cpage_out = kmap(out_page);
 			pages[nr_pages++] = out_page;
 
 			pg_bytes_left = PAGE_SIZE;
@@ -243,11 +244,12 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 			break;
 
 		bytes_left = len - tot_in;
+		kunmap(in_page);
 		put_page(in_page);
 
 		start += PAGE_SIZE;
 		in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-		data_in = page_address(in_page);
+		data_in = kmap(in_page);
 		in_len = min(bytes_left, PAGE_SIZE);
 	}
 
@@ -257,17 +259,22 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
 	}
 
 	/* store the size of all chunks of compressed data */
-	sizes_ptr = page_address(pages[0]);
+	sizes_ptr = kmap_local_page(pages[0]);
 	write_compress_length(sizes_ptr, tot_out);
+	kunmap_local(sizes_ptr);
 
 	ret = 0;
 	*total_out = tot_out;
 	*total_in = tot_in;
 out:
 	*out_pages = nr_pages;
+	if (out_page)
+		kunmap(out_page);
 
-	if (in_page)
+	if (in_page) {
+		kunmap(in_page);
 		put_page(in_page);
+	}
 
 	return ret;
 }
@@ -283,6 +290,7 @@ static void copy_compressed_segment(struct compressed_bio *cb,
 	u32 orig_in = *cur_in;
 
 	while (*cur_in < orig_in + len) {
+		char *kaddr;
 		struct page *cur_page;
 		u32 copy_len = min_t(u32, PAGE_SIZE - offset_in_page(*cur_in),
 					  orig_in + len - *cur_in);
@@ -290,9 +298,11 @@ static void copy_compressed_segment(struct compressed_bio *cb,
 		ASSERT(copy_len);
 		cur_page = cb->compressed_pages[*cur_in / PAGE_SIZE];
 
+		kaddr = kmap(cur_page);
 		memcpy(dest + *cur_in - orig_in,
-		       page_address(cur_page) + offset_in_page(*cur_in),
+		       kaddr + offset_in_page(*cur_in),
 		       copy_len);
+		kunmap(cur_page);
 
 		*cur_in += copy_len;
 	}
@@ -303,6 +313,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 	struct workspace *workspace = list_entry(ws, struct workspace, list);
 	const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
 	const u32 sectorsize = fs_info->sectorsize;
+	char *kaddr;
 	int ret;
 	/* Compressed data length, can be unaligned */
 	u32 len_in;
@@ -311,7 +322,9 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 	/* Bytes decompressed so far */
 	u32 cur_out = 0;
 
-	len_in = read_compress_length(page_address(cb->compressed_pages[0]));
+	kaddr = kmap(cb->compressed_pages[0]);
+	len_in = read_compress_length(kaddr);
+	kunmap(cb->compressed_pages[0]);
 	cur_in += LZO_LEN;
 
 	/*
@@ -344,9 +357,9 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 		ASSERT(cur_in / sectorsize ==
 		       (cur_in + LZO_LEN - 1) / sectorsize);
 		cur_page = cb->compressed_pages[cur_in / PAGE_SIZE];
+		kaddr = kmap(cur_page);
 		ASSERT(cur_page);
-		seg_len = read_compress_length(page_address(cur_page) +
-					       offset_in_page(cur_in));
+		seg_len = read_compress_length(kaddr + offset_in_page(cur_in));
 		cur_in += LZO_LEN;
 
 		/* Copy the compressed segment payload into workspace */
@@ -431,7 +444,7 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in,
 	destlen = min_t(unsigned long, destlen, PAGE_SIZE);
 	bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
-	kaddr = page_address(dest_page);
+	kaddr = kmap_local_page(dest_page);
 	memcpy(kaddr, workspace->buf + start_byte, bytes);
 
 	/*
@@ -441,6 +454,7 @@ int lzo_decompress(struct list_head *ws, unsigned char *data_in,
 	 */
 	if (bytes < destlen)
 		memset(kaddr+bytes, 0, destlen-bytes);
+	kunmap_local(kaddr);
 out:
 	return ret;
 }

--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -126,7 +126,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 		ret = -ENOMEM;
 		goto out;
 	}
-	cpage_out = page_address(out_page);
+	cpage_out = kmap(out_page);
 	pages[0] = out_page;
 	nr_pages = 1;
 
@@ -148,22 +148,26 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 				int i;
 
 				for (i = 0; i < in_buf_pages; i++) {
-					if (in_page)
+					if (in_page) {
+						kunmap(in_page);
 						put_page(in_page);
+					}
 					in_page = find_get_page(mapping,
 								start >> PAGE_SHIFT);
-					data_in = page_address(in_page);
+					data_in = kmap(in_page);
 					memcpy(workspace->buf + i * PAGE_SIZE,
 					       data_in, PAGE_SIZE);
 					start += PAGE_SIZE;
 				}
 				workspace->strm.next_in = workspace->buf;
 			} else {
-				if (in_page)
+				if (in_page) {
+					kunmap(in_page);
 					put_page(in_page);
+				}
 				in_page = find_get_page(mapping,
 							start >> PAGE_SHIFT);
-				data_in = page_address(in_page);
+				data_in = kmap(in_page);
 				start += PAGE_SIZE;
 				workspace->strm.next_in = data_in;
 			}
@@ -192,6 +196,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 		 * the stream end if required
 		 */
 		if (workspace->strm.avail_out == 0) {
+			kunmap(out_page);
 			if (nr_pages == nr_dest_pages) {
 				out_page = NULL;
 				ret = -E2BIG;
@@ -202,7 +207,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 				ret = -ENOMEM;
 				goto out;
 			}
-			cpage_out = page_address(out_page);
+			cpage_out = kmap(out_page);
 			pages[nr_pages] = out_page;
 			nr_pages++;
 			workspace->strm.avail_out = PAGE_SIZE;
@@ -229,6 +234,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 			goto out;
 		} else if (workspace->strm.avail_out == 0) {
 			/* get another page for the stream end */
+			kunmap(out_page);
 			if (nr_pages == nr_dest_pages) {
 				out_page = NULL;
 				ret = -E2BIG;
@@ -239,7 +245,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 				ret = -ENOMEM;
 				goto out;
 			}
-			cpage_out = page_address(out_page);
+			cpage_out = kmap(out_page);
 			pages[nr_pages] = out_page;
 			nr_pages++;
 			workspace->strm.avail_out = PAGE_SIZE;
@@ -258,8 +264,13 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 	*total_in = workspace->strm.total_in;
 out:
 	*out_pages = nr_pages;
-	if (in_page)
+	if (out_page)
+		kunmap(out_page);
+
+	if (in_page) {
+		kunmap(in_page);
 		put_page(in_page);
+	}
 
 	return ret;
 }
@@ -276,7 +287,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 	unsigned long buf_start;
 	struct page **pages_in = cb->compressed_pages;
 
-	data_in = page_address(pages_in[page_in_index]);
+	data_in = kmap(pages_in[page_in_index]);
 	workspace->strm.next_in = data_in;
 	workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE);
 	workspace->strm.total_in = 0;
@@ -298,6 +309,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 	if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
 		pr_warn("BTRFS: inflateInit failed\n");
+		kunmap(pages_in[page_in_index]);
 		return -EIO;
 	}
 
 	while (workspace->strm.total_in < srclen) {
@@ -324,13 +336,13 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 		if (workspace->strm.avail_in == 0) {
 			unsigned long tmp;
 
+			kunmap(pages_in[page_in_index]);
 			page_in_index++;
 			if (page_in_index >= total_pages_in) {
 				data_in = NULL;
 				break;
 			}
-
-			data_in = page_address(pages_in[page_in_index]);
+			data_in = kmap(pages_in[page_in_index]);
 			workspace->strm.next_in = data_in;
 			tmp = srclen - workspace->strm.total_in;
 			workspace->strm.avail_in = min(tmp, PAGE_SIZE);
@@ -342,6 +354,8 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 	ret = 0;
 done:
 	zlib_inflateEnd(&workspace->strm);
+	if (data_in)
+		kunmap(pages_in[page_in_index]);
 	if (!ret)
 		zero_fill_bio(cb->orig_bio);
 	return ret;

--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -399,7 +399,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 
 	/* map in the first page of input data */
 	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-	workspace->in_buf.src = page_address(in_page);
+	workspace->in_buf.src = kmap(in_page);
 	workspace->in_buf.pos = 0;
 	workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
 
@@ -411,7 +411,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 		goto out;
 	}
 	pages[nr_pages++] = out_page;
-	workspace->out_buf.dst = page_address(out_page);
+	workspace->out_buf.dst = kmap(out_page);
 	workspace->out_buf.pos = 0;
 	workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
 
@@ -446,6 +446,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 		if (workspace->out_buf.pos == workspace->out_buf.size) {
 			tot_out += PAGE_SIZE;
 			max_out -= PAGE_SIZE;
+			kunmap(out_page);
 			if (nr_pages == nr_dest_pages) {
 				out_page = NULL;
 				ret = -E2BIG;
@@ -457,7 +458,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 				goto out;
 			}
 			pages[nr_pages++] = out_page;
-			workspace->out_buf.dst = page_address(out_page);
+			workspace->out_buf.dst = kmap(out_page);
 			workspace->out_buf.pos = 0;
 			workspace->out_buf.size = min_t(size_t, max_out,
 							PAGE_SIZE);
@@ -472,12 +473,13 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 		/* Check if we need more input */
 		if (workspace->in_buf.pos == workspace->in_buf.size) {
 			tot_in += PAGE_SIZE;
+			kunmap(in_page);
 			put_page(in_page);
 
 			start += PAGE_SIZE;
 			len -= PAGE_SIZE;
 			in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-			workspace->in_buf.src = page_address(in_page);
+			workspace->in_buf.src = kmap(in_page);
 			workspace->in_buf.pos = 0;
 			workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
 		}
@@ -504,6 +506,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 
 		tot_out += PAGE_SIZE;
 		max_out -= PAGE_SIZE;
+		kunmap(out_page);
 		if (nr_pages == nr_dest_pages) {
 			out_page = NULL;
 			ret = -E2BIG;
@@ -515,7 +518,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 			goto out;
 		}
 		pages[nr_pages++] = out_page;
-		workspace->out_buf.dst = page_address(out_page);
+		workspace->out_buf.dst = kmap(out_page);
 		workspace->out_buf.pos = 0;
 		workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
 	}
@@ -531,8 +534,12 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
 out:
 	*out_pages = nr_pages;
 	/* Cleanup */
-	if (in_page)
+	if (in_page) {
+		kunmap(in_page);
 		put_page(in_page);
+	}
+	if (out_page)
+		kunmap(out_page);
 
 	return ret;
 }
@@ -556,7 +563,7 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 		goto done;
 	}
 
-	workspace->in_buf.src = page_address(pages_in[page_in_index]);
+	workspace->in_buf.src = kmap(pages_in[page_in_index]);
 	workspace->in_buf.pos = 0;
 	workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
 
@@ -592,14 +599,14 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 			break;
 
 		if (workspace->in_buf.pos == workspace->in_buf.size) {
-			page_in_index++;
+			kunmap(pages_in[page_in_index++]);
 			if (page_in_index >= total_pages_in) {
 				workspace->in_buf.src = NULL;
 				ret = -EIO;
 				goto done;
 			}
 			srclen -= PAGE_SIZE;
-			workspace->in_buf.src = page_address(pages_in[page_in_index]);
+			workspace->in_buf.src = kmap(pages_in[page_in_index]);
 			workspace->in_buf.pos = 0;
 			workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
 		}
@@ -607,6 +614,8 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 	ret = 0;
 	zero_fill_bio(cb->orig_bio);
 done:
+	if (workspace->in_buf.src)
+		kunmap(pages_in[page_in_index]);
 	return ret;
 }
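
A side note on the two call sites above that come back as kmap_local_page()/
kunmap_local() rather than kmap()/kunmap() (my summary, not part of the
patches): the local variant creates a short-lived, CPU-local, strictly nested
mapping that avoids the global kmap pool and must not be handed to another
context, which makes it the cheaper choice when the mapping only spans a few
lines of code. A small illustrative sketch:

#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustration only: read a 4-byte length header from a possibly-highmem page. */
static u32 read_len_header(struct page *page)
{
	u32 len;
	char *kaddr;

	kaddr = kmap_local_page(page);	/* short-lived, CPU-local mapping */
	memcpy(&len, kaddr, sizeof(len));
	kunmap_local(kaddr);		/* takes the mapped address, not the page */

	return len;
}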