RDMA/siw: Convert siw_tx_hdt() to kmap_local_page()
kmap() is being deprecated and will break uses of device dax after PKS
protection is introduced.[1]

The use of kmap() in siw_tx_hdt() is all thread local, therefore
kmap_local_page() is a sufficient replacement and will work with pgmap
protected pages when those are implemented.

siw_tx_hdt() tracks the pages it uses in a page_array and, on function
exit, uses that array to unmap the pages which were mapped.  Not all
entries in the array are mapped, and this is tracked in kmap_mask.

kunmap_local() takes a mapped address rather than a page.  Alter
siw_unmap_pages() to take the iov array so the iov_base address of each
mapping can be reused.  Use PAGE_MASK to get the proper address for
kunmap_local().

kmap_local_page() mappings are tracked in a stack and must be unmapped
in the opposite order they were mapped in.  Because segments are mapped
into the page array in increasing index order, modify siw_unmap_pages()
to unmap pages in decreasing order.

Use kmap_local_page() instead of kmap() to map pages in the page_array.

[1] https://lore.kernel.org/lkml/20201009195033.3208459-59-ira.weiny@intel.com/

Link: https://lore.kernel.org/r/20210624174814.2822896-1-ira.weiny@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Bernard Metzler <bmt@zurich.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
commit 9d649d594f (parent 1ec50dd12a)
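The two rules the message leans on, that kunmap_local() takes the mapped
address rather than a page and that local mappings must be released in
reverse order, can be seen in the minimal sketch below. The helper
demo_map_two_pages() and its arguments are hypothetical and not part of
the patch; only kmap_local_page(), kunmap_local(), and PAGE_MASK are the
real kernel API.

#include <linux/highmem.h>

/*
 * Hypothetical demo, not from the patch: map two pages at in-page
 * offsets, then release them the way the converted siw_unmap_pages()
 * does.
 */
static void demo_map_two_pages(struct page *p0, struct page *p1,
			       size_t off0, size_t off1)
{
	/* kmap_local_page() pushes each mapping onto a per-thread stack. */
	void *a0 = kmap_local_page(p0) + off0;
	void *a1 = kmap_local_page(p1) + off1;

	/* ... read or write through a0 and a1 here ... */

	/*
	 * Unmap in reverse (LIFO) order.  kunmap_local() wants an address
	 * inside the mapping, not a struct page, so the page-aligned base
	 * is recovered from the offset address with PAGE_MASK.
	 */
	kunmap_local((void *)((unsigned long)a1 & PAGE_MASK));
	kunmap_local((void *)((unsigned long)a0 & PAGE_MASK));
}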
@@ -396,13 +396,20 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
 
 #define MAX_TRAILER (MPA_CRC_SIZE + 4)
 
-static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
+static void siw_unmap_pages(struct kvec *iov, unsigned long kmap_mask, int len)
 {
-	while (kmap_mask) {
-		if (kmap_mask & BIT(0))
-			kunmap(*pp);
-		pp++;
-		kmap_mask >>= 1;
+	int i;
+
+	/*
+	 * Work backwards through the array to honor the kmap_local_page()
+	 * ordering requirements.
+	 */
+	for (i = (len-1); i >= 0; i--) {
+		if (kmap_mask & BIT(i)) {
+			unsigned long addr = (unsigned long)iov[i].iov_base;
+
+			kunmap_local((void *)(addr & PAGE_MASK));
+		}
 	}
 }
 
@@ -498,7 +505,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				p = siw_get_upage(mem->umem,
 						  sge->laddr + sge_off);
 				if (unlikely(!p)) {
-					siw_unmap_pages(page_array, kmap_mask);
+					siw_unmap_pages(iov, kmap_mask, seg);
 					wqe->processed -= c_tx->bytes_unsent;
 					rv = -EFAULT;
 					goto done_crc;
@@ -506,11 +513,12 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				page_array[seg] = p;
 
 				if (!c_tx->use_sendpage) {
-					iov[seg].iov_base = kmap(p) + fp_off;
-					iov[seg].iov_len = plen;
+					void *kaddr = kmap_local_page(p);
 
 					/* Remember for later kunmap() */
 					kmap_mask |= BIT(seg);
+					iov[seg].iov_base = kaddr + fp_off;
+					iov[seg].iov_len = plen;
 
 					if (do_crc)
 						crypto_shash_update(
@@ -542,7 +550,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 
 			if (++seg > (int)MAX_ARRAY) {
 				siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
-				siw_unmap_pages(page_array, kmap_mask);
+				siw_unmap_pages(iov, kmap_mask, seg-1);
 				wqe->processed -= c_tx->bytes_unsent;
 				rv = -EMSGSIZE;
 				goto done_crc;
@@ -593,7 +601,7 @@ sge_done:
 	} else {
 		rv = kernel_sendmsg(s, &msg, iov, seg + 1,
 				    hdr_len + data_len + trl_len);
-		siw_unmap_pages(page_array, kmap_mask);
+		siw_unmap_pages(iov, kmap_mask, seg);
 	}
 	if (rv < (int)hdr_len) {
 		/* Not even complete hdr pushed or negative rv */
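The map and unmap sides of the diff above pair up through kmap_mask: the
transmit loop sets one bit per iov slot that actually received a local
mapping, and siw_unmap_pages() walks those bits backwards. Below is a
condensed sketch of that pattern under the simplifying assumption that
every page maps; the function name and the nents parameter are
illustrative, not the driver's.

#include <linux/highmem.h>
#include <linux/bits.h>
#include <linux/uio.h>

/*
 * Condensed, hypothetical sketch of the kmap_mask bookkeeping; not
 * code from the driver.
 */
static void demo_mask_bookkeeping(struct page **pages, struct kvec *iov,
				  int nents)
{
	unsigned long kmap_mask = 0;
	int i;

	/* Map side: set one bit per iov slot that got a local mapping. */
	for (i = 0; i < nents; i++) {
		iov[i].iov_base = kmap_local_page(pages[i]);
		iov[i].iov_len = PAGE_SIZE;
		kmap_mask |= BIT(i);
	}

	/* ... hand iov[] to kernel_sendmsg() or similar ... */

	/*
	 * Unmap side: walk the bits backwards, as siw_unmap_pages() does.
	 * The PAGE_MASK step matters in the driver because an in-page
	 * offset (fp_off) was added to iov_base; here it is a no-op.
	 */
	for (i = nents - 1; i >= 0; i--) {
		if (kmap_mask & BIT(i)) {
			unsigned long addr = (unsigned long)iov[i].iov_base;

			kunmap_local((void *)(addr & PAGE_MASK));
		}
	}
}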