RDMA/siw: Convert siw_tx_hdt() to kmap_local_page()

kmap() is being deprecated and will break uses of device dax after PKS
protection is introduced.[1]

The use of kmap() in siw_tx_hdt() is all thread local; therefore
kmap_local_page() is a sufficient replacement and will work with pgmap
protected pages when those are implemented.
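
For reference, the substitution follows the usual pattern for a
short-lived, thread-local mapping (an illustrative sketch only, not code
taken from the driver):

	void *addr = kmap_local_page(p);	/* was: kmap(p) */

	memcpy(addr, src, len);			/* use only within this thread */

	kunmap_local(addr);			/* was: kunmap(p) */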

siw_tx_hdt() tracks the pages it uses in a page_array.  On function exit,
that array is used to unmap any pages which were mapped.  Not all entries
in the array are mapped; this is tracked in kmap_mask.
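
The bookkeeping is one bit per segment slot; simplified, it looks roughly
like this (sketch only, condensed from the driver code):

	if (!c_tx->use_sendpage) {
		iov[seg].iov_base = kmap_local_page(p) + fp_off;
		kmap_mask |= BIT(seg);	/* slot 'seg' must be unmapped later */
	}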

kunmap_local() takes a mapped address rather than a page.  Alter
siw_unmap_pages() to take the iov array to reuse the iov_base address of
each mapping.  Use PAGE_MASK to get the proper address for kunmap_local().
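
Because iov_base points fp_off bytes into the page, masking with PAGE_MASK
recovers the address kmap_local_page() returned, as done in the new
siw_unmap_pages():

	unsigned long addr = (unsigned long)iov[i].iov_base;

	/* iov_base == kmap_local_page(p) + fp_off, so mask off the offset */
	kunmap_local((void *)(addr & PAGE_MASK));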

kmap_local_page() mappings are tracked on a stack and must be unmapped in
the opposite order from which they were mapped.  Because segments are
mapped into the page array in increasing index order, modify
siw_unmap_pages() to unmap pages in decreasing index order.
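
For example, with two pages mapped back to back the unmaps have to nest in
reverse order (illustrative only):

	void *a = kmap_local_page(p1);
	void *b = kmap_local_page(p2);

	/* mappings must be undone in reverse (stack) order */
	kunmap_local(b);
	kunmap_local(a);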

Use kmap_local_page() instead of kmap() to map pages in the page_array.

[1] https://lore.kernel.org/lkml/20201009195033.3208459-59-ira.weiny@intel.com/

Link: https://lore.kernel.org/r/20210624174814.2822896-1-ira.weiny@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Bernard Metzler <bmt@zurich.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

@@ -396,13 +396,20 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
 
 #define MAX_TRAILER (MPA_CRC_SIZE + 4)
 
-static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
+static void siw_unmap_pages(struct kvec *iov, unsigned long kmap_mask, int len)
 {
-	while (kmap_mask) {
-		if (kmap_mask & BIT(0))
-			kunmap(*pp);
-		pp++;
-		kmap_mask >>= 1;
+	int i;
+
+	/*
+	 * Work backwards through the array to honor the kmap_local_page()
+	 * ordering requirements.
+	 */
+	for (i = (len-1); i >= 0; i--) {
+		if (kmap_mask & BIT(i)) {
+			unsigned long addr = (unsigned long)iov[i].iov_base;
+
+			kunmap_local((void *)(addr & PAGE_MASK));
+		}
 	}
 }
 
@@ -498,7 +505,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				p = siw_get_upage(mem->umem,
 						  sge->laddr + sge_off);
 				if (unlikely(!p)) {
-					siw_unmap_pages(page_array, kmap_mask);
+					siw_unmap_pages(iov, kmap_mask, seg);
 					wqe->processed -= c_tx->bytes_unsent;
 					rv = -EFAULT;
 					goto done_crc;
@@ -506,11 +513,12 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				page_array[seg] = p;
 				if (!c_tx->use_sendpage) {
-					iov[seg].iov_base = kmap(p) + fp_off;
-					iov[seg].iov_len = plen;
+					void *kaddr = kmap_local_page(p);
 
 					/* Remember for later kunmap() */
 					kmap_mask |= BIT(seg);
+					iov[seg].iov_base = kaddr + fp_off;
+					iov[seg].iov_len = plen;
 
 					if (do_crc)
 						crypto_shash_update(
@@ -542,7 +550,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 			if (++seg > (int)MAX_ARRAY) {
 				siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
-				siw_unmap_pages(page_array, kmap_mask);
+				siw_unmap_pages(iov, kmap_mask, seg-1);
 				wqe->processed -= c_tx->bytes_unsent;
 				rv = -EMSGSIZE;
 				goto done_crc;
@@ -593,7 +601,7 @@ sge_done:
 		} else {
 			rv = kernel_sendmsg(s, &msg, iov, seg + 1,
 					    hdr_len + data_len + trl_len);
-			siw_unmap_pages(page_array, kmap_mask);
+			siw_unmap_pages(iov, kmap_mask, seg);
 		}
 		if (rv < (int)hdr_len) {
 			/* Not even complete hdr pushed or negative rv */