IB/ipath: Implement new verbs DMA mapping functions
This patch implements the interposing DMA mapping functions to allow
support for IOMMUs and remove the dependence on phys_to_virt() and
bus_to_virt().

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
commit f2cbb660ed
parent 9b513090a3
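For context, the dma_ops hook this patch fills in comes from the parent commit (9b513090a3), which wrapped the core's DMA calls in ib_dma_*() helpers that dispatch to a device's registered ops when present and otherwise fall back to the normal DMA-mapping API. A minimal sketch of that dispatch (paraphrased, not quoted verbatim from include/rdma/ib_verbs.h):

        /* Sketch of the interposing helper added by the parent commit:
         * use the low-level driver's dma_ops if it registered any,
         * otherwise map through the underlying DMA device as usual.
         */
        static inline u64 ib_dma_map_single(struct ib_device *dev,
                                            void *cpu_addr, size_t size,
                                            enum dma_data_direction direction)
        {
                if (dev->dma_ops)
                        return dev->dma_ops->map_single(dev, cpu_addr, size,
                                                        direction);
                return dma_map_single(dev->dma_device, cpu_addr, size, direction);
        }

This patch supplies ipath's implementation of those ops and registers it via dev->dma_ops in ipath_register_ib_device().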
drivers/infiniband/hw/ipath/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
 ib_ipath-y := \
        ipath_cq.o \
        ipath_diag.o \
+       ipath_dma.o \
        ipath_driver.o \
        ipath_eeprom.o \
        ipath_file_ops.o \
drivers/infiniband/hw/ipath/ipath_dma.c (new file, 189 lines)
@@ -0,0 +1,189 @@
/*
 * Copyright (c) 2006 QLogic, Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_verbs.h>

#include "ipath_verbs.h"

#define BAD_DMA_ADDRESS ((u64) 0)

/*
 * The following functions implement driver specific replacements
 * for the ib_dma_*() functions.
 *
 * These functions return kernel virtual addresses instead of
 * device bus addresses since the driver uses the CPU to copy
 * data instead of using hardware DMA.
 */

static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        return dma_addr == BAD_DMA_ADDRESS;
}

static u64 ipath_dma_map_single(struct ib_device *dev,
                                void *cpu_addr, size_t size,
                                enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return (u64) cpu_addr;
}

static void ipath_dma_unmap_single(struct ib_device *dev,
                                   u64 addr, size_t size,
                                   enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}

static u64 ipath_dma_map_page(struct ib_device *dev,
                              struct page *page,
                              unsigned long offset,
                              size_t size,
                              enum dma_data_direction direction)
{
        u64 addr;

        BUG_ON(!valid_dma_direction(direction));

        if (offset + size > PAGE_SIZE) {
                addr = BAD_DMA_ADDRESS;
                goto done;
        }

        addr = (u64) page_address(page);
        if (addr)
                addr += offset;
        /* TODO: handle highmem pages */

done:
        return addr;
}

static void ipath_dma_unmap_page(struct ib_device *dev,
                                 u64 addr, size_t size,
                                 enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}

int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents,
                 enum dma_data_direction direction)
{
        u64 addr;
        int i;
        int ret = nents;

        BUG_ON(!valid_dma_direction(direction));

        for (i = 0; i < nents; i++) {
                addr = (u64) page_address(sg[i].page);
                /* TODO: handle highmem pages */
                if (!addr) {
                        ret = 0;
                        break;
                }
        }
        return ret;
}

static void ipath_unmap_sg(struct ib_device *dev,
                           struct scatterlist *sg, int nents,
                           enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}

static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
        u64 addr = (u64) page_address(sg->page);

        if (addr)
                addr += sg->offset;
        return addr;
}

static unsigned int ipath_sg_dma_len(struct ib_device *dev,
                                     struct scatterlist *sg)
{
        return sg->length;
}

static void ipath_sync_single_for_cpu(struct ib_device *dev,
                                      u64 addr,
                                      size_t size,
                                      enum dma_data_direction dir)
{
}

static void ipath_sync_single_for_device(struct ib_device *dev,
                                         u64 addr,
                                         size_t size,
                                         enum dma_data_direction dir)
{
}

static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
                                      u64 *dma_handle, gfp_t flag)
{
        struct page *p;
        void *addr = NULL;

        p = alloc_pages(flag, get_order(size));
        if (p)
                addr = page_address(p);
        if (dma_handle)
                *dma_handle = (u64) addr;
        return addr;
}

static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
                                    void *cpu_addr, dma_addr_t dma_handle)
{
        free_pages((unsigned long) cpu_addr, get_order(size));
}

struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
        ipath_mapping_error,
        ipath_dma_map_single,
        ipath_dma_unmap_single,
        ipath_dma_map_page,
        ipath_dma_unmap_page,
        ipath_map_sg,
        ipath_unmap_sg,
        ipath_sg_dma_address,
        ipath_sg_dma_len,
        ipath_sync_single_for_cpu,
        ipath_sync_single_for_device,
        ipath_dma_alloc_coherent,
        ipath_dma_free_coherent
};
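As the comment block in ipath_dma.c notes, ipath copies data with the CPU rather than a hardware DMA engine, so the "DMA address" these ops hand back is simply the kernel virtual address cast to u64, with 0 reserved as the failure sentinel. A hedged caller-side sketch (ibdev, buf and len are illustrative names, not taken from the patch):

        /* Illustrative only: on ipath the value returned below is just
         * (u64) buf, and BAD_DMA_ADDRESS (0) is the sentinel reported
         * through ib_dma_mapping_error().
         */
        u64 dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);

        if (ib_dma_mapping_error(ibdev, dma_addr))
                return -ENOMEM;
        /* ... post a work request that references dma_addr ... */
        ib_dma_unmap_single(ibdev, dma_addr, len, DMA_TO_DEVICE);

The remaining hunks below convert the driver's own key-validation and memory-registration paths to the same convention, replacing phys_to_virt()/bus_to_virt() with plain casts.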
drivers/infiniband/hw/ipath/ipath_keys.c
@@ -134,7 +134,7 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
         */
        if (sge->lkey == 0) {
                isge->mr = NULL;
-               isge->vaddr = bus_to_virt(sge->addr);
+               isge->vaddr = (void *) sge->addr;
                isge->length = sge->length;
                isge->sge_length = sge->length;
                ret = 1;
@@ -202,12 +202,12 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
        int ret;

        /*
-        * We use RKEY == zero for physical addresses
-        * (see ipath_get_dma_mr).
+        * We use RKEY == zero for kernel virtual addresses
+        * (see ipath_get_dma_mr and ipath_dma.c).
         */
        if (rkey == 0) {
                sge->mr = NULL;
-               sge->vaddr = phys_to_virt(vaddr);
+               sge->vaddr = (void *) vaddr;
                sge->length = len;
                sge->sge_length = len;
                ss->sg_list = NULL;
drivers/infiniband/hw/ipath/ipath_mr.c
@@ -54,6 +54,8 @@ static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
  * @acc: access flags
  *
  * Returns the memory region on success, otherwise returns an errno.
+ * Note that all DMA addresses should be created via the
+ * struct ib_dma_mapping_ops functions (see ipath_dma.c).
  */
 struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
 {
@@ -149,8 +151,7 @@ struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
        m = 0;
        n = 0;
        for (i = 0; i < num_phys_buf; i++) {
-               mr->mr.map[m]->segs[n].vaddr =
-                       phys_to_virt(buffer_list[i].addr);
+               mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
                mr->mr.map[m]->segs[n].length = buffer_list[i].size;
                mr->mr.length += buffer_list[i].size;
                n++;
@@ -347,7 +348,7 @@ int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
        n = 0;
        ps = 1 << fmr->page_shift;
        for (i = 0; i < list_len; i++) {
-               fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]);
+               fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
                fmr->mr.map[m]->segs[n].length = ps;
                if (++n == IPATH_SEGSZ) {
                        m++;
drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1599,6 +1599,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
        dev->detach_mcast = ipath_multicast_detach;
        dev->process_mad = ipath_process_mad;
        dev->mmap = ipath_mmap;
+       dev->dma_ops = &ipath_dma_mapping_ops;

        snprintf(dev->node_desc, sizeof(dev->node_desc),
                 IPATH_IDSTR " %s", init_utsname()->nodename);
drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -812,4 +812,6 @@ extern unsigned int ib_ipath_max_srq_wrs;

 extern const u32 ib_ipath_rnr_table[];

+extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
+
 #endif /* IPATH_VERBS_H */