netdma: adding alignment check for NETDMA ops
This is the fallout from adding the memcpy alignment workaround for
certain IOATDMA hardware. NetDMA will now only use a DMA engine that
can handle byte-aligned ops.

Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent f26df1a1a9
commit a2bd1140a2
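Background on the check itself: the new helper only hands back a channel whose engine advertises no copy-alignment restriction. Below is a minimal user-space sketch of that alignment test, not the kernel source; copy_is_aligned() is a hypothetical stand-in for is_dma_copy_aligned(), and align_shift models the dmaengine convention of storing the required alignment as a power-of-two shift.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * copy_is_aligned - hypothetical stand-in for is_dma_copy_aligned()
 * @align_shift: required alignment as a power-of-two shift (0 = none)
 * @src_off, @dst_off, @len: offsets and length of the proposed copy
 *
 * All three values must be multiples of (1 << align_shift).
 */
static bool copy_is_aligned(unsigned int align_shift,
			    size_t src_off, size_t dst_off, size_t len)
{
	size_t mask = ((size_t)1 << align_shift) - 1;	/* 0 when shift is 0 */

	return ((src_off | dst_off | len) & mask) == 0;
}

int main(void)
{
	/*
	 * net_dma_find_channel() probes with (1, 1, 1): only an engine
	 * with no alignment restriction (shift 0) can pass.
	 */
	printf("shift 0 (byte capable): %d\n", copy_is_aligned(0, 1, 1, 1));
	printf("shift 3 (8-byte only):  %d\n", copy_is_aligned(3, 1, 1, 1));
	return 0;
}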
drivers/dma/dmaengine.c
@@ -332,6 +332,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/*
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+		return NULL;
+
+	return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
 /**
  * dma_issue_pending_all - flush all pending operations across all channels
  */
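Callers treat a NULL return as "no byte-capable engine present" and fall back to a CPU copy, which is how the TCP call sites below use the helper. A self-contained sketch of that pattern follows; it is a user-space illustration only, with net_dma_find_channel() stubbed out rather than taken from the export above, and copy_buf() is hypothetical.

#include <stdio.h>
#include <string.h>

struct dma_chan;			/* opaque, as in <linux/dmaengine.h> */

/*
 * Stub standing in for the kernel export above; here it pretends that
 * no byte-capable DMA engine is present, forcing the fallback path.
 */
static struct dma_chan *net_dma_find_channel(void)
{
	return NULL;
}

/* Prefer the offload engine when one qualifies, else copy with the CPU. */
static void copy_buf(void *dst, const void *src, size_t len)
{
	struct dma_chan *chan = net_dma_find_channel();

	if (chan) {
		/* offload path: submit a DMA memcpy on chan (elided) */
	} else {
		memcpy(dst, src, len);	/* CPU fallback */
	}
}

int main(void)
{
	char src[] = "net_dma", dst[sizeof(src)];

	copy_buf(dst, src, sizeof(src));
	puts(dst);
	return 0;
}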
include/linux/dmaengine.h
@@ -948,6 +948,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 
 /* --- Helper iov-locking functions --- */
net/ipv4/tcp.c
@@ -1450,7 +1450,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    dma_find_channel(DMA_MEMCPY)) {
+		    net_dma_find_channel()) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 					dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1665,7 +1665,7 @@ do_prequeue:
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+				tp->ucopy.dma_chan = net_dma_find_channel();
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
net/ipv4/tcp_input.c
@@ -5190,7 +5190,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+		tp->ucopy.dma_chan = net_dma_find_channel();
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
 
net/ipv4/tcp_ipv4.c
@@ -1727,7 +1727,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
net/ipv6/tcp_ipv6.c
@@ -1755,7 +1755,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else