vmbus: keep pointer to ring buffer page
Avoid going from struct page to virt address (and back) by keeping a pointer to the allocated pages instead of the virt address.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 52a42c2a90
parent 800b932969
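The core of the change is a storage pattern: vmbus_open() computes the allocation order once, keeps the struct page * returned by the allocator, and derives a kernel virtual address with page_address() only where one is actually needed, instead of storing the virtual address and converting back with virt_to_page(). Below is a minimal sketch of that pattern; the ring_state holder and its helpers are hypothetical stand-ins for struct vmbus_channel, not part of the patch.

/* Sketch only: hypothetical holder standing in for struct vmbus_channel. */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

struct ring_state {
	struct page *ring_page;		/* what the channel now stores */
	unsigned int order;		/* computed once from the ring sizes */
};

static int ring_state_alloc(struct ring_state *rs, size_t send, size_t recv)
{
	rs->order = get_order(send + recv);
	rs->ring_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, rs->order);
	return rs->ring_page ? 0 : -ENOMEM;
}

/* A virtual address is derived only at the point it is needed. */
static void *ring_state_va(const struct ring_state *rs)
{
	return page_address(rs->ring_page);
}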
@@ -91,11 +91,14 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	unsigned long flags;
 	int ret, err = 0;
 	struct page *page;
+	unsigned int order;
 
 	if (send_ringbuffer_size % PAGE_SIZE ||
 	    recv_ringbuffer_size % PAGE_SIZE)
 		return -EINVAL;
 
+	order = get_order(send_ringbuffer_size + recv_ringbuffer_size);
+
 	spin_lock_irqsave(&newchannel->lock, flags);
 	if (newchannel->state == CHANNEL_OPEN_STATE) {
 		newchannel->state = CHANNEL_OPENING_STATE;
@@ -110,21 +113,17 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
 	/* Allocate the ring buffer */
 	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
-				GFP_KERNEL|__GFP_ZERO,
-				get_order(send_ringbuffer_size +
-					  recv_ringbuffer_size));
+				GFP_KERNEL|__GFP_ZERO, order);
 
 	if (!page)
-		page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
-				   get_order(send_ringbuffer_size +
-					     recv_ringbuffer_size));
+		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
 
 	if (!page) {
 		err = -ENOMEM;
 		goto error_set_chnstate;
 	}
 
-	newchannel->ringbuffer_pages = page_address(page);
+	newchannel->ringbuffer_page = page;
 	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
 					   recv_ringbuffer_size) >> PAGE_SHIFT;
 
@@ -239,8 +238,7 @@ error_free_gpadl:
 error_free_pages:
 	hv_ringbuffer_cleanup(&newchannel->outbound);
 	hv_ringbuffer_cleanup(&newchannel->inbound);
-	__free_pages(page,
-		     get_order(send_ringbuffer_size + recv_ringbuffer_size));
+	__free_pages(page, order);
 error_set_chnstate:
 	newchannel->state = CHANNEL_OPEN_STATE;
 	return err;
@@ -658,8 +656,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 	hv_ringbuffer_cleanup(&channel->outbound);
 	hv_ringbuffer_cleanup(&channel->inbound);
 
-	free_pages((unsigned long)channel->ringbuffer_pages,
-		   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+	__free_pages(channel->ringbuffer_page,
+		     get_order(channel->ringbuffer_pagecount << PAGE_SHIFT));
 
 out:
 	return ret;
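On the teardown side the free switches API as well: free_pages() takes the buffer's kernel virtual address as an unsigned long plus the order, while __free_pages() takes the struct page * directly, so holding the page pointer lets vmbus_close_internal() avoid the address form entirely. A sketch of the two equivalent frees for a lowmem allocation (only one of them would ever be called for a given buffer):

/* Sketch: equivalent ways to free an order-N lowmem allocation. */
unsigned int order = 4;	/* example order, 16 pages */
struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
void *buf = page_address(page);

/* old style: by kernel virtual address */
/* free_pages((unsigned long)buf, order); */

/* new style: by page pointer, no virt-to-page conversion needed */
__free_pages(page, order);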
@@ -130,11 +130,12 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
 		= container_of(kobj, struct vmbus_channel, kobj);
 	struct hv_device *dev = channel->primary_channel->device_obj;
 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
+	void *ring_buffer = page_address(channel->ringbuffer_page);
 
 	dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
 		q_idx, vma_pages(vma), vma->vm_pgoff);
 
-	return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
+	return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
 			       channel->ringbuffer_pagecount << PAGE_SHIFT);
 }
 
@@ -223,7 +224,7 @@ hv_uio_probe(struct hv_device *dev,
 	/* mem resources */
 	pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
 	pdata->info.mem[TXRX_RING_MAP].addr
-		= (uintptr_t)dev->channel->ringbuffer_pages;
+		= (uintptr_t)page_address(dev->channel->ringbuffer_page);
 	pdata->info.mem[TXRX_RING_MAP].size
 		= dev->channel->ringbuffer_pagecount << PAGE_SHIFT;
 	pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;
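With only the page pointer stored on the channel, consumers such as the UIO driver derive whatever address form they need at the point of use, as the two hunks above do. A sketch of those derivations; the local variable names are illustrative:

/* Sketch: deriving address forms from the stored ring buffer page. */
void *ring_va = page_address(channel->ringbuffer_page);	/* kernel virtual address */
phys_addr_t ring_pa = virt_to_phys(ring_va);			/* physical, e.g. for vm_iomap_memory() */
size_t ring_bytes = (size_t)channel->ringbuffer_pagecount << PAGE_SHIFT;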
@@ -739,7 +739,7 @@ struct vmbus_channel {
 	u32 ringbuffer_gpadlhandle;
 
 	/* Allocated memory for ring buffer */
-	void *ringbuffer_pages;
+	struct page *ringbuffer_page;
 	u32 ringbuffer_pagecount;
 	struct hv_ring_buffer_info outbound;	/* send to parent */
 	struct hv_ring_buffer_info inbound;	/* receive from parent */