2018-01-26 18:50:27 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2017-04-10 13:55:10 +00:00
|
|
|
/*
|
|
|
|
* PCI Endpoint *Controller* Address Space Management
|
|
|
|
*
|
|
|
|
* Copyright (C) 2017 Texas Instruments
|
|
|
|
* Author: Kishon Vijay Abraham I <kishon@ti.com>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/io.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
|
|
|
|
#include <linux/pci-epc.h>
|
|
|
|
|
|
|
|
/**
|
2017-08-18 14:57:56 +00:00
|
|
|
* pci_epc_mem_get_order() - determine the allocation order of a memory size
|
|
|
|
* @mem: address space of the endpoint controller
|
|
|
|
* @size: the size for which to get the order
|
|
|
|
*
|
|
|
|
* Reimplement get_order() for mem->page_size since the generic get_order
|
|
|
|
* always gets order with a constant PAGE_SIZE.
|
|
|
|
*/
|
|
|
|
static int pci_epc_mem_get_order(struct pci_epc_mem *mem, size_t size)
|
|
|
|
{
|
|
|
|
int order;
|
2020-05-07 12:33:16 +00:00
|
|
|
unsigned int page_shift = ilog2(mem->window.page_size);
|
2017-08-18 14:57:56 +00:00
|
|
|
|
|
|
|
size--;
|
|
|
|
size >>= page_shift;
|
|
|
|
#if BITS_PER_LONG == 32
|
|
|
|
order = fls(size);
|
|
|
|
#else
|
|
|
|
order = fls64(size);
|
|
|
|
#endif
|
|
|
|
return order;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2020-05-07 12:33:16 +00:00
|
|
|
* pci_epc_multi_mem_init() - initialize the pci_epc_mem structure
|
2017-04-10 13:55:10 +00:00
|
|
|
* @epc: the EPC device that invoked pci_epc_mem_init
|
2020-05-07 12:33:16 +00:00
|
|
|
* @windows: pointer to windows supported by the device
|
|
|
|
* @num_windows: number of windows device supports
|
2017-04-10 13:55:10 +00:00
|
|
|
*
|
|
|
|
* Invoke to initialize the pci_epc_mem structure used by the
|
|
|
|
* endpoint functions to allocate mapped PCI address.
|
|
|
|
*/
|
2020-05-07 12:33:16 +00:00
|
|
|
int pci_epc_multi_mem_init(struct pci_epc *epc,
|
|
|
|
struct pci_epc_mem_window *windows,
|
|
|
|
unsigned int num_windows)
|
2017-04-10 13:55:10 +00:00
|
|
|
{
|
2020-05-07 12:33:16 +00:00
|
|
|
struct pci_epc_mem *mem = NULL;
|
|
|
|
unsigned long *bitmap = NULL;
|
2017-08-18 14:57:56 +00:00
|
|
|
unsigned int page_shift;
|
2020-05-07 12:33:16 +00:00
|
|
|
size_t page_size;
|
2017-08-18 14:57:56 +00:00
|
|
|
int bitmap_size;
|
2020-05-07 12:33:16 +00:00
|
|
|
int pages;
|
|
|
|
int ret;
|
|
|
|
int i;
|
2017-08-18 14:57:56 +00:00
|
|
|
|
2020-05-07 12:33:16 +00:00
|
|
|
epc->num_windows = 0;
|
2017-08-18 14:57:56 +00:00
|
|
|
|
2020-05-07 12:33:16 +00:00
|
|
|
if (!windows || !num_windows)
|
|
|
|
return -EINVAL;
|
2017-04-10 13:55:10 +00:00
|
|
|
|
2020-05-07 12:33:16 +00:00
|
|
|
epc->windows = kcalloc(num_windows, sizeof(*epc->windows), GFP_KERNEL);
|
|
|
|
if (!epc->windows)
|
|
|
|
return -ENOMEM;
|
2017-04-10 13:55:10 +00:00
|
|
|
|
2020-05-07 12:33:16 +00:00
|
|
|
for (i = 0; i < num_windows; i++) {
|
|
|
|
page_size = windows[i].page_size;
|
|
|
|
if (page_size < PAGE_SIZE)
|
|
|
|
page_size = PAGE_SIZE;
|
|
|
|
page_shift = ilog2(page_size);
|
|
|
|
pages = windows[i].size >> page_shift;
|
|
|
|
bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
|
2017-04-10 13:55:10 +00:00
|
|
|
|
2020-05-07 12:33:16 +00:00
|
|
|
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
|
|
|
|
if (!mem) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
i--;
|
|
|
|
goto err_mem;
|
|
|
|
}
|
2017-04-10 13:55:10 +00:00
|
|
|
|
2020-05-07 12:33:16 +00:00
|
|
|
bitmap = kzalloc(bitmap_size, GFP_KERNEL);
|
|
|
|
if (!bitmap) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
kfree(mem);
|
|
|
|
i--;
|
|
|
|
goto err_mem;
|
|
|
|
}
|
|
|
|
|
|
|
|
mem->window.phys_base = windows[i].phys_base;
|
|
|
|
mem->window.size = windows[i].size;
|
|
|
|
mem->window.page_size = page_size;
|
|
|
|
mem->bitmap = bitmap;
|
|
|
|
mem->pages = pages;
|
|
|
|
mutex_init(&mem->lock);
|
|
|
|
epc->windows[i] = mem;
|
|
|
|
}
|
|
|
|
|
|
|
|
epc->mem = epc->windows[0];
|
|
|
|
epc->num_windows = num_windows;
|
2017-04-10 13:55:10 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_mem:
|
2020-05-07 12:33:16 +00:00
|
|
|
for (; i >= 0; i--) {
|
|
|
|
mem = epc->windows[i];
|
|
|
|
kfree(mem->bitmap);
|
|
|
|
kfree(mem);
|
|
|
|
}
|
|
|
|
kfree(epc->windows);
|
2017-04-10 13:55:10 +00:00
|
|
|
|
2020-05-07 12:33:16 +00:00
|
|
|
return ret;
|
2017-04-10 13:55:10 +00:00
|
|
|
}
|
2020-05-07 12:33:16 +00:00
|
|
|
EXPORT_SYMBOL_GPL(pci_epc_multi_mem_init);
|
2017-04-10 13:55:10 +00:00
|
|
|
|
2020-05-07 12:33:15 +00:00
|
|
|
int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base,
|
|
|
|
size_t size, size_t page_size)
|
|
|
|
{
|
2020-05-07 12:33:16 +00:00
|
|
|
struct pci_epc_mem_window mem_window;
|
|
|
|
|
|
|
|
mem_window.phys_base = base;
|
|
|
|
mem_window.size = size;
|
|
|
|
mem_window.page_size = page_size;
|
|
|
|
|
|
|
|
return pci_epc_multi_mem_init(epc, &mem_window, 1);
|
2020-05-07 12:33:15 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(pci_epc_mem_init);
|
|
|
|
|
2017-04-10 13:55:10 +00:00
|
|
|
/**
|
|
|
|
* pci_epc_mem_exit() - cleanup the pci_epc_mem structure
|
|
|
|
* @epc: the EPC device that invoked pci_epc_mem_exit
|
|
|
|
*
|
|
|
|
* Invoke to cleanup the pci_epc_mem structure allocated in
|
|
|
|
* pci_epc_mem_init().
|
|
|
|
*/
|
|
|
|
void pci_epc_mem_exit(struct pci_epc *epc)
|
|
|
|
{
|
2020-05-07 12:33:16 +00:00
|
|
|
struct pci_epc_mem *mem;
|
|
|
|
int i;
|
2017-04-10 13:55:10 +00:00
|
|
|
|
2020-05-07 12:33:16 +00:00
|
|
|
if (!epc->num_windows)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (i = 0; i < epc->num_windows; i++) {
|
|
|
|
mem = epc->windows[i];
|
|
|
|
kfree(mem->bitmap);
|
|
|
|
kfree(mem);
|
|
|
|
}
|
|
|
|
kfree(epc->windows);
|
|
|
|
|
|
|
|
epc->windows = NULL;
|
2017-04-10 13:55:10 +00:00
|
|
|
epc->mem = NULL;
|
2020-05-07 12:33:16 +00:00
|
|
|
epc->num_windows = 0;
|
2017-04-10 13:55:10 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(pci_epc_mem_exit);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* pci_epc_mem_alloc_addr() - allocate memory address from EPC addr space
|
|
|
|
* @epc: the EPC device on which memory has to be allocated
|
|
|
|
* @phys_addr: populate the allocated physical address here
|
|
|
|
* @size: the size of the address space that has to be allocated
|
|
|
|
*
|
|
|
|
* Invoke to allocate memory address from the EPC address space. This
|
|
|
|
* is usually done to map the remote RC address into the local system.
|
|
|
|
*/
|
|
|
|
void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
|
|
|
|
phys_addr_t *phys_addr, size_t size)
|
|
|
|
{
|
2020-02-24 09:53:36 +00:00
|
|
|
void __iomem *virt_addr = NULL;
|
2020-05-07 12:33:16 +00:00
|
|
|
struct pci_epc_mem *mem;
|
|
|
|
unsigned int page_shift;
|
|
|
|
size_t align_size;
|
|
|
|
int pageno;
|
2017-08-18 14:57:56 +00:00
|
|
|
int order;
|
2020-05-07 12:33:16 +00:00
|
|
|
int i;
|
2017-08-18 14:57:56 +00:00
|
|
|
|
2020-05-07 12:33:16 +00:00
|
|
|
for (i = 0; i < epc->num_windows; i++) {
|
|
|
|
mem = epc->windows[i];
|
|
|
|
mutex_lock(&mem->lock);
|
|
|
|
align_size = ALIGN(size, mem->window.page_size);
|
|
|
|
order = pci_epc_mem_get_order(mem, align_size);
|
2017-04-10 13:55:10 +00:00
|
|
|
|
2020-05-07 12:33:16 +00:00
|
|
|
pageno = bitmap_find_free_region(mem->bitmap, mem->pages,
|
|
|
|
order);
|
|
|
|
if (pageno >= 0) {
|
|
|
|
page_shift = ilog2(mem->window.page_size);
|
|
|
|
*phys_addr = mem->window.phys_base +
|
|
|
|
((phys_addr_t)pageno << page_shift);
|
|
|
|
virt_addr = ioremap(*phys_addr, align_size);
|
|
|
|
if (!virt_addr) {
|
|
|
|
bitmap_release_region(mem->bitmap,
|
|
|
|
pageno, order);
|
|
|
|
mutex_unlock(&mem->lock);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
mutex_unlock(&mem->lock);
|
|
|
|
return virt_addr;
|
|
|
|
}
|
|
|
|
mutex_unlock(&mem->lock);
|
|
|
|
}
|
2017-04-10 13:55:10 +00:00
|
|
|
|
|
|
|
return virt_addr;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(pci_epc_mem_alloc_addr);
|
|
|
|
|
2020-05-07 12:33:16 +00:00
|
|
|
static struct pci_epc_mem *pci_epc_get_matching_window(struct pci_epc *epc,
|
|
|
|
phys_addr_t phys_addr)
|
|
|
|
{
|
|
|
|
struct pci_epc_mem *mem;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < epc->num_windows; i++) {
|
|
|
|
mem = epc->windows[i];
|
|
|
|
|
|
|
|
if (phys_addr >= mem->window.phys_base &&
|
|
|
|
phys_addr < (mem->window.phys_base + mem->window.size))
|
|
|
|
return mem;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-04-10 13:55:10 +00:00
|
|
|
/**
|
|
|
|
* pci_epc_mem_free_addr() - free the allocated memory address
|
|
|
|
* @epc: the EPC device on which memory was allocated
|
|
|
|
* @phys_addr: the allocated physical address
|
|
|
|
* @virt_addr: virtual address of the allocated mem space
|
|
|
|
* @size: the size of the allocated address space
|
|
|
|
*
|
|
|
|
* Invoke to free the memory allocated using pci_epc_mem_alloc_addr.
|
|
|
|
*/
|
|
|
|
void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
|
|
|
|
void __iomem *virt_addr, size_t size)
|
|
|
|
{
|
2020-05-07 12:33:16 +00:00
|
|
|
struct pci_epc_mem *mem;
|
|
|
|
unsigned int page_shift;
|
|
|
|
size_t page_size;
|
2017-04-10 13:55:10 +00:00
|
|
|
int pageno;
|
2017-08-18 14:57:56 +00:00
|
|
|
int order;
|
2017-04-10 13:55:10 +00:00
|
|
|
|
2020-05-07 12:33:16 +00:00
|
|
|
mem = pci_epc_get_matching_window(epc, phys_addr);
|
|
|
|
if (!mem) {
|
|
|
|
pr_err("failed to get matching window\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
page_size = mem->window.page_size;
|
|
|
|
page_shift = ilog2(page_size);
|
2017-04-10 13:55:10 +00:00
|
|
|
iounmap(virt_addr);
|
2020-05-07 12:33:16 +00:00
|
|
|
pageno = (phys_addr - mem->window.phys_base) >> page_shift;
|
|
|
|
size = ALIGN(size, page_size);
|
2017-08-18 14:57:56 +00:00
|
|
|
order = pci_epc_mem_get_order(mem, size);
|
2020-02-24 09:53:36 +00:00
|
|
|
mutex_lock(&mem->lock);
|
2017-04-10 13:55:10 +00:00
|
|
|
bitmap_release_region(mem->bitmap, pageno, order);
|
2020-02-24 09:53:36 +00:00
|
|
|
mutex_unlock(&mem->lock);
|
2017-04-10 13:55:10 +00:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
|
|
|
|
|
|
|
|
/* Module metadata for the PCI endpoint controller address space manager. */
MODULE_DESCRIPTION("PCI EPC Address Space Management");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
|