RDMA/cache: Move the cache per-port data into the main ib_port_data

Like the other cases, there is no real reason to have a separate array just for
the cache. This larger conversion gets its own patch.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Author: Jason Gunthorpe <jgg@mellanox.com>
Date:   2019-02-12 21:12:49 -07:00
commit: 8faea9fd4a
parent: 8ceb1357b3

 2 files changed, 33 insertions(+), 59 deletions(-)
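For orientation before the diff: the patch changes both where the per-port cache lives and how it is indexed. The sketch below uses simplified stand-in types and names (`old_device`, `new_device`, `old_get_lmc`, etc. are hypothetical, not the real kernel definitions); the key point it illustrates is that `port_data[]` is sized to cover the native port numbers, so the caller-visible port number indexes it directly with no rebasing.

	/* Simplified stand-ins; the real structures live in include/rdma/ib_verbs.h. */
	struct ib_port_cache_sketch { unsigned char lmc; /* pkey/gid tables, ... */ };

	struct old_device {
		/* Before: a separate kcalloc'd array, zero-based. */
		struct ib_port_cache_sketch *cache_ports;
	};

	struct port_data_sketch {
		/* After: the cache rides along in the existing per-port array. */
		struct ib_port_cache_sketch cache;
	};

	struct new_device {
		struct port_data_sketch *port_data; /* indexed by port number */
	};

	/* Non-switch devices number their ports from 1. */
	static unsigned int rdma_start_port_sketch(void) { return 1; }

	/* Before: every access had to rebase the port number onto the array. */
	static unsigned char old_get_lmc(struct old_device *dev, unsigned int port_num)
	{
		return dev->cache_ports[port_num - rdma_start_port_sketch()].lmc;
	}

	/* After: direct indexing, and one less allocation to manage. */
	static unsigned char new_get_lmc(struct new_device *dev, unsigned int port_num)
	{
		return dev->port_data[port_num].cache.lmc;
	}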

drivers/infiniband/core/cache.c

@@ -185,7 +185,7 @@ EXPORT_SYMBOL(ib_cache_gid_parse_type_str);
 
 static struct ib_gid_table *rdma_gid_table(struct ib_device *device, u8 port)
 {
-	return device->cache.ports[port - rdma_start_port(device)].gid;
+	return device->port_data[port].cache.gid;
 }
 
 static bool is_gid_entry_free(const struct ib_gid_table_entry *entry)
@@ -765,7 +765,7 @@ err_free_table:
 	return NULL;
 }
 
-static void release_gid_table(struct ib_device *device, u8 port,
+static void release_gid_table(struct ib_device *device,
 			      struct ib_gid_table *table)
 {
 	bool leak = false;
@@ -863,31 +863,27 @@ static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
 
 static void gid_table_release_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table *table;
-	u8 port;
+	unsigned int p;
 
-	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
-		table = ib_dev->cache.ports[port].gid;
-		release_gid_table(ib_dev, port, table);
-		ib_dev->cache.ports[port].gid = NULL;
+	rdma_for_each_port (ib_dev, p) {
+		release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
+		ib_dev->port_data[p].cache.gid = NULL;
 	}
 }
 
 static int _gid_table_setup_one(struct ib_device *ib_dev)
 {
-	u8 port;
 	struct ib_gid_table *table;
+	unsigned int rdma_port;
 
-	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
-		u8 rdma_port = port + rdma_start_port(ib_dev);
-
+	rdma_for_each_port (ib_dev, rdma_port) {
 		table = alloc_gid_table(
 			ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
 		if (!table)
 			goto rollback_table_setup;
 
 		gid_table_reserve_default(ib_dev, rdma_port, table);
-		ib_dev->cache.ports[port].gid = table;
+		ib_dev->port_data[rdma_port].cache.gid = table;
 	}
 	return 0;
@@ -898,14 +894,11 @@ rollback_table_setup:
 
 static void gid_table_cleanup_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table *table;
-	u8 port;
+	unsigned int p;
 
-	for (port = 0; port < ib_dev->phys_port_cnt; port++) {
-		table = ib_dev->cache.ports[port].gid;
-		cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
-				       table);
-	}
+	rdma_for_each_port (ib_dev, p)
+		cleanup_gid_table_port(ib_dev, p,
+				       ib_dev->port_data[p].cache.gid);
 }
 
 static int gid_table_setup_one(struct ib_device *ib_dev)
@@ -983,17 +976,17 @@ const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
 			     GID_ATTR_FIND_MASK_GID_TYPE;
 	struct ib_gid_attr gid_attr_val = {.ndev = ndev, .gid_type = gid_type};
-	u8 p;
+	unsigned int p;
 
 	if (ndev)
 		mask |= GID_ATTR_FIND_MASK_NETDEV;
 
-	for (p = 0; p < device->phys_port_cnt; p++) {
+	rdma_for_each_port(device, p) {
 		struct ib_gid_table *table;
 		unsigned long flags;
 		int index;
 
-		table = device->cache.ports[p].gid;
+		table = device->port_data[p].cache.gid;
 		read_lock_irqsave(&table->rwlock, flags);
 		index = find_gid(table, gid, &gid_attr_val, false, mask, NULL);
 		if (index >= 0) {
@@ -1025,7 +1018,7 @@ int ib_get_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
+	cache = device->port_data[port_num].cache.pkey;
 
 	if (index < 0 || index >= cache->table_len)
 		ret = -EINVAL;
@@ -1043,14 +1036,12 @@ int ib_get_cached_subnet_prefix(struct ib_device *device,
 			       u64 *sn_pfx)
 {
 	unsigned long flags;
-	int p;
 
 	if (!rdma_is_port_valid(device, port_num))
 		return -EINVAL;
 
-	p = port_num - rdma_start_port(device);
 	read_lock_irqsave(&device->cache.lock, flags);
-	*sn_pfx = device->cache.ports[p].subnet_prefix;
+	*sn_pfx = device->port_data[port_num].cache.subnet_prefix;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return 0;
@@ -1073,7 +1064,7 @@ int ib_find_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
+	cache = device->port_data[port_num].cache.pkey;
 
 	*index = -1;
@@ -1113,7 +1104,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.ports[port_num - rdma_start_port(device)].pkey;
+	cache = device->port_data[port_num].cache.pkey;
 
 	*index = -1;
@@ -1141,7 +1132,7 @@ int ib_get_cached_lmc(struct ib_device *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*lmc = device->cache.ports[port_num - rdma_start_port(device)].lmc;
+	*lmc = device->port_data[port_num].cache.lmc;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1159,8 +1150,7 @@ int ib_get_cached_port_state(struct ib_device *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*port_state = device->cache.ports[port_num
-		- rdma_start_port(device)].port_state;
+	*port_state = device->port_data[port_num].cache.port_state;
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1361,16 +1351,13 @@ static void ib_cache_update(struct ib_device *device,
 
 	write_lock_irq(&device->cache.lock);
 
-	old_pkey_cache = device->cache.ports[port -
-					     rdma_start_port(device)].pkey;
+	old_pkey_cache = device->port_data[port].cache.pkey;
 
-	device->cache.ports[port - rdma_start_port(device)].pkey = pkey_cache;
-	device->cache.ports[port - rdma_start_port(device)].lmc = tprops->lmc;
-	device->cache.ports[port - rdma_start_port(device)].port_state =
-		tprops->state;
+	device->port_data[port].cache.pkey = pkey_cache;
+	device->port_data[port].cache.lmc = tprops->lmc;
+	device->port_data[port].cache.port_state = tprops->state;
 
-	device->cache.ports[port - rdma_start_port(device)].subnet_prefix =
-		tprops->subnet_prefix;
+	device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix;
 
 	write_unlock_irq(&device->cache.lock);
 
 	if (enforce_security)
@@ -1433,19 +1420,9 @@ int ib_cache_setup_one(struct ib_device *device)
 
 	rwlock_init(&device->cache.lock);
 
-	device->cache.ports =
-		kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1,
-			sizeof(*device->cache.ports),
-			GFP_KERNEL);
-	if (!device->cache.ports)
-		return -ENOMEM;
-
 	err = gid_table_setup_one(device);
-	if (err) {
-		kfree(device->cache.ports);
-		device->cache.ports = NULL;
+	if (err)
 		return err;
-	}
 
 	rdma_for_each_port (device, p)
 		ib_cache_update(device, p, true);
@@ -1458,10 +1435,7 @@ int ib_cache_setup_one(struct ib_device *device)
 
 void ib_cache_release_one(struct ib_device *device)
 {
-	int p;
-
-	if (!device->cache.ports)
-		return;
+	unsigned int p;
 
 	/*
 	 * The release function frees all the cache elements.
@@ -1469,11 +1443,10 @@ void ib_cache_release_one(struct ib_device *device)
 	 * all the device's resources when the cache could no
 	 * longer be accessed.
 	 */
-	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
-		kfree(device->cache.ports[p].pkey);
+	rdma_for_each_port (device, p)
+		kfree(device->port_data[p].cache.pkey);
 
 	gid_table_release_one(device);
-	kfree(device->cache.ports);
 }
 
 void ib_cache_cleanup_one(struct ib_device *device)
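All of the loop conversions above lean on rdma_for_each_port() to walk the device's valid port range instead of hand-rolled zero-based loops over phys_port_cnt. Below is a rough standalone model of that iteration; the struct and helper names are sketch-only stand-ins, and the real macro in include/rdma/ib_verbs.h additionally type-checks the iterator variable.

	#include <stdio.h>

	struct dev_sketch {
		int is_switch;               /* IB switches expose only port 0 */
		unsigned int phys_port_cnt;
	};

	/* Stand-ins for the kernel helpers: switch devices use port 0,
	 * everything else numbers ports from 1. */
	static unsigned int start_port(const struct dev_sketch *dev)
	{
		return dev->is_switch ? 0 : 1;
	}

	static unsigned int end_port(const struct dev_sketch *dev)
	{
		return dev->is_switch ? 0 : dev->phys_port_cnt;
	}

	/* Approximate expansion of rdma_for_each_port(). */
	#define for_each_port_sketch(dev, iter)                             \
		for ((iter) = start_port(dev); (iter) <= end_port(dev); (iter)++)

	int main(void)
	{
		struct dev_sketch dev = { .is_switch = 0, .phys_port_cnt = 2 };
		unsigned int p;

		/* Prints ports 1 and 2: valid port numbers that also index
		 * port_data[] directly after this patch. */
		for_each_port_sketch(&dev, p)
			printf("port %u\n", p);
		return 0;
	}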

include/rdma/ib_verbs.h

@@ -2186,7 +2186,6 @@ struct ib_port_cache {
 
 struct ib_cache {
 	rwlock_t lock;
 	struct ib_event_handler event_handler;
-	struct ib_port_cache *ports;
 };
 
 struct iw_cm_verbs;
@@ -2203,6 +2202,8 @@ struct ib_port_data {
 
 	spinlock_t pkey_list_lock;
 	struct list_head pkey_list;
+
+	struct ib_port_cache cache;
 };
 
 /* rdma netdev type - specifies protocol type */
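The header change above is the point of the patch: ib_cache keeps only the lock and event handler, while each port's cache is embedded in ib_port_data, which the core already allocates once per device. A hedged sketch of that single allocation, using hypothetical names and plain calloc in place of the kernel's kcalloc:

	#include <stdlib.h>

	struct port_cache_sketch { void *pkey; void *gid; };

	struct port_data_sketch {
		/* ... immutable data, pkey list, ... */
		struct port_cache_sketch cache; /* embedded, no separate array */
	};

	struct dev_sketch {
		unsigned int phys_port_cnt;
		struct port_data_sketch *port_data;
	};

	/* One array sized end_port + 1 serves every per-port consumer,
	 * including the cache, so port numbers index it directly. */
	static int alloc_port_data_sketch(struct dev_sketch *dev)
	{
		dev->port_data = calloc(dev->phys_port_cnt + 1,
					sizeof(*dev->port_data));
		return dev->port_data ? 0 : -1;
	}

This is also why the error path in ib_cache_setup_one() collapses in the first hunk of the diff: there is no cache-private allocation left to unwind.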