cxgb4: Synchronize access to mailbox

The issue comes when there are multiple threads attempting to use
the mailbox facility at the same time.
When DCB operations and interface up/down is run in a loop for every
0.1 sec, we observed mailbox collisions. And out of the two commands
one would fail with the present code, since we don't queue the second
command.

To overcome the above issue, added a queue to access the mailbox.
Whenever a mailbox command is issued, add it to the queue. If it is at
the head, issue the mailbox command; otherwise, wait for the existing
command to complete. Usually a command takes less than a millisecond to
complete.

Also time out of the loop if the command under execution takes a
long time to run.

In reality, the number of mailbox access collisions is going to be
very rare, since no one runs such an abusive script.

Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Hariprasad Shenai 2017-01-06 08:47:20 +05:30 committed by David S. Miller
parent 4b92ea81cc
commit 4055ae5e6d
3 changed files with 69 additions and 1 deletions

View File

@ -787,6 +787,10 @@ struct vf_info {
bool pf_set_mac;
};
struct mbox_list {
struct list_head list;
};
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@ -849,6 +853,10 @@ struct adapter {
struct work_struct db_drop_task;
bool tid_release_task_busy;
/* lock for mailbox cmd list */
spinlock_t mbox_lock;
struct mbox_list mlist;
/* support for mailbox command/reply logging */
#define T4_OS_LOG_MBOX_CMDS 256
struct mbox_cmd_log *mbox_log;

View File

@ -4707,6 +4707,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
spin_lock_init(&adapter->win0_lock);
spin_lock_init(&adapter->mbox_lock);
INIT_LIST_HEAD(&adapter->mlist.list);
INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
INIT_WORK(&adapter->db_full_task, process_db_full);

View File

@ -284,6 +284,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
};
struct mbox_list entry;
u16 access = 0;
u16 execute = 0;
u32 v;
@ -311,11 +312,61 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
timeout = -timeout;
}
/* Queue ourselves onto the mailbox access list. When our entry is at
* the front of the list, we have rights to access the mailbox. So we
* wait [for a while] till we're at the front [or bail out with an
* EBUSY] ...
*/
spin_lock(&adap->mbox_lock);
list_add_tail(&entry.list, &adap->mlist.list);
spin_unlock(&adap->mbox_lock);
delay_idx = 0;
ms = delay[0];
for (i = 0; ; i += ms) {
/* If we've waited too long, return a busy indication. This
 * really ought to be based on our initial position in the
 * mailbox access list but this is a start. We very rarely
 * contend on access to the mailbox ...
 */
if (i > FW_CMD_MAX_TIMEOUT) {
spin_lock(&adap->mbox_lock);
list_del(&entry.list);
spin_unlock(&adap->mbox_lock);
ret = -EBUSY;
t4_record_mbox(adap, cmd, size, access, ret);
return ret;
}
/* If we're at the head, break out and start the mailbox
* protocol.
*/
if (list_first_entry(&adap->mlist.list, struct mbox_list,
list) == &entry)
break;
/* Delay for a bit before checking again ... */
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
if (delay_idx < ARRAY_SIZE(delay) - 1)
delay_idx++;
msleep(ms);
} else {
mdelay(ms);
}
}
/* Loop trying to get ownership of the mailbox. Return an error
* if we can't gain ownership.
*/
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
if (v != MBOX_OWNER_DRV) {
spin_lock(&adap->mbox_lock);
list_del(&entry.list);
spin_unlock(&adap->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
return ret;
@ -366,6 +417,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
execute = i + ms;
t4_record_mbox(adap, cmd_rpl,
	       MBOX_LEN, access, execute);
spin_lock(&adap->mbox_lock);
list_del(&entry.list);
spin_unlock(&adap->mbox_lock);
return -FW_CMD_RETVAL_G((int)res);
}
}
@ -375,6 +429,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
	*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
spin_lock(&adap->mbox_lock);
list_del(&entry.list);
spin_unlock(&adap->mbox_lock);
return ret;
}