RDMA/mlx5: Revise how the hysteresis scheme works for cache filling

Currently, if the work queue is running then it is in 'hysteresis' mode and
will fill until the cache reaches the high water mark. This implicit state
is very tricky and does not interact well with the pending counter.

Instead of self-rescheduling the work queue after add_keys() has started to
create the new MR, have the queue scheduled from reg_mr_callback() only
after the requested MR has been added.

This avoids the bad design of an in-rush of queued work doing back-to-back
add_keys() calls until EAGAIN is returned and then sleeping. The add_keys()
calls will now be paced one at a time as they complete, slowly filling up
the cache.

Also, fix pending so that it is only manipulated under the lock.

Link: https://lore.kernel.org/r/20200310082238.239865-12-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Author: Jason Gunthorpe <jgg@mellanox.com>
Date:   2020-03-10 10:22:37 +02:00
Parent: b9358bdbc7
Commit: 1c78a21a0c

2 changed files with 27 additions and 15 deletions
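
To make the new behaviour easier to follow before reading the diff, here is a
minimal user-space sketch of the water-mark hysteresis. It is illustrative
only: struct toy_ent, schedule_fill() and schedule_trim() are stand-ins for
the real cache entry, queue_work() and queue_delayed_work(); the field names
and the limit / 2 * limit thresholds mirror queue_adjust_cache_locked() in the
diff below.

#include <stdbool.h>
#include <stdio.h>

struct toy_ent {
	unsigned int available_mrs;	/* MRs ready for immediate allocation */
	unsigned int pending;		/* MR creations already in flight */
	unsigned int limit;		/* low water mark */
	bool fill_to_high_water;	/* sticky "keep filling" state */
};

static void schedule_fill(void) { puts("queue fill work"); }
static void schedule_trim(void) { puts("queue trim work"); }

static void toy_adjust(struct toy_ent *ent)
{
	if (ent->available_mrs < ent->limit) {
		/* Dropped below the low water mark: start filling. */
		ent->fill_to_high_water = true;
		schedule_fill();
	} else if (ent->fill_to_high_water &&
		   ent->available_mrs + ent->pending < 2 * ent->limit) {
		/* Keep filling until available + in-flight reach 2 * limit. */
		schedule_fill();
	} else if (ent->available_mrs == 2 * ent->limit) {
		/* High water mark reached: stop filling. */
		ent->fill_to_high_water = false;
	} else if (ent->available_mrs > 2 * ent->limit) {
		/* Overshot the high water mark: trim the excess. */
		ent->fill_to_high_water = false;
		schedule_trim();
	}
}

int main(void)
{
	struct toy_ent ent = { .available_mrs = 3, .pending = 0, .limit = 8 };

	toy_adjust(&ent);	/* below limit: sets fill_to_high_water, queues fill */
	return 0;
}

The real function additionally returns early when the entry is disabled or
dev->fill_delay is set, and it must run under ent->lock.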

drivers/infiniband/hw/mlx5/mlx5_ib.h

@@ -700,6 +700,7 @@ struct mlx5_cache_ent {
	u32 page;
	u8 disabled:1;
	u8 fill_to_high_water:1;
	/*
	 * - available_mrs is the length of list head, ie the number of MRs

drivers/infiniband/hw/mlx5/mr.c

@@ -86,6 +86,7 @@ mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
@@ -134,11 +135,9 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context)
	list_add_tail(&mr->list, &ent->head);
	ent->available_mrs++;
	ent->total_mrs++;
	/* If we are doing fill_to_high_water then keep going. */
	queue_adjust_cache_locked(ent);
	ent->pending--;
	/*
	 * Creating is always done in response to some demand, so do not call
	 * queue_adjust_cache_locked().
	 */
	spin_unlock_irqrestore(&ent->lock, flags);
	if (!completion_done(&ent->compl))
@@ -384,11 +383,29 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
	lockdep_assert_held(&ent->lock);
	if (ent->disabled)
	if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
		return;
	if (ent->available_mrs < ent->limit ||
	    ent->available_mrs > 2 * ent->limit)
	if (ent->available_mrs < ent->limit) {
		ent->fill_to_high_water = true;
		queue_work(ent->dev->cache.wq, &ent->work);
	} else if (ent->fill_to_high_water &&
		   ent->available_mrs + ent->pending < 2 * ent->limit) {
		/*
		 * Once we start populating due to hitting a low water mark
		 * continue until we pass the high water mark.
		 */
		queue_work(ent->dev->cache.wq, &ent->work);
	} else if (ent->available_mrs == 2 * ent->limit) {
		ent->fill_to_high_water = false;
	} else if (ent->available_mrs > 2 * ent->limit) {
		/* Queue deletion of excess entries */
		ent->fill_to_high_water = false;
		if (ent->pending)
			queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
					   msecs_to_jiffies(1000));
		else
			queue_work(ent->dev->cache.wq, &ent->work);
	}
}
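
As the lockdep_assert_held() above indicates, queue_adjust_cache_locked() is
only valid while ent->lock is held. A hypothetical caller sketch (not part of
this patch) showing how a path that consumes a cached MR would re-evaluate the
water marks atomically with the counter update:

	/* Illustrative only: hand out one cached MR and re-check the water
	 * marks before dropping ent->lock, so any refill work is queued
	 * atomically with the available_mrs update. */
	spin_lock_irq(&ent->lock);
	if (!list_empty(&ent->head)) {
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->available_mrs--;
		queue_adjust_cache_locked(ent);	/* may queue &ent->work */
	}
	spin_unlock_irq(&ent->lock);

Because create_mkey_callback() now does the same under the same lock, each
completed MR creation paces exactly one further add_keys() instead of the work
item re-queuing itself.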
static void __cache_work_func(struct mlx5_cache_ent *ent)
@@ -401,11 +418,11 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
	if (ent->disabled)
		goto out;
	if (ent->available_mrs + ent->pending < 2 * ent->limit &&
	if (ent->fill_to_high_water &&
	    ent->available_mrs + ent->pending < 2 * ent->limit &&
	    !READ_ONCE(dev->fill_delay)) {
		spin_unlock_irq(&ent->lock);
		err = add_keys(ent, 1);
		spin_lock_irq(&ent->lock);
		if (ent->disabled)
			goto out;
@@ -424,12 +441,6 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
						   msecs_to_jiffies(1000));
			}
		}
		/*
		 * Once we start populating due to hitting a low water mark
		 * continue until we pass the high water mark.
		 */
		if (ent->available_mrs + ent->pending < 2 * ent->limit)
			queue_work(cache->wq, &ent->work);
	} else if (ent->available_mrs > 2 * ent->limit) {
		bool need_delay;