mirror of
https://github.com/torvalds/linux.git
synced 2024-12-14 23:25:54 +00:00
882214e2b1
* Add an interval tree implementation for ODP umems. Create an interval tree for each ucontext (including a count of the number of ODP MRs in this context, semaphore, etc.), and register ODP umems in the interval tree. * Add MMU notifiers handling functions, using the interval tree to notify only the relevant umems and underlying MRs. * Register to receive MMU notifier events from the MM subsystem upon ODP MR registration (and unregister accordingly). * Add a completion object to synchronize the destruction of ODP umems. * Add mechanism to abort page faults when there's a concurrent invalidation. The way we synchronize between concurrent invalidations and page faults is by keeping a counter of currently running invalidations, and a sequence number that is incremented whenever an invalidation is caught. The page fault code checks the counter and also verifies that the sequence number hasn't progressed before it updates the umem's page tables. This is similar to what the kvm module does. In order to prevent the case where we register a umem in the middle of an ongoing notifier, we also keep a per ucontext counter of the total number of active mmu notifiers. We only enable new umems when all the running notifiers complete. Signed-off-by: Sagi Grimberg <sagig@mellanox.com> Signed-off-by: Shachar Raindel <raindel@mellanox.com> Signed-off-by: Haggai Eran <haggaie@mellanox.com> Signed-off-by: Yuval Dagan <yuvalda@mellanox.com> Signed-off-by: Roland Dreier <roland@purestorage.com>
95 lines
3.1 KiB
C
95 lines
3.1 KiB
C
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interval_tree_generic.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <rdma/ib_umem_odp.h>
/*
 * The ib_umem list keeps track of memory regions for which the HW
 * device requests to receive notification when the related memory
 * mapping is changed.
 *
 * ib_umem_lock protects the list.
 */
static inline u64 node_start(struct umem_odp_node *n)
|
|
{
|
|
struct ib_umem_odp *umem_odp =
|
|
container_of(n, struct ib_umem_odp, interval_tree);
|
|
|
|
return ib_umem_start(umem_odp->umem);
|
|
}
|
|
|
|
/* Note that the representation of the intervals in the interval tree
|
|
* considers the ending point as contained in the interval, while the
|
|
* function ib_umem_end returns the first address which is not contained
|
|
* in the umem.
|
|
*/
|
|
static inline u64 node_last(struct umem_odp_node *n)
|
|
{
|
|
struct ib_umem_odp *umem_odp =
|
|
container_of(n, struct ib_umem_odp, interval_tree);
|
|
|
|
return ib_umem_end(umem_odp->umem) - 1;
|
|
}
|
|
|
|
/*
 * Instantiate the interval-tree operations used below
 * (rbt_ib_umem_iter_first(), rbt_ib_umem_iter_next(), ...) for
 * struct umem_odp_node, keyed by the closed byte range
 * [node_start(n), node_last(n)].
 */
INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
		     node_start, node_last, , rbt_ib_umem)
/* @last is not a part of the interval. See comment for function
|
|
* node_last.
|
|
*/
|
|
int rbt_ib_umem_for_each_in_range(struct rb_root *root,
|
|
u64 start, u64 last,
|
|
umem_call_back cb,
|
|
void *cookie)
|
|
{
|
|
int ret_val = 0;
|
|
struct umem_odp_node *node;
|
|
struct ib_umem_odp *umem;
|
|
|
|
if (unlikely(start == last))
|
|
return ret_val;
|
|
|
|
for (node = rbt_ib_umem_iter_first(root, start, last - 1); node;
|
|
node = rbt_ib_umem_iter_next(node, start, last - 1)) {
|
|
umem = container_of(node, struct ib_umem_odp, interval_tree);
|
|
ret_val = cb(umem->umem, start, last, cookie) || ret_val;
|
|
}
|
|
|
|
return ret_val;
|
|
}
|