/*
 * (C) 2001 Clemson University and The University of Chicago
 *
 * See COPYING in top-level directory.
 */
#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"

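/*
 * A slot_map arbitrates use of a fixed pool of shared-memory buffer
 * slots.  ->map is a bitmap of in-use slots and ->count its size in
 * bits; ->c counts the free slots, but also doubles as a state flag:
 * it is -1 while no map is installed, and it drops below that while
 * a map is being torn down, so sleepers can tell "all slots busy"
 * apart from "map gone".
 */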
struct slot_map {
	int c;
	wait_queue_head_t q;
	int count;
	unsigned long *map;
};

static struct slot_map rw_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q)
};
static struct slot_map readdir_map = {
	.c = -1,
	.q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q)
};

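/*
 * Publish a freshly initialized map: record the slot count and the
 * in-use bitmap, then wake every sleeper that was waiting for a map
 * to show up.
 */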
static void install(struct slot_map *m, int count, unsigned long *map)
{
	spin_lock(&m->q.lock);
	m->c = m->count = count;
	m->map = map;
	wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}

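/*
 * Start tearing a map down.  Subtracting ->count + 1 pushes ->c
 * negative no matter how many slots are still outstanding; as each
 * one is put() back, ->c creeps up and hits -1 exactly when the last
 * slot is returned, which is what run_down() below waits for.
 */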
static void mark_killed(struct slot_map *m)
{
	spin_lock(&m->q.lock);
	m->c -= m->count + 1;
	spin_unlock(&m->q.lock);
}

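/*
 * Sleep uninterruptibly until every outstanding slot has been
 * returned (->c back at -1), then drop the bitmap pointer.
 */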
static void run_down(struct slot_map *m)
{
	DEFINE_WAIT(wait);
	spin_lock(&m->q.lock);
	if (m->c != -1) {
		for (;;) {
			if (likely(list_empty(&wait.task_list)))
				__add_wait_queue_tail(&m->q, &wait);
			set_current_state(TASK_UNINTERRUPTIBLE);

			if (m->c == -1)
				break;

			spin_unlock(&m->q.lock);
			schedule();
			spin_lock(&m->q.lock);
		}
		__remove_wait_queue(&m->q, &wait);
		__set_current_state(TASK_RUNNING);
	}
	m->map = NULL;
	spin_unlock(&m->q.lock);
}

static void put(struct slot_map *m, int slot)
{
	int v;
	spin_lock(&m->q.lock);
	__clear_bit(slot, m->map);
	v = ++m->c;
	if (unlikely(v == 1))	/* no free slots -> one free slot */
		wake_up_locked(&m->q);
	else if (unlikely(v == -1))	/* finished dying */
		wake_up_all_locked(&m->q);
	spin_unlock(&m->q.lock);
}

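/*
 * Sleep until a slot frees up or we give up.  Two budgets are in
 * play: the overall wait is bounded by slot_timeout_secs, but while
 * no map is installed at all (->c < 0) each nap is clipped to
 * ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS, so a daemon that never shows up
 * can't pin us for the full slot timeout.  The "left = t + (left - n)"
 * arithmetic charges only the time actually slept against the budget.
 * Called with ->q.lock held and returns with it held; the lock is
 * dropped around the sleep.
 */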
static int wait_for_free(struct slot_map *m)
{
	long left = slot_timeout_secs * HZ;
	DEFINE_WAIT(wait);

	do {
		long n = left, t;
		if (likely(list_empty(&wait.task_list)))
			__add_wait_queue_tail_exclusive(&m->q, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		if (m->c > 0)
			break;

		if (m->c < 0) {
			/* we are waiting for the map to be installed */
			/* it had better show up soon, or we give up */
			if (n > ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ)
				n = ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ;
		}
		spin_unlock(&m->q.lock);
		t = schedule_timeout(n);
		spin_lock(&m->q.lock);
		if (unlikely(!t) && n != left && m->c < 0)
			left = t;
		else
			left = t + (left - n);
		if (unlikely(signal_pending(current)))
			left = -EINTR;
	} while (left > 0);

	if (!list_empty(&wait.task_list))
		list_del(&wait.task_list);
	else if (left <= 0 && waitqueue_active(&m->q))
		__wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
	__set_current_state(TASK_RUNNING);

	if (likely(left > 0))
		return 0;

	return left < 0 ? -EINTR : -ETIMEDOUT;
}

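/*
 * Claim a free slot: returns its index into ->map, or -EINTR /
 * -ETIMEDOUT if wait_for_free() gave up.  Once ->c is known to be
 * positive the bitmap must contain a zero bit, so
 * find_first_zero_bit() cannot fail.
 */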
static int get(struct slot_map *m)
{
	int res = 0;
	spin_lock(&m->q.lock);
	if (unlikely(m->c <= 0))
		res = wait_for_free(m);
	if (likely(!res)) {
		m->c--;
		res = find_first_zero_bit(m->map, m->count);
		__set_bit(res, m->map);
	}
	spin_unlock(&m->q.lock);
	return res;
}

/* used to describe mapped buffers */
struct orangefs_bufmap_desc {
	void *uaddr;			/* user space address pointer */
	struct page **page_array;	/* array of mapped pages */
	int array_count;		/* size of above arrays */
	struct list_head list_link;
};

static struct orangefs_bufmap {
	int desc_size;
	int desc_shift;
	int desc_count;
	int total_size;
	int page_count;

	struct page **page_array;
	struct orangefs_bufmap_desc *desc_array;

	/* array to track usage of buffer descriptors */
	unsigned long *buffer_index_array;

	/* array to track usage of buffer descriptors for readdir */
#define N DIV_ROUND_UP(ORANGEFS_READDIR_DEFAULT_DESC_COUNT, BITS_PER_LONG)
	unsigned long readdir_index_array[N];
#undef N
} *__orangefs_bufmap;

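/*
 * There is at most one bufmap at a time, handed to us by the
 * client-core daemon; __orangefs_bufmap is guarded by the spinlock
 * below.  The two slot maps at the top of the file hand out indices
 * into buffer_index_array and readdir_index_array respectively.
 */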
static DEFINE_SPINLOCK(orangefs_bufmap_lock);

static void
orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
{
	int i;

	for (i = 0; i < bufmap->page_count; i++)
		put_page(bufmap->page_array[i]);
}

static void
orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
{
	kfree(bufmap->page_array);
	kfree(bufmap->desc_array);
	kfree(bufmap->buffer_index_array);
	kfree(bufmap);
}

/*
 * XXX: Can the size and shift change while the caller gives up the
 * XXX: lock between calling this and doing something useful?
 */

int orangefs_bufmap_size_query(void)
{
	struct orangefs_bufmap *bufmap;
	int size = 0;
	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		size = bufmap->desc_size;
	spin_unlock(&orangefs_bufmap_lock);
	return size;
}

int orangefs_bufmap_shift_query(void)
{
	struct orangefs_bufmap *bufmap;
	int shift = 0;
	spin_lock(&orangefs_bufmap_lock);
	bufmap = __orangefs_bufmap;
	if (bufmap)
		shift = bufmap->desc_shift;
	spin_unlock(&orangefs_bufmap_lock);
	return shift;
}

static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq);
static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq);

/*
 * orangefs_get_bufmap_init
 *
 * If bufmap_init is 1, then the shared memory system, including the
 * buffer_index_array, is available. Otherwise, it is not.
 *
 * returns the value of bufmap_init
 */
int orangefs_get_bufmap_init(void)
{
	return __orangefs_bufmap ? 1 : 0;
}

static struct orangefs_bufmap *
orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;

	bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL);
	if (!bufmap)
		goto out;

	bufmap->total_size = user_desc->total_size;
	bufmap->desc_count = user_desc->count;
	bufmap->desc_size = user_desc->size;
	bufmap->desc_shift = ilog2(bufmap->desc_size);

	/*
	 * the bitmap is an array of unsigned longs, so the allocation
	 * must be sized in longs, not in bytes
	 */
	bufmap->buffer_index_array =
		kcalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG),
			sizeof(long), GFP_KERNEL);
	if (!bufmap->buffer_index_array) {
		gossip_err("orangefs: could not allocate %d buffer indices\n",
			   bufmap->desc_count);
		goto out_free_bufmap;
	}

	bufmap->desc_array =
		kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc),
			GFP_KERNEL);
	if (!bufmap->desc_array) {
		gossip_err("orangefs: could not allocate %d descriptors\n",
			   bufmap->desc_count);
		goto out_free_index_array;
	}

	bufmap->page_count = bufmap->total_size / PAGE_SIZE;

	/* allocate storage to track our page mappings */
	bufmap->page_array =
		kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);
	if (!bufmap->page_array)
		goto out_free_desc_array;

	return bufmap;

out_free_desc_array:
	kfree(bufmap->desc_array);
out_free_index_array:
	kfree(bufmap->buffer_index_array);
out_free_bufmap:
	kfree(bufmap);
out:
	return NULL;
}

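/*
 * Pin the daemon's buffer in place with get_user_pages_fast() (the
 * third argument asks for write access, since these pages are both
 * read and written through the kernel side), then carve the pinned
 * page array into descriptors of pages_per_desc pages each.
 */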
static int
orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
		struct ORANGEFS_dev_map_desc *user_desc)
{
	int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
	int offset = 0, ret, i;

	/* map the pages */
	ret = get_user_pages_fast((unsigned long)user_desc->ptr,
			bufmap->page_count, 1, bufmap->page_array);

	if (ret < 0)
		return ret;

	if (ret != bufmap->page_count) {
		gossip_err("orangefs error: asked for %d pages, only got %d.\n",
				bufmap->page_count, ret);

		for (i = 0; i < ret; i++) {
			SetPageError(bufmap->page_array[i]);
			put_page(bufmap->page_array[i]);
		}
		return -ENOMEM;
	}

	/*
	 * ideally we want to get kernel space pointers for each page, but
	 * we can't kmap that many pages at once if highmem is being used.
	 * so instead, we just kmap/kunmap the page address each time the
	 * kaddr is needed.
	 */
	for (i = 0; i < bufmap->page_count; i++)
		flush_dcache_page(bufmap->page_array[i]);

	/* build a list of available descriptors */
	for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
		bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
		bufmap->desc_array[i].array_count = pages_per_desc;
		bufmap->desc_array[i].uaddr =
			(user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
		offset += pages_per_desc;
	}

	return 0;
}

/*
 * orangefs_bufmap_initialize()
 *
 * initializes the mapped buffer interface
 *
 * returns 0 on success, -errno on failure
 */
int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
{
	struct orangefs_bufmap *bufmap;
	int ret = -EINVAL;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: called (ptr ("
		     "%p) sz (%d) cnt(%d).\n",
		     user_desc->ptr,
		     user_desc->size,
		     user_desc->count);

	if (user_desc->total_size < 0 ||
	    user_desc->size < 0 ||
	    user_desc->count < 0)
		goto out;

	/*
	 * sanity check alignment and size of buffer that caller wants to
	 * work with
	 */
	if (PAGE_ALIGN((unsigned long)user_desc->ptr) !=
	    (unsigned long)user_desc->ptr) {
		gossip_err("orangefs error: memory alignment (front). %p\n",
			   user_desc->ptr);
		goto out;
	}

	if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size))
	    != (unsigned long)(user_desc->ptr + user_desc->total_size)) {
		gossip_err("orangefs error: memory alignment (back).(%p + %d)\n",
			   user_desc->ptr,
			   user_desc->total_size);
		goto out;
	}

	if (user_desc->total_size != (user_desc->size * user_desc->count)) {
		gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n",
			   user_desc->total_size,
			   user_desc->size,
			   user_desc->count);
		goto out;
	}

	if ((user_desc->size % PAGE_SIZE) != 0) {
		gossip_err("orangefs error: bufmap size not page size divisible (%d).\n",
			   user_desc->size);
		goto out;
	}

	ret = -ENOMEM;
	bufmap = orangefs_bufmap_alloc(user_desc);
	if (!bufmap)
		goto out;

	ret = orangefs_bufmap_map(bufmap, user_desc);
	if (ret)
		goto out_free_bufmap;


	spin_lock(&orangefs_bufmap_lock);
	if (__orangefs_bufmap) {
		spin_unlock(&orangefs_bufmap_lock);
		gossip_err("orangefs: error: bufmap already initialized.\n");
		ret = -EINVAL;
		goto out_unmap_bufmap;
	}
	__orangefs_bufmap = bufmap;
	install(&rw_map,
		bufmap->desc_count,
		bufmap->buffer_index_array);
	install(&readdir_map,
		ORANGEFS_READDIR_DEFAULT_DESC_COUNT,
		bufmap->readdir_index_array);
	spin_unlock(&orangefs_bufmap_lock);

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_initialize: exiting normally\n");
	return 0;

out_unmap_bufmap:
	orangefs_bufmap_unmap(bufmap);
out_free_bufmap:
	orangefs_bufmap_free(bufmap);
out:
	return ret;
}

/*
 * orangefs_bufmap_finalize()
 *
 * shuts down the mapped buffer interface and releases any resources
 * associated with it
 *
 * no return value
 */
void orangefs_bufmap_finalize(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;
	if (!bufmap)
		return;
	gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n");
	mark_killed(&rw_map);
	mark_killed(&readdir_map);
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "orangefs_bufmap_finalize: exiting normally\n");
}

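/*
 * Tear-down happens in two steps: orangefs_bufmap_finalize() above
 * marks both slot maps killed so no new slots are handed out, and
 * this function then waits for every outstanding slot to drain
 * before the pages are unpinned and the bufmap freed.
 */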
void orangefs_bufmap_run_down(void)
{
	struct orangefs_bufmap *bufmap = __orangefs_bufmap;
	if (!bufmap)
		return;
	run_down(&rw_map);
	run_down(&readdir_map);
	spin_lock(&orangefs_bufmap_lock);
	__orangefs_bufmap = NULL;
	spin_unlock(&orangefs_bufmap_lock);
	orangefs_bufmap_unmap(bufmap);
	orangefs_bufmap_free(bufmap);
}

/*
 * orangefs_bufmap_get()
 *
 * gets a free mapped buffer descriptor, will sleep until one becomes
 * available if necessary
 *
 * returns slot on success, -errno on failure
 */
int orangefs_bufmap_get(void)
{
	return get(&rw_map);
}

/*
 * orangefs_bufmap_put()
 *
 * returns a mapped buffer descriptor to the collection
 *
 * no return value
 */
void orangefs_bufmap_put(int buffer_index)
{
	put(&rw_map, buffer_index);
}

/*
 * orangefs_readdir_index_get()
 *
 * gets a free descriptor, will sleep until one becomes
 * available if necessary.
 * Although the readdir buffers are not mapped into kernel space
 * we could do that at a later point in time. Regardless, these
 * indices are used by the client-core.
 *
 * returns slot on success, -errno on failure
 */
int orangefs_readdir_index_get(void)
{
	return get(&readdir_map);
}

void orangefs_readdir_index_put(int buffer_index)
{
	put(&readdir_map, buffer_index);
}

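/*
 * Typical slot life cycle on the I/O path (a sketch; the real
 * callers live in file.c):
 *
 *	int buffer_index = orangefs_bufmap_get();
 *
 *	if (buffer_index < 0)
 *		return buffer_index;	(-EINTR or -ETIMEDOUT)
 *	orangefs_bufmap_copy_from_iovec(iter, buffer_index, size);
 *	... hand buffer_index to the client-core, wait for its reply ...
 *	orangefs_bufmap_put(buffer_index);
 */
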
/*
 * we've been handed an iovec, we need to copy it to
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
				    int buffer_index,
				    size_t size)
{
	struct orangefs_bufmap_desc *to;
	int i;

	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	to = &__orangefs_bufmap->desc_array[buffer_index];
	for (i = 0; size; i++) {
		struct page *page = to->page_array[i];
		size_t n = size;
		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		n = copy_page_from_iter(page, 0, n, iter);
		if (!n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}

/*
 * we've been handed an iovec, we need to fill it from
 * the shared memory descriptor at "buffer_index".
 */
int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
				  int buffer_index,
				  size_t size)
{
	struct orangefs_bufmap_desc *from;
	int i;

	from = &__orangefs_bufmap->desc_array[buffer_index];
	gossip_debug(GOSSIP_BUFMAP_DEBUG,
		     "%s: buffer_index:%d: size:%zu:\n",
		     __func__, buffer_index, size);

	for (i = 0; size; i++) {
		struct page *page = from->page_array[i];
		size_t n = size;
		if (n > PAGE_SIZE)
			n = PAGE_SIZE;
		n = copy_page_to_iter(page, 0, n, iter);
		if (!n)
			return -EFAULT;
		size -= n;
	}
	return 0;
}