Merge branch 'percpu-for-linus' into percpu-for-next
Conflicts:
	arch/sparc/kernel/smp_64.c
	arch/x86/kernel/cpu/perf_counter.c
	arch/x86/kernel/setup_percpu.c
	drivers/cpufreq/cpufreq_ondemand.c
	mm/percpu.c

Conflicts in core and arch percpu code are mostly from commit
ed78e1e078dd44249f88b1dd8c76dafb39567161, which substituted many
num_possible_cpus() calls with nr_cpu_ids.  As the for-next branch
has moved all the first-chunk allocators into mm/percpu.c, those
changes are carried over from the arch code into mm/percpu.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
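For reference, the substitution from ed78e1e0 that generated most of these conflicts follows the pattern below (a schematic before/after, not an excerpt from the tree; do_something() is hypothetical). num_possible_cpus() counts the possible CPUs, which is too small a loop bound when possible CPU ids are sparse; nr_cpu_ids is the highest possible CPU id plus one:

	/* before: counts possible CPUs -- wrong bound if ids are sparse */
	for (cpu = 0; cpu < num_possible_cpus(); cpu++)
		do_something(cpu);

	/* after: bounded by the highest possible CPU id + 1 */
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		do_something(cpu);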
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
	 idr.o int_sqrt.o extable.o prio_tree.o \
	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o
+	 is_single_threaded.o plist.o decompress.o flex_array.o

 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -13,6 +13,7 @@
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/module.h>
 #include <asm/atomic.h>

 /*
@@ -52,6 +53,7 @@ long long atomic64_read(const atomic64_t *v)
	spin_unlock_irqrestore(lock, flags);
	return val;
 }
+EXPORT_SYMBOL(atomic64_read);

 void atomic64_set(atomic64_t *v, long long i)
 {
@@ -62,6 +64,7 @@ void atomic64_set(atomic64_t *v, long long i)
	v->counter = i;
	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_set);

 void atomic64_add(long long a, atomic64_t *v)
 {
@@ -72,6 +75,7 @@ void atomic64_add(long long a, atomic64_t *v)
	v->counter += a;
	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_add);

 long long atomic64_add_return(long long a, atomic64_t *v)
 {
@@ -84,6 +88,7 @@ long long atomic64_add_return(long long a, atomic64_t *v)
	spin_unlock_irqrestore(lock, flags);
	return val;
 }
+EXPORT_SYMBOL(atomic64_add_return);

 void atomic64_sub(long long a, atomic64_t *v)
 {
@@ -94,6 +99,7 @@ void atomic64_sub(long long a, atomic64_t *v)
	v->counter -= a;
	spin_unlock_irqrestore(lock, flags);
 }
+EXPORT_SYMBOL(atomic64_sub);

 long long atomic64_sub_return(long long a, atomic64_t *v)
 {
@@ -106,6 +112,7 @@ long long atomic64_sub_return(long long a, atomic64_t *v)
	spin_unlock_irqrestore(lock, flags);
	return val;
 }
+EXPORT_SYMBOL(atomic64_sub_return);

 long long atomic64_dec_if_positive(atomic64_t *v)
 {
@@ -120,6 +127,7 @@ long long atomic64_dec_if_positive(atomic64_t *v)
	spin_unlock_irqrestore(lock, flags);
	return val;
 }
+EXPORT_SYMBOL(atomic64_dec_if_positive);

 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 {
@@ -134,6 +142,7 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
	spin_unlock_irqrestore(lock, flags);
	return val;
 }
+EXPORT_SYMBOL(atomic64_cmpxchg);

 long long atomic64_xchg(atomic64_t *v, long long new)
 {
@@ -147,6 +156,7 @@ long long atomic64_xchg(atomic64_t *v, long long new)
	spin_unlock_irqrestore(lock, flags);
	return val;
 }
+EXPORT_SYMBOL(atomic64_xchg);

 int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
@@ -162,6 +172,7 @@ int atomic64_add_unless(atomic64_t *v, long long a, long long u)
	spin_unlock_irqrestore(lock, flags);
	return ret;
 }
+EXPORT_SYMBOL(atomic64_add_unless);

 static int init_atomic64_lock(void)
 {
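The new EXPORT_SYMBOL() lines (plus the module.h include) make this generic, spinlock-backed atomic64 fallback usable from modules on 32-bit architectures that lack native 64-bit atomics. A minimal sketch of modular usage under that assumption (the module itself is hypothetical):

	#include <linux/module.h>
	#include <asm/atomic.h>

	static atomic64_t bytes_seen = ATOMIC64_INIT(0);

	static int __init atomic64_demo_init(void)
	{
		/* both calls resolve via the exports added above */
		atomic64_add(4096, &bytes_seen);
		printk(KERN_INFO "seen=%lld\n", atomic64_read(&bytes_seen));
		return 0;
	}

	static void __exit atomic64_demo_exit(void)
	{
	}

	module_init(atomic64_demo_init);
	module_exit(atomic64_demo_exit);
	MODULE_LICENSE("GPL");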
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -45,12 +45,14 @@
  */

-#ifndef STATIC
+#ifdef STATIC
+#define PREBOOT
+#else
 #include <linux/decompress/bunzip2.h>
-#endif /* !STATIC */
+#include <linux/slab.h>
+#endif /* STATIC */

 #include <linux/decompress/mm.h>
-#include <linux/slab.h>

 #ifndef INT_MAX
 #define INT_MAX 0x7fffffff
@@ -681,9 +683,7 @@ STATIC int INIT bunzip2(unsigned char *buf, int len,
	set_error_fn(error_fn);
	if (flush)
		outbuf = malloc(BZIP2_IOBUF_SIZE);
-	else
-		len -= 4; /* Uncompressed size hack active in pre-boot
-			     environment */
+
	if (!outbuf) {
		error("Could not allocate output bufer");
		return -1;
@@ -733,4 +733,14 @@ exit_0:
	return i;
 }

-#define decompress bunzip2
+#ifdef PREBOOT
+STATIC int INIT decompress(unsigned char *buf, int len,
+			int(*fill)(void*, unsigned int),
+			int(*flush)(void*, unsigned int),
+			unsigned char *outbuf,
+			int *pos,
+			void(*error_fn)(char *x))
+{
+	return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error_fn);
+}
+#endif
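The PREBOOT wrapper exists because pre-boot callers append the uncompressed length as a trailing 32-bit word, which decompress() strips via len - 4, and because pre-boot environments #include this file directly with STATIC defined rather than linking it. Roughly how an arch decompressor consumes it (modeled on the x86 boot code; the exact path is an assumption here):

	/* in an arch pre-boot decompressor, e.g. misc.c */
	#define STATIC static	/* keeps everything file-local and, per the
				 * hunk above, selects the PREBOOT wrapper */
	#include "../../../../lib/decompress_bunzip2.c"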
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,13 +19,13 @@
 #include "zlib_inflate/inflate.h"

 #include "zlib_inflate/infutil.h"
+#include <linux/slab.h>

 #endif /* STATIC */

 #include <linux/decompress/mm.h>
-#include <linux/slab.h>

-#define INBUF_LEN (16*1024)
+#define GZIP_IOBUF_SIZE (16*1024)

 /* Included from initramfs et al code */
 STATIC int INIT gunzip(unsigned char *buf, int len,
@@ -55,7 +55,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
	if (buf)
		zbuf = buf;
	else {
-		zbuf = malloc(INBUF_LEN);
+		zbuf = malloc(GZIP_IOBUF_SIZE);
		len = 0;
	}
	if (!zbuf) {
@@ -77,7 +77,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
	}

	if (len == 0)
-		len = fill(zbuf, INBUF_LEN);
+		len = fill(zbuf, GZIP_IOBUF_SIZE);

	/* verify the gzip header */
	if (len < 10 ||
@@ -113,7 +113,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len,
	while (rc == Z_OK) {
		if (strm->avail_in == 0) {
			/* TODO: handle case where both pos and fill are set */
-			len = fill(zbuf, INBUF_LEN);
+			len = fill(zbuf, GZIP_IOBUF_SIZE);
			if (len < 0) {
				rc = -1;
				error("read error");
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -29,12 +29,14 @@
  *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  */

-#ifndef STATIC
+#ifdef STATIC
+#define PREBOOT
+#else
 #include <linux/decompress/unlzma.h>
+#include <linux/slab.h>
 #endif /* STATIC */

 #include <linux/decompress/mm.h>
-#include <linux/slab.h>

 #define MIN(a, b) (((a) < (b)) ? (a) : (b))
@@ -543,9 +545,7 @@ STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
	int ret = -1;

	set_error_fn(error_fn);
-	if (!flush)
-		in_len -= 4; /* Uncompressed size hack active in pre-boot
-				environment */
+
	if (buf)
		inbuf = buf;
	else
@@ -645,4 +645,15 @@ exit_0:
	return ret;
 }

-#define decompress unlzma
+#ifdef PREBOOT
+STATIC int INIT decompress(unsigned char *buf, int in_len,
+			int(*fill)(void*, unsigned int),
+			int(*flush)(void*, unsigned int),
+			unsigned char *output,
+			int *posp,
+			void(*error_fn)(char *x)
+	)
+{
+	return unlzma(buf, in_len - 4, fill, flush, output, posp, error_fn);
+}
+#endif
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -716,7 +716,7 @@ void dma_debug_init(u32 num_entries)

	for (i = 0; i < HASH_SIZE; ++i) {
		INIT_LIST_HEAD(&dma_entry_hash[i].list);
-		dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED;
+		spin_lock_init(&dma_entry_hash[i].lock);
	}

	if (dma_debug_fs_init() != 0) {
@@ -856,22 +856,21 @@ static void check_for_stack(struct device *dev, void *addr)
			"stack [addr=%p]\n", addr);
 }

-static inline bool overlap(void *addr, u64 size, void *start, void *end)
+static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
 {
-	void *addr2 = (char *)addr + size;
+	unsigned long a1 = (unsigned long)addr;
+	unsigned long b1 = a1 + len;
+	unsigned long a2 = (unsigned long)start;
+	unsigned long b2 = (unsigned long)end;

-	return ((addr >= start && addr < end) ||
-		(addr2 >= start && addr2 < end) ||
-		((addr < start) && (addr2 >= end)));
+	return !(b1 <= a2 || a1 >= b2);
 }

-static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
+static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
 {
-	if (overlap(addr, size, _text, _etext) ||
-	    overlap(addr, size, __start_rodata, __end_rodata))
-		err_printk(dev, NULL, "DMA-API: device driver maps "
-			   "memory from kernel text or rodata "
-			   "[addr=%p] [size=%llu]\n", addr, size);
+	if (overlap(addr, len, _text, _etext) ||
+	    overlap(addr, len, __start_rodata, __end_rodata))
+		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
 }

 static void check_sync(struct device *dev,
@@ -969,7 +968,8 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
	entry->type      = dma_debug_single;

	if (!PageHighMem(page)) {
-		void *addr = ((char *)page_address(page)) + offset;
+		void *addr = page_address(page) + offset;
+
		check_for_stack(dev, addr);
		check_for_illegal_area(dev, addr, size);
	}
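The rewritten overlap() is the standard half-open interval test: [a1, b1) and [a2, b2) intersect exactly when neither range ends before the other begins. Doing the comparison in unsigned long arithmetic also sidesteps the old version's out-of-bounds pointer arithmetic. A user-space sketch of the same predicate (test values are made up):

	#include <assert.h>

	static int overlap(unsigned long a1, unsigned long len,
			   unsigned long a2, unsigned long b2)
	{
		unsigned long b1 = a1 + len;

		/* ranges are half-open: [a1, b1) vs [a2, b2) */
		return !(b1 <= a2 || a1 >= b2);
	}

	int main(void)
	{
		assert(overlap(10, 5, 12, 20));		/* [10,15) crosses [12,20) */
		assert(!overlap(10, 5, 15, 20));	/* merely touching: no overlap */
		assert(overlap(0, 100, 40, 60));	/* full containment counts too */
		return 0;
	}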
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -164,7 +164,7 @@ static void ddebug_change(const struct ddebug_query *query,

		if (!newflags)
			dt->num_enabled--;
-		else if (!dp-flags)
+		else if (!dp->flags)
			dt->num_enabled++;
		dp->flags = newflags;
		if (newflags) {
--- /dev/null
+++ b/lib/flex_array.c (new file, 267 lines)
@@ -0,0 +1,267 @@
+/*
+ * Flexible array managed in PAGE_SIZE parts
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2009
+ *
+ * Author: Dave Hansen <dave@linux.vnet.ibm.com>
+ */
+
+#include <linux/flex_array.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+
+struct flex_array_part {
+	char elements[FLEX_ARRAY_PART_SIZE];
+};
+
+static inline int __elements_per_part(int element_size)
+{
+	return FLEX_ARRAY_PART_SIZE / element_size;
+}
+
+static inline int bytes_left_in_base(void)
+{
+	int element_offset = offsetof(struct flex_array, parts);
+	int bytes_left = FLEX_ARRAY_BASE_SIZE - element_offset;
+	return bytes_left;
+}
+
+static inline int nr_base_part_ptrs(void)
+{
+	return bytes_left_in_base() / sizeof(struct flex_array_part *);
+}
+
+/*
+ * If a user requests an allocation which is small
+ * enough, we may simply use the space in the
+ * flex_array->parts[] array to store the user
+ * data.
+ */
+static inline int elements_fit_in_base(struct flex_array *fa)
+{
+	int data_size = fa->element_size * fa->total_nr_elements;
+	if (data_size <= bytes_left_in_base())
+		return 1;
+	return 0;
+}
+
+/**
+ * flex_array_alloc - allocate a new flexible array
+ * @element_size: the size of individual elements in the array
+ * @total: total number of elements that this should hold
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * @total is used to size internal structures.  If the user ever
+ * accesses any array indexes >=@total, it will produce errors.
+ *
+ * The maximum number of elements is defined as: the number of
+ * elements that can be stored in a page times the number of
+ * page pointers that we can fit in the base structure or (using
+ * integer math):
+ *
+ *	(PAGE_SIZE/element_size) * (PAGE_SIZE-8)/sizeof(void *)
+ *
+ * Here's a table showing example capacities.  Note that the maximum
+ * index that the get/put() functions accept is just nr_objects-1.
+ * This basically means that you get 4MB of storage on 32-bit and
+ * 2MB on 64-bit.
+ *
+ *
+ * Element size | Objects | Objects |
+ * PAGE_SIZE=4k |  32-bit |  64-bit |
+ * ---------------------------------|
+ *      1 bytes | 4186112 | 2093056 |
+ *      2 bytes | 2093056 | 1046528 |
+ *      3 bytes | 1395030 |  697515 |
+ *      4 bytes | 1046528 |  523264 |
+ *     32 bytes |  130816 |   65408 |
+ *     33 bytes |  126728 |   63364 |
+ *   2048 bytes |    2044 |    1022 |
+ *   2049 bytes |    1022 |     511 |
+ *       void * | 1046528 |  261632 |
+ *
+ * Since 64-bit pointers are twice the size, we lose half the
+ * capacity in the base structure.  Also note that no effort is made
+ * to efficiently pack objects across page boundaries.
+ */
+struct flex_array *flex_array_alloc(int element_size, int total, gfp_t flags)
+{
+	struct flex_array *ret;
+	int max_size = nr_base_part_ptrs() * __elements_per_part(element_size);
+
+	/* max_size will end up 0 if element_size > PAGE_SIZE */
+	if (total > max_size)
+		return NULL;
+	ret = kzalloc(sizeof(struct flex_array), flags);
+	if (!ret)
+		return NULL;
+	ret->element_size = element_size;
+	ret->total_nr_elements = total;
+	return ret;
+}
+
+static int fa_element_to_part_nr(struct flex_array *fa, int element_nr)
+{
+	return element_nr / __elements_per_part(fa->element_size);
+}
+
+/**
+ * flex_array_free_parts - just free the second-level pages
+ * @fa: the flex array whose parts should be freed
+ *
+ * This is to be used in cases where the base 'struct flex_array'
+ * has been statically allocated and should not be freed.
+ */
+void flex_array_free_parts(struct flex_array *fa)
+{
+	int part_nr;
+	int max_part = nr_base_part_ptrs();
+
+	if (elements_fit_in_base(fa))
+		return;
+	for (part_nr = 0; part_nr < max_part; part_nr++)
+		kfree(fa->parts[part_nr]);
+}
+
+void flex_array_free(struct flex_array *fa)
+{
+	flex_array_free_parts(fa);
+	kfree(fa);
+}
+
+static int fa_index_inside_part(struct flex_array *fa, int element_nr)
+{
+	return element_nr % __elements_per_part(fa->element_size);
+}
+
+static int index_inside_part(struct flex_array *fa, int element_nr)
+{
+	int part_offset = fa_index_inside_part(fa, element_nr);
+	return part_offset * fa->element_size;
+}
+
+static struct flex_array_part *
+__fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
+{
+	struct flex_array_part *part = fa->parts[part_nr];
+	if (!part) {
+		/*
+		 * This leaves the part pages uninitialized
+		 * and with potentially random data, just
+		 * as if the user had kmalloc()'d the whole.
+		 * __GFP_ZERO can be used to zero it.
+		 */
+		part = kmalloc(FLEX_ARRAY_PART_SIZE, flags);
+		if (!part)
+			return NULL;
+		fa->parts[part_nr] = part;
+	}
+	return part;
+}
+
+/**
+ * flex_array_put - copy data into the array at @element_nr
+ * @src: address of data to copy into the array
+ * @element_nr: index of the position in which to insert
+ * the new element.
+ *
+ * Note that this *copies* the contents of @src into
+ * the array.  If you are trying to store an array of
+ * pointers, make sure to pass in &ptr instead of ptr.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_put(struct flex_array *fa, int element_nr, void *src, gfp_t flags)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+	void *dst;
+
+	if (element_nr >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else
+		part = __fa_get_part(fa, part_nr, flags);
+	if (!part)
+		return -ENOMEM;
+	dst = &part->elements[index_inside_part(fa, element_nr)];
+	memcpy(dst, src, fa->element_size);
+	return 0;
+}
+
+/**
+ * flex_array_prealloc - guarantee that array space exists
+ * @start: index of first array element for which space is allocated
+ * @end: index of last (inclusive) element for which space is allocated
+ *
+ * This will guarantee that no future calls to flex_array_put()
+ * will allocate memory.  It can be used if you are expecting to
+ * be holding a lock or in some atomic context while writing
+ * data into the array.
+ *
+ * Locking must be provided by the caller.
+ */
+int flex_array_prealloc(struct flex_array *fa, int start, int end, gfp_t flags)
+{
+	int start_part;
+	int end_part;
+	int part_nr;
+	struct flex_array_part *part;
+
+	if (start >= fa->total_nr_elements || end >= fa->total_nr_elements)
+		return -ENOSPC;
+	if (elements_fit_in_base(fa))
+		return 0;
+	start_part = fa_element_to_part_nr(fa, start);
+	end_part = fa_element_to_part_nr(fa, end);
+	for (part_nr = start_part; part_nr <= end_part; part_nr++) {
+		part = __fa_get_part(fa, part_nr, flags);
+		if (!part)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * flex_array_get - pull data back out of the array
+ * @element_nr: index of the element to fetch from the array
+ *
+ * Returns a pointer to the data at index @element_nr.  Note
+ * that this is a copy of the data that was passed in.  If you
+ * are using this to store pointers, you'll get back &ptr.
+ *
+ * Locking must be provided by the caller.
+ */
+void *flex_array_get(struct flex_array *fa, int element_nr)
+{
+	int part_nr = fa_element_to_part_nr(fa, element_nr);
+	struct flex_array_part *part;
+
+	if (element_nr >= fa->total_nr_elements)
+		return NULL;
+	if (!fa->parts[part_nr])
+		return NULL;
+	if (elements_fit_in_base(fa))
+		part = (struct flex_array_part *)&fa->parts[0];
+	else
+		part = fa->parts[part_nr];
+	return &part->elements[index_inside_part(fa, element_nr)];
+}
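Pulled together, the new API is used like this; a minimal sketch built only from the functions above (the element type and counts are made up):

	struct flex_array *fa;
	long v = 42;
	long *p;

	fa = flex_array_alloc(sizeof(long), 1000, GFP_KERNEL);	/* room for 1000 longs */
	if (!fa)
		return -ENOMEM;

	if (flex_array_put(fa, 0, &v, GFP_KERNEL) == 0) {	/* copies v into slot 0 */
		p = flex_array_get(fa, 0);	/* points at the stored copy, not at v */
		/* *p == 42 here */
	}

	flex_array_free(fa);	/* frees the parts and the base */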
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -314,6 +314,7 @@ void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
	miter->__sg = sgl;
	miter->__nents = nents;
	miter->__offset = 0;
+	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
 }
 EXPORT_SYMBOL(sg_miter_start);
@@ -394,6 +395,9 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
	if (miter->addr) {
		miter->__offset += miter->consumed;

+		if (miter->__flags & SG_MITER_TO_SG)
+			flush_kernel_dcache_page(miter->page);
+
		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON(!irqs_disabled());
			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
@@ -426,8 +430,14 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
+	unsigned int sg_flags = SG_MITER_ATOMIC;
+
+	if (to_buffer)
+		sg_flags |= SG_MITER_FROM_SG;
+	else
+		sg_flags |= SG_MITER_TO_SG;

-	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);
@@ -438,10 +448,8 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
-		else {
+		else
			memcpy(miter.addr, buf + offset, len);
-			flush_kernel_dcache_page(miter.page);
-		}

		offset += len;
	}
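With the direction flags in place, sg_miter_stop() flushes the kernel D-cache only when data was actually written into the sg pages (SG_MITER_TO_SG), instead of sg_copy_buffer() flushing unconditionally on its copy-in path. For driver-side context, a hedged sketch using the public wrappers built on sg_copy_buffer() (sg_table setup elided; sizes are made up):

	char hdr[64];
	size_t n;

	/* device -> CPU: copy the first 64 bytes out of a 4-entry sg table */
	n = sg_copy_to_buffer(table.sgl, 4, hdr, sizeof(hdr));

	/* CPU -> device: write the block back into the same table;
	 * sg_miter_stop() now performs the dcache flush on this path */
	n = sg_copy_from_buffer(table.sgl, 4, hdr, sizeof(hdr));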