SLUB: Fix coding style violations

This fixes most of the obvious coding style violations in mm/slub.c as
reported by checkpatch.
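
For reference, the warnings addressed here are the kind checkpatch emits
when run over the whole file, for example (a typical invocation; exact
options depend on the checkpatch version in the tree):

	./scripts/checkpatch.pl --file mm/slub.c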

Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
commit 064287807c (parent 7c2e132c54)
Author:    Pekka Enberg <penberg@cs.helsinki.fi>
Date:      2008-01-07 23:20:27 -08:00
Committer: Christoph Lameter <clameter@sgi.com>

@@ -357,22 +357,22 @@ static void print_section(char *text, u8 *addr, unsigned int length)
 			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
 			newline = 0;
 		}
-		printk(" %02x", addr[i]);
+		printk(KERN_CONT " %02x", addr[i]);
 		offset = i % 16;
 		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
 		if (offset == 15) {
-			printk(" %s\n",ascii);
+			printk(KERN_CONT " %s\n", ascii);
 			newline = 1;
 		}
 	}
 	if (!newline) {
 		i %= 16;
 		while (i < 16) {
-			printk("   ");
+			printk(KERN_CONT "   ");
 			ascii[i] = ' ';
 			i++;
 		}
-		printk(" %s\n", ascii);
+		printk(KERN_CONT " %s\n", ascii);
 	}
 }
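
A note on the KERN_CONT conversions above: a printk() fragment without a
log level opens a new message at the default level, so a hex dump built
from several printk() calls can be split across log lines. KERN_CONT
marks a fragment as a continuation of the line already in progress. A
minimal sketch of the pattern (illustrative only, not from this patch;
"dump_line" is a hypothetical helper):

	/* Illustrative helper: dump "len" bytes on a single log line. */
	static void dump_line(const u8 *buf, int len)
	{
		int i;

		printk(KERN_ERR "bytes:");	/* open the line at an explicit level */
		for (i = 0; i < len; i++)
			printk(KERN_CONT " %02x", buf[i]);	/* continuation fragments */
		printk(KERN_CONT "\n");		/* terminate the line */
	}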
@@ -532,7 +532,7 @@ static void init_object(struct kmem_cache *s, void *object, int active)
 
 	if (s->flags & __OBJECT_POISON) {
 		memset(p, POISON_FREE, s->objsize - 1);
-		p[s->objsize -1] = POISON_END;
+		p[s->objsize - 1] = POISON_END;
 	}
 
 	if (s->flags & SLAB_RED_ZONE)
@@ -561,7 +561,7 @@ static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 
 static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 			u8 *object, char *what,
-			u8* start, unsigned int value, unsigned int bytes)
+			u8 *start, unsigned int value, unsigned int bytes)
 {
 	u8 *fault;
 	u8 *end;
@@ -695,7 +695,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
 			(!check_bytes_and_report(s, page, p, "Poison", p,
 					POISON_FREE, s->objsize - 1) ||
 			 !check_bytes_and_report(s, page, p, "Poison",
-				p + s->objsize -1, POISON_END, 1)))
+				p + s->objsize - 1, POISON_END, 1)))
 			return 0;
 		/*
 		 * check_pad_bytes cleans up on its own.
@@ -903,8 +903,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
 				"SLUB <none>: no slab for object 0x%p.\n",
 						object);
 			dump_stack();
-		}
-		else
+		} else
 			object_err(s, page, object,
 					"page slab pointer corrupt.");
 		goto fail;
@@ -950,7 +949,7 @@ static int __init setup_slub_debug(char *str)
 	/*
 	 * Determine which debug features should be switched on
 	 */
-	for ( ;*str && *str != ','; str++) {
+	for (; *str && *str != ','; str++) {
 		switch (tolower(*str)) {
 		case 'f':
 			slub_debug |= SLAB_DEBUG_FREE;
@@ -969,7 +968,7 @@ static int __init setup_slub_debug(char *str)
 			break;
 		default:
 			printk(KERN_ERR "slub_debug option '%c' "
-				"unknown. skipped\n",*str);
+				"unknown. skipped\n", *str);
 		}
 	}
 
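
For context, setup_slub_debug() parses the slub_debug= kernel command
line option: each character before the comma selects a debug feature
('f' above enables SLAB_DEBUG_FREE sanity checks), and an optional slab
name after the comma restricts debugging to that cache. An illustrative
command line (see Documentation/vm/slub.txt for the full flag list):

	slub_debug=FZP,kmalloc-64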
@@ -1042,7 +1041,7 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
  */
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
-	struct page * page;
+	struct page *page;
 	int pages = 1 << s->order;
 
 	if (s->order)
@@ -1138,7 +1137,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	mod_zone_page_state(page_zone(page),
 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-		- pages);
+		-pages);
 
 	__free_pages(page, s->order);
 }
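
The "- pages" fix above is more than whitespace pedantry: the minus here
is a unary operator (the slab's page count is passed as a negative delta
to mod_zone_page_state() to decrement the zone counter), and the extra
space made it read like a binary subtraction. checkpatch warns about a
space after a unary '-' for exactly this reason.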
@@ -1542,7 +1541,7 @@ debug:
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static void __always_inline *slab_alloc(struct kmem_cache *s,
+static __always_inline void *slab_alloc(struct kmem_cache *s,
 		gfp_t gfpflags, int node, void *addr)
 {
 	void **object;
@@ -1650,7 +1649,7 @@ debug:
  * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
 */
-static void __always_inline slab_free(struct kmem_cache *s,
+static __always_inline void slab_free(struct kmem_cache *s,
 			struct page *page, void *x, void *addr)
 {
 	void **object = (void *)x;
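
Both __always_inline hunks fix the same declaration-order issue: the
inline keyword (and wrappers such as __always_inline) belongs between
the storage class and the return type. In the old form the attribute
sat in the middle of the "void *" return type. The preferred shape,
sketched on a hypothetical helper:

	/* storage class, then inline keyword, then return type */
	static __always_inline void *helper(void);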
@@ -2231,7 +2230,7 @@ error:
  */
 int kmem_ptr_validate(struct kmem_cache *s, const void *object)
 {
-	struct page * page;
+	struct page *page;
 
 	page = get_object_page(object);
 
@@ -2343,7 +2342,7 @@ static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
 
 static int __init setup_slub_min_order(char *str)
 {
-	get_option (&str, &slub_min_order);
+	get_option(&str, &slub_min_order);
 
 	return 1;
 }
@@ -2352,7 +2351,7 @@ __setup("slub_min_order=", setup_slub_min_order);
 
 static int __init setup_slub_max_order(char *str)
 {
-	get_option (&str, &slub_max_order);
+	get_option(&str, &slub_max_order);
 
 	return 1;
 }
@@ -2361,7 +2360,7 @@ __setup("slub_max_order=", setup_slub_max_order);
 
 static int __init setup_slub_min_objects(char *str)
 {
-	get_option (&str, &slub_min_objects);
+	get_option(&str, &slub_min_objects);
 
 	return 1;
 }
@@ -2946,7 +2945,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 		 * Check if alignment is compatible.
 		 * Courtesy of Adrian Drzewiecki
 		 */
-		if ((s->size & ~(align -1)) != s->size)
+		if ((s->size & ~(align - 1)) != s->size)
 			continue;
 
 		if (s->size - size >= sizeof(void *))
@@ -3055,8 +3054,9 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata slab_notifier =
-	{ &slab_cpuup_callback, NULL, 0 };
+static struct notifier_block __cpuinitdata slab_notifier = {
+	&slab_cpuup_callback, NULL, 0
+};
 
 #endif
 
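
The reshaped initializer above keeps the positional form, which relies
on struct notifier_block's field order (notifier_call, next, priority).
A designated initializer would make that explicit and let the remaining
fields default to zero; a sketch of the alternative (not part of this
patch):

	static struct notifier_block __cpuinitdata slab_notifier = {
		.notifier_call = slab_cpuup_callback,
	};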
@@ -3864,7 +3864,7 @@ static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
 SLAB_ATTR(remote_node_defrag_ratio);
 #endif
 
-static struct attribute * slab_attrs[] = {
+static struct attribute *slab_attrs[] = {
 	&slab_size_attr.attr,
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,