/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
|
|
|
|
#ifndef __XFS_SUPPORT_KMEM_H__
|
|
|
|
#define __XFS_SUPPORT_KMEM_H__
|
|
|
|
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/mm.h>
|
2010-01-20 21:55:30 +00:00
|
|
|
#include <linux/vmalloc.h>
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2006-03-14 02:18:19 +00:00
|
|
|
/*
 * General memory allocation interfaces
 */

/*
 * XFS-private allocation flags, mapped onto GFP flags by
 * kmem_flags_convert().  __bitwise so sparse flags mixing
 * them with plain integers or gfp_t.
 */
typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)	/* no fs reclaim recursion (clears __GFP_FS) */
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)	/* allocation may fail instead of retrying forever */
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)	/* zero the allocated memory */
#define KM_NOLOCKDEP	((__force xfs_km_flags_t)0x0020u)	/* skip lockdep reclaim checks for this allocation */
|
2006-03-14 02:18:19 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We use a special process flag to avoid recursive callbacks into
|
|
|
|
* the filesystem during transactions. We will also issue our own
|
|
|
|
* warnings, so we explicitly skip any generic ones (silly of us).
|
|
|
|
*/
|
|
|
|
static inline gfp_t
|
2012-04-02 10:24:04 +00:00
|
|
|
kmem_flags_convert(xfs_km_flags_t flags)
|
2005-04-16 22:20:36 +00:00
|
|
|
{
|
2006-03-14 02:18:19 +00:00
|
|
|
gfp_t lflags;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2020-05-26 16:33:11 +00:00
|
|
|
BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP));
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2019-08-26 19:06:22 +00:00
|
|
|
lflags = GFP_KERNEL | __GFP_NOWARN;
|
|
|
|
if (flags & KM_NOFS)
|
|
|
|
lflags &= ~__GFP_FS;
|
2013-11-04 10:21:05 +00:00
|
|
|
|
2017-07-12 21:36:49 +00:00
|
|
|
/*
|
|
|
|
* Default page/slab allocator behavior is to retry for ever
|
|
|
|
* for small allocations. We can override this behavior by using
|
|
|
|
* __GFP_RETRY_MAYFAIL which will tell the allocator to retry as long
|
|
|
|
* as it is feasible but rather fail than retry forever for all
|
|
|
|
* request sizes.
|
|
|
|
*/
|
|
|
|
if (flags & KM_MAYFAIL)
|
|
|
|
lflags |= __GFP_RETRY_MAYFAIL;
|
|
|
|
|
2013-11-04 10:21:05 +00:00
|
|
|
if (flags & KM_ZERO)
|
|
|
|
lflags |= __GFP_ZERO;
|
|
|
|
|
2020-05-26 16:33:11 +00:00
|
|
|
if (flags & KM_NOLOCKDEP)
|
|
|
|
lflags |= __GFP_NOLOCKDEP;
|
|
|
|
|
2006-03-14 02:18:19 +00:00
|
|
|
return lflags;
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2012-04-02 10:24:04 +00:00
|
|
|
/* Allocate memory honouring the KM_* flags; may fall back to vmalloc. */
extern void *kmem_alloc(size_t, xfs_km_flags_t);

/*
 * Free memory obtained from kmem_alloc()/kmem_zalloc().  kvfree() handles
 * both kmalloc'd and vmalloc'd pointers, and free of NULL is a no-op.
 */
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}
|
2006-03-14 02:18:19 +00:00
|
|
|
|
2010-01-20 21:55:30 +00:00
|
|
|
|
2013-11-04 10:21:05 +00:00
|
|
|
/*
 * Allocate zeroed memory: kmem_alloc() with KM_ZERO implied, which
 * kmem_flags_convert() maps to __GFP_ZERO.
 */
static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}
|
|
|
|
|
2006-03-14 02:18:19 +00:00
|
|
|
/*
 * Zone interfaces
 */

/*
 * Compatibility aliases: XFS historically called slab caches "zones";
 * map the old names straight onto the kernel's kmem_cache.
 */
#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache
|
|
|
|
|
2019-06-29 02:27:19 +00:00
|
|
|
/*
 * Return the struct page backing @addr, which may come from either the
 * vmalloc area or the kernel's linear mapping.
 */
static inline struct page *
kmem_to_page(void *addr)
{
	return is_vmalloc_addr(addr) ? vmalloc_to_page(addr) :
				       virt_to_page(addr);
}
|
|
|
|
|
2005-04-16 22:20:36 +00:00
|
|
|
#endif /* __XFS_SUPPORT_KMEM_H__ */
|