staging: ramster: Provide accessory functions for counter decrease

This patch wraps the counter-decrease operations in accessory functions,
so that they can later be enabled/disabled together under CONFIG_DEBUG_FS.

Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Acked-by: Dan Magenheimer <dan.magenheimer@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Wanpeng Li 2013-04-10 08:25:54 +08:00 committed by Greg Kroah-Hartman
parent 93cc39b64c
commit 9a5c59687a

View File

@ -73,6 +73,10 @@ static inline void inc_ramster_flnodes(void)
	if (ramster_flnodes > ramster_flnodes_max)
		ramster_flnodes_max = ramster_flnodes;
}
/*
 * Decrement the flush-node counter and mirror the atomic value into the
 * plain ssize_t used for reporting (intended to be gated by CONFIG_DEBUG_FS
 * per the commit message).
 *
 * Restores the underflow sanity check that the open-coded decrement in
 * ramster_flnode_free() previously performed (BUG_ON(flnodes < 0)) and
 * that this wrapper silently dropped.
 */
static inline void dec_ramster_flnodes(void)
{
	ramster_flnodes = atomic_dec_return(&ramster_flnodes_atomic);
	BUG_ON(ramster_flnodes < 0);
}
static ssize_t ramster_foreign_eph_pages;
static atomic_t ramster_foreign_eph_pages_atomic = ATOMIC_INIT(0);
static ssize_t ramster_foreign_eph_pages_max;
@ -83,6 +87,11 @@ static inline void inc_ramster_foreign_eph_pages(void)
	if (ramster_foreign_eph_pages > ramster_foreign_eph_pages_max)
		ramster_foreign_eph_pages_max = ramster_foreign_eph_pages;
}
/*
 * Decrement the count of foreign ephemeral pages, mirroring the atomic
 * counter into the plain ssize_t used for reporting.  Underflow is
 * checked by the caller (ramster_count_foreign_pages).
 */
static inline void dec_ramster_foreign_eph_pages(void)
{
	int npages = atomic_dec_return(&ramster_foreign_eph_pages_atomic);

	ramster_foreign_eph_pages = npages;
}
static ssize_t ramster_foreign_pers_pages;
static atomic_t ramster_foreign_pers_pages_atomic = ATOMIC_INIT(0);
static ssize_t ramster_foreign_pers_pages_max;
@ -93,6 +102,11 @@ static inline void inc_ramster_foreign_pers_pages(void)
	if (ramster_foreign_pers_pages > ramster_foreign_pers_pages_max)
		ramster_foreign_pers_pages_max = ramster_foreign_pers_pages;
}
/*
 * Decrement the count of foreign persistent pages, mirroring the atomic
 * counter into the plain ssize_t used for reporting.  Underflow is
 * checked by the caller (ramster_count_foreign_pages).
 */
static inline void dec_ramster_foreign_pers_pages(void)
{
	int npages = atomic_dec_return(&ramster_foreign_pers_pages_atomic);

	ramster_foreign_pers_pages = npages;
}
static ssize_t ramster_eph_pages_remoted;
static ssize_t ramster_pers_pages_remoted;
static ssize_t ramster_eph_pages_remote_failed;
@ -188,10 +202,8 @@ static struct flushlist_node *ramster_flnode_alloc(struct tmem_pool *pool)
static void ramster_flnode_free(struct flushlist_node *flnode,
				struct tmem_pool *pool)
{
int flnodes; dec_ramster_flnodes();
BUG_ON(ramster_flnodes < 0);
flnodes = atomic_dec_return(&ramster_flnodes_atomic);
BUG_ON(flnodes < 0);
	kmem_cache_free(ramster_flnode_cache, flnode);
}
@ -484,26 +496,20 @@ void *ramster_pampd_free(void *pampd, struct tmem_pool *pool,
void ramster_count_foreign_pages(bool eph, int count) void ramster_count_foreign_pages(bool eph, int count)
{ {
int c;
BUG_ON(count != 1 && count != -1); BUG_ON(count != 1 && count != -1);
if (eph) { if (eph) {
if (count > 0) { if (count > 0) {
inc_ramster_foreign_eph_pages(); inc_ramster_foreign_eph_pages();
} else { } else {
c = atomic_dec_return(&ramster_foreign_eph_pages_atomic); dec_ramster_foreign_eph_pages();
WARN_ON_ONCE(c < 0); WARN_ON_ONCE(ramster_foreign_eph_pages < 0);
} }
ramster_foreign_eph_pages = c;
} else { } else {
if (count > 0) { if (count > 0) {
inc_ramster_foreign_pers_pages(); inc_ramster_foreign_pers_pages();
} else { } else {
c = atomic_dec_return( WARN_ON_ONCE(ramster_foreign_pers_pages < 0);
&ramster_foreign_pers_pages_atomic);
WARN_ON_ONCE(c < 0);
} }
ramster_foreign_pers_pages = c;
} }
} }