forked from Minki/linux
dlm: use vmalloc for hash tables
Allocate dlm hash tables in the vmalloc area to allow a greater maximum size without restructuring of the hash table code.

Signed-off-by: Bryn M. Reeves <bmr@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
This commit is contained in:
parent
55b3286d3d
commit
c282af4990
@@ -463,7 +463,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
 	size = dlm_config.ci_rsbtbl_size;
 	ls->ls_rsbtbl_size = size;
 
-	ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_NOFS);
+	ls->ls_rsbtbl = vmalloc(sizeof(struct dlm_rsbtable) * size);
 	if (!ls->ls_rsbtbl)
 		goto out_lsfree;
 	for (i = 0; i < size; i++) {
@@ -475,7 +475,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
 	size = dlm_config.ci_lkbtbl_size;
 	ls->ls_lkbtbl_size = size;
 
-	ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_NOFS);
+	ls->ls_lkbtbl = vmalloc(sizeof(struct dlm_lkbtable) * size);
 	if (!ls->ls_lkbtbl)
 		goto out_rsbfree;
 	for (i = 0; i < size; i++) {
@@ -487,7 +487,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
 	size = dlm_config.ci_dirtbl_size;
 	ls->ls_dirtbl_size = size;
 
-	ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_NOFS);
+	ls->ls_dirtbl = vmalloc(sizeof(struct dlm_dirtable) * size);
 	if (!ls->ls_dirtbl)
 		goto out_lkbfree;
 	for (i = 0; i < size; i++) {
@@ -603,11 +603,11 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
 	spin_unlock(&lslist_lock);
 	kfree(ls->ls_recover_buf);
  out_dirfree:
-	kfree(ls->ls_dirtbl);
+	vfree(ls->ls_dirtbl);
  out_lkbfree:
-	kfree(ls->ls_lkbtbl);
+	vfree(ls->ls_lkbtbl);
  out_rsbfree:
-	kfree(ls->ls_rsbtbl);
+	vfree(ls->ls_rsbtbl);
  out_lsfree:
 	if (do_unreg)
 		kobject_put(&ls->ls_kobj);
@@ -721,7 +721,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 	 */
 
 	dlm_dir_clear(ls);
-	kfree(ls->ls_dirtbl);
+	vfree(ls->ls_dirtbl);
 
 	/*
 	 * Free all lkb's on lkbtbl[] lists.
@@ -745,7 +745,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 	}
 	dlm_astd_resume();
 
-	kfree(ls->ls_lkbtbl);
+	vfree(ls->ls_lkbtbl);
 
 	/*
 	 * Free all rsb's on rsbtbl[] lists
@@ -770,7 +770,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 		}
 	}
 
-	kfree(ls->ls_rsbtbl);
+	vfree(ls->ls_rsbtbl);
 
 	/*
 	 * Free structures on any other lists
|
Loading…
Reference in New Issue
Block a user