treewide: Add __GFP_NOWARN to k.alloc calls with v.alloc fallbacks
Don't emit OOM warnings when k.alloc calls fail when there is a v.alloc immediately afterwards. Converted a kmalloc/vmalloc with memset to kzalloc/vzalloc. Signed-off-by: Joe Perches <joe@perches.com> Acked-by: "Theodore Ts'o" <tytso@mit.edu> Signed-off-by: Jiri Kosina <jkosina@suse.cz>
This commit is contained in:
parent
85dbe70607
commit
8be04b9374
drivers
block/drbd
infiniband/hw/ehca
net/ethernet/chelsio
scsi/cxgbi
fs
net/sched
@@ -393,7 +393,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
|
|||||||
* we must not block on IO to ourselves.
|
* we must not block on IO to ourselves.
|
||||||
* Context is receiver thread or dmsetup. */
|
* Context is receiver thread or dmsetup. */
|
||||||
bytes = sizeof(struct page *)*want;
|
bytes = sizeof(struct page *)*want;
|
||||||
new_pages = kzalloc(bytes, GFP_NOIO);
|
new_pages = kzalloc(bytes, GFP_NOIO | __GFP_NOWARN);
|
||||||
if (!new_pages) {
|
if (!new_pages) {
|
||||||
new_pages = __vmalloc(bytes,
|
new_pages = __vmalloc(bytes,
|
||||||
GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
|
GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
|
||||||
|
@@ -222,7 +222,8 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
|
|||||||
queue->small_page = NULL;
|
queue->small_page = NULL;
|
||||||
|
|
||||||
/* allocate queue page pointers */
|
/* allocate queue page pointers */
|
||||||
queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
|
queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
|
||||||
|
GFP_KERNEL | __GFP_NOWARN);
|
||||||
if (!queue->queue_pages) {
|
if (!queue->queue_pages) {
|
||||||
queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
|
queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
|
||||||
if (!queue->queue_pages) {
|
if (!queue->queue_pages) {
|
||||||
|
@@ -1157,7 +1157,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
|
|||||||
*/
|
*/
|
||||||
void *cxgb_alloc_mem(unsigned long size)
|
void *cxgb_alloc_mem(unsigned long size)
|
||||||
{
|
{
|
||||||
void *p = kzalloc(size, GFP_KERNEL);
|
void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
|
||||||
|
|
||||||
if (!p)
|
if (!p)
|
||||||
p = vzalloc(size);
|
p = vzalloc(size);
|
||||||
|
@@ -1133,7 +1133,7 @@ out: release_firmware(fw);
|
|||||||
*/
|
*/
|
||||||
void *t4_alloc_mem(size_t size)
|
void *t4_alloc_mem(size_t size)
|
||||||
{
|
{
|
||||||
void *p = kzalloc(size, GFP_KERNEL);
|
void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
|
||||||
|
|
||||||
if (!p)
|
if (!p)
|
||||||
p = vzalloc(size);
|
p = vzalloc(size);
|
||||||
|
@@ -658,11 +658,11 @@ static inline u32 cxgbi_tag_nonrsvd_bits(struct cxgbi_tag_format *tformat,
|
|||||||
static inline void *cxgbi_alloc_big_mem(unsigned int size,
|
static inline void *cxgbi_alloc_big_mem(unsigned int size,
|
||||||
gfp_t gfp)
|
gfp_t gfp)
|
||||||
{
|
{
|
||||||
void *p = kmalloc(size, gfp);
|
void *p = kzalloc(size, gfp | __GFP_NOWARN);
|
||||||
|
|
||||||
if (!p)
|
if (!p)
|
||||||
p = vmalloc(size);
|
p = vzalloc(size);
|
||||||
if (p)
|
|
||||||
memset(p, 0, size);
|
|
||||||
return p;
|
return p;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -219,7 +219,7 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
|
|||||||
len = PAGE_ALIGN(len);
|
len = PAGE_ALIGN(len);
|
||||||
|
|
||||||
if (p->buf == p->inline_buf) {
|
if (p->buf == p->inline_buf) {
|
||||||
tmp_buf = kmalloc(len, GFP_NOFS);
|
tmp_buf = kmalloc(len, GFP_NOFS | __GFP_NOWARN);
|
||||||
if (!tmp_buf) {
|
if (!tmp_buf) {
|
||||||
tmp_buf = vmalloc(len);
|
tmp_buf = vmalloc(len);
|
||||||
if (!tmp_buf)
|
if (!tmp_buf)
|
||||||
|
@@ -162,7 +162,7 @@ void *ext4_kvmalloc(size_t size, gfp_t flags)
|
|||||||
{
|
{
|
||||||
void *ret;
|
void *ret;
|
||||||
|
|
||||||
ret = kmalloc(size, flags);
|
ret = kmalloc(size, flags | __GFP_NOWARN);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
ret = __vmalloc(size, flags, PAGE_KERNEL);
|
ret = __vmalloc(size, flags, PAGE_KERNEL);
|
||||||
return ret;
|
return ret;
|
||||||
@@ -172,7 +172,7 @@ void *ext4_kvzalloc(size_t size, gfp_t flags)
|
|||||||
{
|
{
|
||||||
void *ret;
|
void *ret;
|
||||||
|
|
||||||
ret = kzalloc(size, flags);
|
ret = kzalloc(size, flags | __GFP_NOWARN);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
|
ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
|
||||||
return ret;
|
return ret;
|
||||||
|
@@ -1859,7 +1859,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
|
|||||||
|
|
||||||
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
|
memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
|
||||||
|
|
||||||
ht = kzalloc(size, GFP_NOFS);
|
ht = kzalloc(size, GFP_NOFS | __GFP_NOWARN);
|
||||||
if (ht == NULL)
|
if (ht == NULL)
|
||||||
ht = vzalloc(size);
|
ht = vzalloc(size);
|
||||||
if (!ht)
|
if (!ht)
|
||||||
|
@@ -438,7 +438,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
|
|||||||
if (mask != q->tab_mask) {
|
if (mask != q->tab_mask) {
|
||||||
struct sk_buff **ntab;
|
struct sk_buff **ntab;
|
||||||
|
|
||||||
ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
|
ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
|
||||||
|
GFP_KERNEL | __GFP_NOWARN);
|
||||||
if (!ntab)
|
if (!ntab)
|
||||||
ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
|
ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
|
||||||
if (!ntab)
|
if (!ntab)
|
||||||
|
Loading…
Reference in New Issue
Block a user