sched/rt, fs: Use CONFIG_PREEMPTION

CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by CONFIG_PREEMPT_RT.
Both PREEMPT and PREEMPT_RT require the same functionality which today
depends on CONFIG_PREEMPT.

Switch the i_size() and part_nr_sects_…() code over to use
CONFIG_PREEMPTION, and update the comment for fsstack_copy_inode_size()
to refer to CONFIG_PREEMPTION as well.

[bigeasy: +PREEMPT comments]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20191015191821.11479-24-bigeasy@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author:    Thomas Gleixner <tglx@linutronix.de>, 2019-10-15 21:18:10 +02:00
Committer: Ingo Molnar
parent d4a3dcbc47
commit 2496396fcb
3 changed files with 8 additions and 8 deletions
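
For background (a simplified sketch, not part of this patch): on 32-bit
kernels a 64-bit i_size or nr_sects value is loaded and stored as two
words, so an unsynchronized reader can observe a torn value. All three
files below use the same #ifdef ladder; a minimal sketch of the reader
side, using the names from include/linux/fs.h (sketch_i_size_read() is a
made-up name, the real function appears in the hunks below):

	#include <linux/fs.h>
	#include <linux/preempt.h>
	#include <linux/seqlock.h>

	static inline loff_t sketch_i_size_read(const struct inode *inode)
	{
	#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
		loff_t i_size;
		unsigned int seq;

		/* SMP: retry until the writer's seqcount is even and unchanged */
		do {
			seq = read_seqcount_begin(&inode->i_size_seqcount);
			i_size = inode->i_size;
		} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
		return i_size;
	#elif BITS_PER_LONG == 32 && defined(CONFIG_PREEMPTION)
		loff_t i_size;

		/* UP with preemption: staying on the CPU for both halves of
		 * the 64-bit load is enough; no other CPU can interleave a
		 * store, and preemption is held off for the duration. */
		preempt_disable();
		i_size = inode->i_size;
		preempt_enable();
		return i_size;
	#else
		/* 64-bit, or 32-bit without SMP/preemption: the load is atomic */
		return inode->i_size;
	#endif
	}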

--- a/fs/stack.c
+++ b/fs/stack.c

@@ -23,7 +23,7 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
 	/*
 	 * But on 32-bit, we ought to make an effort to keep the two halves of
-	 * i_blocks in sync despite SMP or PREEMPT - though stat's
+	 * i_blocks in sync despite SMP or PREEMPTION - though stat's
 	 * generic_fillattr() doesn't bother, and we won't be applying quotas
 	 * (where i_blocks does become important) at the upper level.
 	 *
@@ -38,14 +38,14 @@ void fsstack_copy_inode_size(struct inode *dst, struct inode *src)
 		spin_unlock(&src->i_lock);
 
 	/*
-	 * If CONFIG_SMP or CONFIG_PREEMPT on 32-bit, it's vital for
+	 * If CONFIG_SMP or CONFIG_PREEMPTION on 32-bit, it's vital for
 	 * fsstack_copy_inode_size() to hold some lock around
 	 * i_size_write(), otherwise i_size_read() may spin forever (see
 	 * include/linux/fs.h). We don't necessarily hold i_mutex when this
 	 * is called, so take i_lock for that case.
 	 *
 	 * And if on 32-bit, continue our effort to keep the two halves of
-	 * i_blocks in sync despite SMP or PREEMPT: use i_lock for that case
+	 * i_blocks in sync despite SMP or PREEMPTION: use i_lock for that case
 	 * too, and do both at once by combining the tests.
 	 *
 	 * There is none of this locking overhead in the 64-bit case.
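
The "may spin forever" above refers to the seqcount retry loop in
include/linux/fs.h (next file): write_seqcount_begin() makes the count
odd and write_seqcount_end() makes it even again, so a writer preempted
in between leaves readers retrying indefinitely. A hedged sketch of that
interleaving (illustrative only, not kernel code):

	/*
	 * 32-bit, writer not serialized and preemptible:
	 *
	 *   writer: write_seqcount_begin(&inode->i_size_seqcount); // count -> odd
	 *   writer: inode->i_size = new_size;
	 *           ... writer preempted here ...
	 *   reader: seq = read_seqcount_begin(...);  // waits for an even count
	 *           -> spins until the writer runs write_seqcount_end()
	 *
	 * Taking i_lock around i_size_write() (or disabling preemption
	 * inside it, as the CONFIG_PREEMPTION case does) closes this window.
	 */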

--- a/include/linux/fs.h
+++ b/include/linux/fs.h

@@ -855,7 +855,7 @@ static inline loff_t i_size_read(const struct inode *inode)
 		i_size = inode->i_size;
 	} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
 	return i_size;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
 	loff_t i_size;
 
 	preempt_disable();
@@ -880,7 +880,7 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
 	inode->i_size = i_size;
 	write_seqcount_end(&inode->i_size_seqcount);
 	preempt_enable();
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
 	preempt_disable();
 	inode->i_size = i_size;
 	preempt_enable();
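
Usage note (illustrative; example_set_size() is a made-up helper):
i_size_write() only keeps readers from seeing a torn value or a stuck-odd
seqcount, it does not serialize concurrent writers, so callers are
expected to hold i_mutex or an equivalent lock:

	#include <linux/fs.h>

	/* Hypothetical caller: serialize writers with the inode lock so
	 * the seqcount in i_size_write() is never left odd by a racing
	 * writer. */
	static void example_set_size(struct inode *inode, loff_t new_size)
	{
		inode_lock(inode);	/* i_rwsem, the "i_mutex" in the comments */
		i_size_write(inode, new_size);
		inode_unlock(inode);
	}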

--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h

@@ -718,7 +718,7 @@ static inline void hd_free_part(struct hd_struct *part)
  * accessor function.
  *
  * Code written along the lines of i_size_read() and i_size_write().
- * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption
+ * CONFIG_PREEMPTION case optimizes the case of UP kernel with preemption
  * on.
  */
 static inline sector_t part_nr_sects_read(struct hd_struct *part)
@@ -731,7 +731,7 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part)
 		nr_sects = part->nr_sects;
 	} while (read_seqcount_retry(&part->nr_sects_seq, seq));
 	return nr_sects;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
 	sector_t nr_sects;
 
 	preempt_disable();
@@ -754,7 +754,7 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
 	write_seqcount_begin(&part->nr_sects_seq);
 	part->nr_sects = size;
 	write_seqcount_end(&part->nr_sects_seq);
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
 	preempt_disable();
 	part->nr_sects = size;
 	preempt_enable();
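
The genhd.h pair mirrors i_size_read()/i_size_write() for
hd_struct::nr_sects. A short usage sketch (example_part_size_bytes() is a
made-up helper; SECTOR_SHIFT is the kernel's 512-byte sector shift):

	#include <linux/blkdev.h>
	#include <linux/genhd.h>

	/* Hypothetical reader: part_nr_sects_read() hides all of the
	 * BITS_PER_LONG / SMP / PREEMPTION cases above, so no extra
	 * locking is needed for a tear-free snapshot of the size. */
	static inline loff_t example_part_size_bytes(struct hd_struct *part)
	{
		return (loff_t)part_nr_sects_read(part) << SECTOR_SHIFT;
	}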