Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  Revert "mm: add /proc controls for pdflush threads"
  viocd: needs to depend on BLOCK
  block: fix the bio_vec array index out-of-bounds test
Linus Torvalds 2009-05-15 08:05:37 -07:00
commit c653849981
6 changed files with 15 additions and 74 deletions


@@ -39,8 +39,6 @@ Currently, these files are in /proc/sys/vm:
 - nr_hugepages
 - nr_overcommit_hugepages
 - nr_pdflush_threads
-- nr_pdflush_threads_min
-- nr_pdflush_threads_max
 - nr_trim_pages (only if CONFIG_MMU=n)
 - numa_zonelist_order
 - oom_dump_tasks
@@ -469,32 +467,6 @@ The default value is 0.
 
 ==============================================================
 
-nr_pdflush_threads_min
-
-This value controls the minimum number of pdflush threads.
-
-At boot time, the kernel will create and maintain 'nr_pdflush_threads_min'
-threads for the kernel's lifetime.
-
-The default value is 2. The minimum value you can specify is 1, and
-the maximum value is the current setting of 'nr_pdflush_threads_max'.
-
-See 'nr_pdflush_threads_max' below for more information.
-
-==============================================================
-
-nr_pdflush_threads_max
-
-This value controls the maximum number of pdflush threads that can be
-created. The pdflush algorithm will create a new pdflush thread (up to
-this maximum) if no pdflush threads have been available for >= 1 second.
-
-The default value is 8. The minimum value you can specify is the
-current value of 'nr_pdflush_threads_min' and the
-maximum is 1000.
-
-==============================================================
-
 overcommit_memory:
 
 This value contains a flag that enables memory overcommitment.
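
For reference, the two files documented above were ordinary integer sysctls under /proc/sys/vm. A minimal user-space sketch of that access pattern (illustrative only: after this revert the nr_pdflush_threads_min/max files no longer exist, so another integer tunable under /proc/sys/vm would have to stand in):

/* Illustrative sketch only: reads and rewrites an integer /proc/sys/vm
 * tunable. The path below names a file that this revert removes. */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/vm/nr_pdflush_threads_min";
	FILE *f = fopen(path, "r+");
	int val;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("%s = %d\n", path, val);

	/* Writes were bounded to the documented [1, nr_pdflush_threads_max]
	 * range by proc_dointvec_minmax in the kernel. */
	rewind(f);
	fprintf(f, "%d\n", 4);
	fclose(f);
	return 0;
}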


@@ -17,6 +17,7 @@ config VIODASD
 
 config VIOCD
 	tristate "iSeries Virtual I/O CD support"
+	depends on BLOCK
 	select VIOPATH
 	help
 	  If you are running Linux on an IBM iSeries system and you want to


@@ -1768,10 +1768,10 @@ static int __end_that_request_first(struct request *req, int error,
 		} else {
 			int idx = bio->bi_idx + next_idx;
 
-			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+			if (unlikely(idx >= bio->bi_vcnt)) {
 				blk_dump_rq_flags(req, "__end_that");
 				printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-				       __func__, bio->bi_idx, bio->bi_vcnt);
+				       __func__, idx, bio->bi_vcnt);
 				break;
 			}
 
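
The change above makes the bounds test use the fully resolved index (bi_idx + next_idx) instead of bi_idx alone, so the check covers the index that is actually about to be used. A stand-alone sketch of the same guard, with simplified stand-in structures rather than the real struct bio (the types and the segment_len() helper are invented for illustration):

#include <stdio.h>

/* Simplified stand-ins, not the real kernel definitions. */
struct bio_vec { unsigned int bv_len; };
struct bio {
	unsigned short bi_idx;		/* current index into bi_io_vec */
	unsigned short bi_vcnt;		/* number of valid bio_vecs */
	struct bio_vec *bi_io_vec;
};

/* Return the length of the segment 'next_idx' entries past the current one,
 * or -1 if that index would run off the end of the bio_vec array. */
static int segment_len(const struct bio *bio, int next_idx)
{
	int idx = bio->bi_idx + next_idx;

	/* The bug was testing bio->bi_idx here: bi_idx alone can be in
	 * range while bi_idx + next_idx is already past bi_vcnt. */
	if (idx >= bio->bi_vcnt) {
		fprintf(stderr, "bio idx %d >= vcnt %d\n", idx, bio->bi_vcnt);
		return -1;
	}
	return bio->bi_io_vec[idx].bv_len;
}

int main(void)
{
	struct bio_vec vecs[2] = { { 512 }, { 1024 } };
	struct bio bio = { .bi_idx = 1, .bi_vcnt = 2, .bi_io_vec = vecs };

	printf("%d\n", segment_len(&bio, 0));	/* in range: prints 1024 */
	printf("%d\n", segment_len(&bio, 1));	/* out of range: prints -1 */
	return 0;
}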


@@ -168,8 +168,6 @@ void writeback_set_ratelimit(void);
 
 /* pdflush.c */
 extern int nr_pdflush_threads;	/* Global so it can be exported to sysctl
 				   read-only. */
-extern int nr_pdflush_threads_max; /* Global so it can be exported to sysctl */
-extern int nr_pdflush_threads_min; /* Global so it can be exported to sysctl */
 
 #endif		/* WRITEBACK_H */


@@ -101,7 +101,6 @@ static int __maybe_unused one = 1;
 static int __maybe_unused two = 2;
 static unsigned long one_ul = 1;
 static int one_hundred = 100;
-static int one_thousand = 1000;
 
 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
 static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
@@ -1033,28 +1032,6 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0444 /* read-only*/,
 		.proc_handler	= &proc_dointvec,
 	},
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "nr_pdflush_threads_min",
-		.data		= &nr_pdflush_threads_min,
-		.maxlen		= sizeof nr_pdflush_threads_min,
-		.mode		= 0644 /* read-write */,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &one,
-		.extra2		= &nr_pdflush_threads_max,
-	},
-	{
-		.ctl_name	= CTL_UNNUMBERED,
-		.procname	= "nr_pdflush_threads_max",
-		.data		= &nr_pdflush_threads_max,
-		.maxlen		= sizeof nr_pdflush_threads_max,
-		.mode		= 0644 /* read-write */,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &nr_pdflush_threads_min,
-		.extra2		= &one_thousand,
-	},
 	{
 		.ctl_name	= VM_SWAPPINESS,
 		.procname	= "swappiness",


@@ -57,14 +57,6 @@ static DEFINE_SPINLOCK(pdflush_lock);
  */
 int nr_pdflush_threads = 0;
 
-/*
- * The max/min number of pdflush threads. R/W by sysctl at
- * /proc/sys/vm/nr_pdflush_threads_max/min
- */
-int nr_pdflush_threads_max __read_mostly = MAX_PDFLUSH_THREADS;
-int nr_pdflush_threads_min __read_mostly = MIN_PDFLUSH_THREADS;
-
 /*
  * The time at which the pdflush thread pool last went empty
  */
@@ -76,7 +68,7 @@ static unsigned long last_empty_jifs;
  * Thread pool management algorithm:
  *
  * - The minimum and maximum number of pdflush instances are bound
- *   by nr_pdflush_threads_min and nr_pdflush_threads_max.
+ *   by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
  *
  * - If there have been no idle pdflush instances for 1 second, create
  *   a new one.
@@ -142,13 +134,14 @@ static int __pdflush(struct pdflush_work *my_work)
 		 * To throttle creation, we reset last_empty_jifs.
 		 */
 		if (time_after(jiffies, last_empty_jifs + 1 * HZ)) {
-			if (list_empty(&pdflush_list) &&
-			    nr_pdflush_threads < nr_pdflush_threads_max) {
-				last_empty_jifs = jiffies;
-				nr_pdflush_threads++;
-				spin_unlock_irq(&pdflush_lock);
-				start_one_pdflush_thread();
-				spin_lock_irq(&pdflush_lock);
+			if (list_empty(&pdflush_list)) {
+				if (nr_pdflush_threads < MAX_PDFLUSH_THREADS) {
+					last_empty_jifs = jiffies;
+					nr_pdflush_threads++;
+					spin_unlock_irq(&pdflush_lock);
+					start_one_pdflush_thread();
+					spin_lock_irq(&pdflush_lock);
+				}
 			}
 		}
 
@@ -160,7 +153,7 @@ static int __pdflush(struct pdflush_work *my_work)
 		 */
 		if (list_empty(&pdflush_list))
 			continue;
-		if (nr_pdflush_threads <= nr_pdflush_threads_min)
+		if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
 			continue;
 		pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
 		if (time_after(jiffies, pdf->when_i_went_to_sleep + 1 * HZ)) {
@@ -266,9 +259,9 @@ static int __init pdflush_init(void)
 	 * Pre-set nr_pdflush_threads... If we fail to create,
 	 * the count will be decremented.
 	 */
-	nr_pdflush_threads = nr_pdflush_threads_min;
+	nr_pdflush_threads = MIN_PDFLUSH_THREADS;
 
-	for (i = 0; i < nr_pdflush_threads_min; i++)
+	for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
 		start_one_pdflush_thread();
 	return 0;
 }
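
With the sysctl knobs reverted, the pool is again bounded by the compile-time MIN_PDFLUSH_THREADS/MAX_PDFLUSH_THREADS, and sizing follows the policy in the comment block above: spawn a thread when the idle list has stayed empty for a second (up to the maximum), and let a thread exit once it has been idle for more than a second (down to the minimum). A user-space sketch of just that decision logic, with invented pool_state/should_spawn/should_exit_one names and a fake HZ, not the kernel code itself:

#include <stdbool.h>
#include <stdio.h>

/* User-space stand-ins for the kernel's constants and time base. */
#define MIN_PDFLUSH_THREADS	2
#define MAX_PDFLUSH_THREADS	8
#define HZ			100

struct pool_state {
	int nr_threads;			/* current pool size */
	int nr_idle;			/* threads currently waiting for work */
	unsigned long last_empty_jifs;	/* when the idle list last went empty */
	unsigned long oldest_idle_jifs;	/* when the longest-idle thread slept */
};

/* Grow: the idle list has been empty for a second and we are below the max. */
static bool should_spawn(const struct pool_state *s, unsigned long now)
{
	return s->nr_idle == 0 &&
	       now - s->last_empty_jifs >= 1 * HZ &&
	       s->nr_threads < MAX_PDFLUSH_THREADS;
}

/* Shrink: a thread has been idle for over a second and we are above the min. */
static bool should_exit_one(const struct pool_state *s, unsigned long now)
{
	return s->nr_idle > 0 &&
	       now - s->oldest_idle_jifs > 1 * HZ &&
	       s->nr_threads > MIN_PDFLUSH_THREADS;
}

int main(void)
{
	struct pool_state s = { .nr_threads = 2, .nr_idle = 0,
				.last_empty_jifs = 0, .oldest_idle_jifs = 0 };

	/* One second of sustained demand with no idle threads: grow. */
	printf("spawn? %d\n", should_spawn(&s, 150));

	/* A thread idle for over a second while above the minimum: shrink. */
	s.nr_threads = 3;
	s.nr_idle = 1;
	printf("exit?  %d\n", should_exit_one(&s, 250));
	return 0;
}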