fbe06b7bae
Christian found that v3.9 does not work on an E350 when EFI is enabled:

[ 1.658832] Trying to unpack rootfs image as initramfs...
[ 1.679935] BUG: unable to handle kernel paging request at ffff88006e3fd000
[ 1.686940] IP: [<ffffffff813661df>] memset+0x1f/0xb0
[ 1.692010] PGD 1f77067 PUD 1f7a067 PMD 61420067 PTE 0

but early memtest reported that all memory could be accessed without problems.

The early page table is set up in the following sequence:

[ 0.000000] init_memory_mapping: [mem 0x00000000-0x000fffff]
[ 0.000000] init_memory_mapping: [mem 0x6e600000-0x6e7fffff]
[ 0.000000] init_memory_mapping: [mem 0x6c000000-0x6e5fffff]
[ 0.000000] init_memory_mapping: [mem 0x00100000-0x6bffffff]
[ 0.000000] init_memory_mapping: [mem 0x6e800000-0x6ea07fff]

but later efi_enter_virtual_mode() tries to set up the mapping again, wrongly:

[ 0.010644] pid_max: default: 32768 minimum: 301
[ 0.015302] init_memory_mapping: [mem 0x640c5000-0x6e3fcfff]

That means the pfn_range_is_mapped() check failed.

It turns out there is a bug in add_range_with_merge(): it does not merge ranges properly when the newly added range fills the hole between two existing ranges. In this case, [mem 0x00100000-0x6bffffff] is the hole between [mem 0x00000000-0x000fffff] and [mem 0x6c000000-0x6e7fffff].

Fix add_range_with_merge() by having it call itself recursively.

Reported-by: "Christian König" <christian.koenig@amd.com>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/CAE9FiQVofGoSk7q5-0irjkBxemqK729cND4hov-1QCBJDhxpgQ@mail.gmail.com
Cc: <stable@vger.kernel.org> v3.9
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
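To illustrate the fixed merge behaviour, below is a small standalone userspace sketch (not part of the kernel tree): struct range, the u64 typedef and the simplified copies of add_range()/add_range_with_merge() are local re-definitions adapted from this file so the snippet compiles with a plain C compiler, and the main() harness is purely illustrative. It replays the first, last and hole-filling ranges from the boot log above (range ends treated as exclusive, as callers of this file pass base and base + size) and prints a single merged range covering 0x0 through 0x6e800000.

/* Userspace sketch adapted from kernel/range.c; not kernel code. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;

struct range {
        u64 start;
        u64 end;
};

static int add_range(struct range *range, int az, int nr_range,
                     u64 start, u64 end)
{
        if (start >= end || nr_range >= az)
                return nr_range;

        range[nr_range].start = start;
        range[nr_range].end = end;
        return nr_range + 1;
}

static int add_range_with_merge(struct range *range, int az, int nr_range,
                                u64 start, u64 end)
{
        int i;

        if (start >= end)
                return nr_range;

        for (i = 0; i < nr_range; i++) {
                u64 common_start, common_end;

                if (!range[i].end)
                        continue;

                common_start = range[i].start > start ? range[i].start : start;
                common_end = range[i].end < end ? range[i].end : end;
                if (common_start > common_end)
                        continue;

                /* Overlapping or touching: widen, clear the slot, re-merge. */
                start = range[i].start < start ? range[i].start : start;
                end = range[i].end > end ? range[i].end : end;
                range[i].start = 0;
                range[i].end = 0;
                return add_range_with_merge(range, az, nr_range, start, end);
        }

        return add_range(range, az, nr_range, start, end);
}

int main(void)
{
        struct range r[8] = { { 0, 0 } };
        int nr = 0, i;

        /* The first, last and hole-filling ranges from the log above. */
        nr = add_range_with_merge(r, 8, nr, 0x00000000, 0x00100000);
        nr = add_range_with_merge(r, 8, nr, 0x6c000000, 0x6e800000);
        nr = add_range_with_merge(r, 8, nr, 0x00100000, 0x6c000000);

        for (i = 0; i < nr; i++) {
                if (r[i].end)
                        printf("[%#llx-%#llx)\n",
                               (unsigned long long)r[i].start,
                               (unsigned long long)r[i].end);
        }
        return 0;
}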
/*
 * Range add and subtract
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sort.h>

#include <linux/range.h>

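/*
 * Append the range [start, end) to the array if it is non-empty and a
 * slot is free; returns the number of used slots (unchanged when the
 * range is empty or the array is already full).
 */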
int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
{
        if (start >= end)
                return nr_range;

        /* Out of slots: */
        if (nr_range >= az)
                return nr_range;

        range[nr_range].start = start;
        range[nr_range].end = end;

        nr_range++;

        return nr_range;
}

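/*
 * Like add_range(), but first try to merge [start, end) with an existing
 * range that it overlaps or touches.  A successful merge clears the old
 * slot and recursively re-adds the widened range, so a range that fills
 * the hole between two existing ranges collapses all three into one.
 */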
int add_range_with_merge(struct range *range, int az, int nr_range,
                         u64 start, u64 end)
{
        int i;

        if (start >= end)
                return nr_range;

        /* Try to merge it with old one: */
        for (i = 0; i < nr_range; i++) {
                u64 final_start, final_end;
                u64 common_start, common_end;

                if (!range[i].end)
                        continue;

                common_start = max(range[i].start, start);
                common_end = min(range[i].end, end);
                if (common_start > common_end)
                        continue;

                final_start = min(range[i].start, start);
                final_end = max(range[i].end, end);

                /* clear it and add it back for further merge */
                range[i].start = 0;
                range[i].end = 0;
                return add_range_with_merge(range, az, nr_range,
                                            final_start, final_end);
        }

        /* Need to add it: */
        return add_range(range, az, nr_range, start, end);
}

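/*
 * Remove [start, end) from every range in the array: fully covered ranges
 * are cleared, partially covered ranges are trimmed, and a range that
 * strictly contains [start, end) is split in two using a spare slot.
 */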
void subtract_range(struct range *range, int az, u64 start, u64 end)
{
        int i, j;

        if (start >= end)
                return;

        for (j = 0; j < az; j++) {
                if (!range[j].end)
                        continue;

                if (start <= range[j].start && end >= range[j].end) {
                        range[j].start = 0;
                        range[j].end = 0;
                        continue;
                }

                if (start <= range[j].start && end < range[j].end &&
                    range[j].start < end) {
                        range[j].start = end;
                        continue;
                }

                if (start > range[j].start && end >= range[j].end &&
                    range[j].end > start) {
                        range[j].end = start;
                        continue;
                }

                if (start > range[j].start && end < range[j].end) {
                        /* Find the new spare: */
                        for (i = 0; i < az; i++) {
                                if (range[i].end == 0)
                                        break;
                        }
                        if (i < az) {
                                range[i].end = range[j].end;
                                range[i].start = end;
                        } else {
                                pr_err("%s: run out of slot in ranges\n",
                                       __func__);
                        }
                        range[j].end = start;
                        continue;
                }
        }
}

static int cmp_range(const void *x1, const void *x2)
{
        const struct range *r1 = x1;
        const struct range *r2 = x2;
        s64 start1, start2;

        start1 = r1->start;
        start2 = r2->start;

        /*
         * Compare instead of subtracting: the s64 difference can overflow
         * the int return value and yield the wrong sign.
         */
        if (start1 < start2)
                return -1;
        if (start1 > start2)
                return 1;
        return 0;
}

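/*
 * Compact the array by moving used entries from the tail into cleared
 * (end == 0) slots at the head, then sort the used entries by start
 * address; returns the number of used slots.
 */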
int clean_sort_range(struct range *range, int az)
{
        int i, j, k = az - 1, nr_range = az;

        for (i = 0; i < k; i++) {
                if (range[i].end)
                        continue;
                for (j = k; j > i; j--) {
                        if (range[j].end) {
                                k = j;
                                break;
                        }
                }
                if (j == i)
                        break;
                range[i].start = range[k].start;
                range[i].end = range[k].end;
                range[k].start = 0;
                range[k].end = 0;
                k--;
        }
        /* count it */
        for (i = 0; i < az; i++) {
                if (!range[i].end) {
                        nr_range = i;
                        break;
                }
        }

        /* sort them */
        sort(range, nr_range, sizeof(struct range), cmp_range, NULL);

        return nr_range;
}

void sort_range(struct range *range, int nr_range)
{
        /* sort them */
        sort(range, nr_range, sizeof(struct range), cmp_range, NULL);
}