One vdso fix for a longstanding ASLR bug that's been in the news lately.

The vdso base address has always been randomized, and I don't think there's anything particularly wrong with the range over which it's randomized, but the implementation seems to have been buggy since the very beginning. This fixes the implementation to remove a large bias that caused a small fraction of possible vdso load addresses to be vastly more likely than the rest of the possible addresses.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJUlhwwAAoJEK9N98ZeDfrklIcH/2/4P/ffRcCp0qYo7gFpXwPh
bWem3ygB4xmBgiivwGJOx/GgE6/QGmedZmD6EDLoLmpuOvGjdp4iRmvU1dCSDWOE
bMOBa1cxC4TPGzqlbGgjmyHgMPuihJq6GInAqmpJk/hZJ8W7JfnXyoZbt9pj4UBW
gYKMLa0gSF/rMTZ5hkDQ6mVH65M7jJnmHLRydTpK8Ryfap2lu01MIr6mC6xaobVc
5NfbI8bZexQECGLmPsFRnFWrYXNz86PKtN0j4YnPRVPRbBSTi8nHGu+zK2CJXpgu
9hyZ0eOdqSEi4wwQgI9g9lZSsa1C+5QBc6BwiDy5XjToPJOx5EnT6+m9O+fy9Q8=
=BVMi
-----END PGP SIGNATURE-----

Merge tag 'pr-20141220-x86-vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/luto/linux into x86/urgent

Pull a VDSO fix from Andy Lutomirski:

 "One vdso fix for a longstanding ASLR bug that's been in the news
  lately.

  The vdso base address has always been randomized, and I don't think
  there's anything particularly wrong with the range over which it's
  randomized, but the implementation seems to have been buggy since the
  very beginning. This fixes the implementation to remove a large bias
  that caused a small fraction of possible vdso load addresses to be
  vastly more likely than the rest of the possible addresses."

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit fbe1bf1406
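The bias the pull request describes is easy to reproduce outside the kernel. What follows is a minimal userspace C sketch, not kernel code: the page/PMD constants are assumed for a 4K-page x86_64 configuration, and the stack-top and vdso-size inputs are made up for illustration. It enumerates all 512 possible outputs of the old scheme and counts how many of them the clamp collapses onto a single address.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PTRS_PER_PTE	512UL
#define PMD_SIZE	(PAGE_SIZE * PTRS_PER_PTE)
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	/* Hypothetical inputs: a page-aligned stack top 16 pages below a
	   PMD boundary, and a two-page vdso. */
	unsigned long start = 0x7ffff7a00000UL - 16 * PAGE_SIZE;
	unsigned long len = 2 * PAGE_SIZE;

	/* Old bounds: round start up to the next PMD boundary, back off by len. */
	unsigned long end = ((start + PMD_SIZE - 1) & PMD_MASK) - len;
	unsigned long clamped = 0;

	/* The old code drew offset = get_random_int() & (PTRS_PER_PTE - 1),
	   uniform over 512 values, then clamped: if (addr >= end) addr = end.
	   Count how many of the 512 offsets collapse onto end. */
	for (unsigned long offset = 0; offset < PTRS_PER_PTE; offset++) {
		unsigned long addr = start + (offset << PAGE_SHIFT);
		if (addr >= end)
			clamped++;
	}

	/* Prints "498 of 512 offsets clamp to end": that one address is
	   chosen ~97% of the time; the other 14 get ~0.2% each. */
	printf("%lu of %lu offsets clamp to end\n", clamped, PTRS_PER_PTE);
	return 0;
}

The count is deterministic, since all 512 offsets are enumerated rather than sampled: with only 14 pages of room between the stack top and the PMD boundary, 498 of the 512 equally likely offsets clamp to the same final address, which therefore shows up about 97% of the time.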
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
 
 struct linux_binprm;
 
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits.
-
-   Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 #ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 #else
 	unsigned long addr, end;
 	unsigned offset;
-	end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+	/*
+	 * Round up the start address.  It can start out unaligned as a result
+	 * of stack start randomization.
+	 */
+	start = PAGE_ALIGN(start);
+
+	/* Round the lowest possible end address up to a PMD boundary. */
+	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
 	if (end >= TASK_SIZE_MAX)
 		end = TASK_SIZE_MAX;
 	end -= len;
-	/* This loses some more bits than a modulo, but is cheaper */
-	offset = get_random_int() & (PTRS_PER_PTE - 1);
-	addr = start + (offset << PAGE_SHIFT);
-	if (addr >= end)
-		addr = end;
+
+	if (end > start) {
+		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+		addr = start + (offset << PAGE_SHIFT);
+	} else {
+		addr = start;
+	}
 
 	/*
-	 * page-align it here so that get_unmapped_area doesn't
-	 * align it wrongfully again to the next page. addr can come in 4K
-	 * unaligned here as a result of stack start randomization.
+	 * Forcibly align the final address in case we have a hardware
+	 * issue that requires alignment for performance reasons.
 	 */
-	addr = PAGE_ALIGN(addr);
 	addr = align_vdso_addr(addr);
 
 	return addr;
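For comparison, the same hypothetical inputs pushed through the fixed path: end becomes ((start + len + PMD_SIZE - 1) & PMD_MASK) - len, leaving 14 pages between start and end, and the offset is drawn as get_random_int() % (((end - start) >> PAGE_SHIFT) + 1), i.e. uniformly over 15 values. Each of the 15 candidate load addresses is now equally likely instead of one absorbing ~97% of the probability mass. The imperfection the new comment concedes is a population-level effect: end always rounds up to the next PMD boundary, so the pages just below that boundary are reachable from every start position in the 2 MB window, while lower pages are reachable only from starts beneath them; aggregated over many processes, the vdso therefore still lands near the end of a PMD more often than elsewhere.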