linux/arch/mips/kernel/module.c

/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Copyright (C) 2001 Rusty Russell.
* Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2005 Thiemo Seufer
*/
#undef DEBUG
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/pgtable.h> /* MODULE_START */
struct mips_hi16 {
struct mips_hi16 *next;
Elf_Addr *addr;
Elf_Addr value;
};
static struct mips_hi16 *mips_hi16_list;
static LIST_HEAD(dbe_list);
static DEFINE_SPINLOCK(dbe_lock);
void *module_alloc(unsigned long size)
{
#ifdef MODULE_START
struct vm_struct *area;
size = PAGE_ALIGN(size);
if (!size)
return NULL;
area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
if (!area)
return NULL;
return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
#else
if (size == 0)
return NULL;
return vmalloc(size);
#endif
}
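/*
 * Illustrative note (a sketch of how the generic module loader uses the
 * two helpers above, not code from this file): the core loader reserves
 * a module's sections with module_alloc() and releases them with
 * module_free(), roughly
 *
 *	void *base = module_alloc(mod->core_size);
 *	...
 *	module_free(mod, base);
 *
 * When MODULE_START is defined, the mapping is confined to the dedicated
 * MODULE_START..MODULE_END window rather than the general vmalloc range,
 * which keeps module addresses compact enough for -msym32 builds.
 */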
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
/* FIXME: If module_region == mod->init_region, trim exception
table entries. */
}
int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
return 0;
}
static int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v)
{
return 0;
}
static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v)
{
*location += v;
return 0;
}
static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v)
{
*location = v;
return 0;
}
static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
{
if (v % 4) {
printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
return -ENOEXEC;
}
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
printk(KERN_ERR
"module %s: relocation overflow\n",
me->name);
return -ENOEXEC;
}
*location = (*location & ~0x03ffffff) |
((*location + (v >> 2)) & 0x03ffffff);
return 0;
}
static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
{
if (v % 4) {
printk(KERN_ERR "module %s: dangerous relocation\n", me->name);
return -ENOEXEC;
}
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
printk(KERN_ERR
"module %s: relocation overflow\n",
me->name);
return -ENOEXEC;
}
*location = (*location & ~0x03ffffff) | ((v >> 2) & 0x03ffffff);
return 0;
}
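/*
 * Worked example for the 26-bit jump relocations above (illustrative
 * numbers only): for a target v = 0x80123458 reached from a jump whose
 * delay slot sits at 0x8004567c, the instruction keeps
 * (v >> 2) & 0x03ffffff = 0x00048d16 in its low 26 bits.  At run time
 * the CPU rebuilds the destination as
 * ((location + 4) & 0xf0000000) | (index << 2) = 0x80123458, which is
 * why a target outside the 256 MB region of the delay slot is rejected
 * as an overflow.
 */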
static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v)
{
struct mips_hi16 *n;
/*
* We cannot relocate this one now because we don't know the value of
* the carry we need to add. Save the information, and let LO16 do the
* actual relocation.
*/
n = kmalloc(sizeof *n, GFP_KERNEL);
if (!n)
return -ENOMEM;
n->addr = (Elf_Addr *)location;
n->value = v;
n->next = mips_hi16_list;
mips_hi16_list = n;
return 0;
}
static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
{
*location = (*location & 0xffff0000) |
((((long long) v + 0x8000LL) >> 16) & 0xffff);
return 0;
}
static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
{
unsigned long insnlo = *location;
Elf_Addr val, vallo;
/* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
if (mips_hi16_list != NULL) {
struct mips_hi16 *l;
l = mips_hi16_list;
while (l != NULL) {
struct mips_hi16 *next;
unsigned long insn;
/*
* The value for the HI16 had best be the same.
*/
if (v != l->value)
goto out_danger;
/*
* Do the HI16 relocation. Note that we actually don't
* need to know anything about the LO16 itself, except
* where to find the low 16 bits of the addend needed
* by the LO16.
*/
insn = *l->addr;
val = ((insn & 0xffff) << 16) + vallo;
val += v;
/*
* Account for the sign extension that will happen in
* the low bits.
*/
val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
insn = (insn & ~0xffff) | val;
*l->addr = insn;
next = l->next;
kfree(l);
l = next;
}
mips_hi16_list = NULL;
}
/*
* Ok, we're done with the HI16 relocs. Now deal with the LO16.
*/
val = v + vallo;
insnlo = (insnlo & ~0xffff) | (val & 0xffff);
*location = insnlo;
return 0;
out_danger:
printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name);
return -ENOEXEC;
}
static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v)
{
*location = (*location & 0xffff0000) | (v & 0xffff);
return 0;
}
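/*
 * Worked example for the HI16/LO16 pairing (illustrative numbers only):
 * to materialise v = 0x12348abc with a lui/addiu pair, the LO16 half is
 * 0x8abc, which addiu sign-extends to -0x7544.  The HI16 half therefore
 * has to be rounded up to (v + 0x8000) >> 16 = 0x1235, so that
 * 0x12350000 + (-0x7544) = 0x12348abc again.  The REL variants defer the
 * HI16 patch on mips_hi16_list until the matching LO16 is seen, because
 * only then is the in-place addend (and hence the carry) known; the RELA
 * variants can apply the same rounding immediately.
 */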
static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v)
{
*(Elf_Addr *)location = v;
return 0;
}
static int apply_r_mips_higher_rela(struct module *me, u32 *location,
Elf_Addr v)
{
*location = (*location & 0xffff0000) |
((((long long) v + 0x80008000LL) >> 32) & 0xffff);
return 0;
}
static int apply_r_mips_highest_rela(struct module *me, u32 *location,
Elf_Addr v)
{
*location = (*location & 0xffff0000) |
((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
return 0;
}
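/*
 * The HIGHER/HIGHEST rounding constants follow the same pattern as HI16.
 * Assuming the usual sequential lui/daddiu/dsll sequence a toolchain may
 * emit for a 64-bit constant (shown here only as an illustration):
 *
 *	lui	rt, %highest(v)
 *	daddiu	rt, rt, %higher(v)
 *	dsll	rt, rt, 16
 *	daddiu	rt, rt, %hi(v)
 *	dsll	rt, rt, 16
 *	daddiu	rt, rt, %lo(v)
 *
 * every daddiu sign-extends its 16-bit immediate, so each field is
 * computed with 0x8000 added at every lower field position (0x80008000
 * for HIGHER, 0x800080008000 for HIGHEST) to compensate for the borrows
 * the lower halves may introduce.
 */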
static int (*reloc_handlers_rel[]) (struct module *me, u32 *location,
Elf_Addr v) = {
[R_MIPS_NONE] = apply_r_mips_none,
[R_MIPS_32] = apply_r_mips_32_rel,
[R_MIPS_26] = apply_r_mips_26_rel,
[R_MIPS_HI16] = apply_r_mips_hi16_rel,
[R_MIPS_LO16] = apply_r_mips_lo16_rel
};
static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
Elf_Addr v) = {
[R_MIPS_NONE] = apply_r_mips_none,
[R_MIPS_32] = apply_r_mips_32_rela,
[R_MIPS_26] = apply_r_mips_26_rela,
[R_MIPS_HI16] = apply_r_mips_hi16_rela,
[R_MIPS_LO16] = apply_r_mips_lo16_rela,
[R_MIPS_64] = apply_r_mips_64_rela,
[R_MIPS_HIGHER] = apply_r_mips_higher_rela,
[R_MIPS_HIGHEST] = apply_r_mips_highest_rela
};
int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
Elf_Sym *sym;
u32 *location;
unsigned int i;
Elf_Addr v;
int res;
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(rel[i]);
if (!sym->st_value) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
}
v = sym->st_value;
res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
if (res)
return res;
}
return 0;
}
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
Elf_Sym *sym;
u32 *location;
unsigned int i;
Elf_Addr v;
int res;
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(rel[i]);
if (!sym->st_value) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
printk(KERN_WARNING "%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
}
v = sym->st_value + rel[i].r_addend;
res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
if (res)
return res;
}
return 0;
}
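/*
 * Note on the two entry points above (a summary, no extra mechanism):
 * apply_relocate() handles REL sections, where the addend lives in the
 * instruction being patched, so its handlers combine *location with
 * v = st_value; apply_relocate_add() handles RELA sections, where the
 * explicit r_addend is folded into v up front and most handlers simply
 * store the computed field.
 */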
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_dbetables(unsigned long addr)
{
unsigned long flags;
const struct exception_table_entry *e = NULL;
struct mod_arch_specific *dbe;
spin_lock_irqsave(&dbe_lock, flags);
list_for_each_entry(dbe, &dbe_list, dbe_list) {
e = search_extable(dbe->dbe_start, dbe->dbe_end - 1, addr);
if (e)
break;
}
spin_unlock_irqrestore(&dbe_lock, flags);
/* If we found one, we are currently running inside it, so the
   module cannot be unloaded, hence no refcnt is needed. */
return e;
}
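/*
 * This mirrors the lookup in the built-in __dbe_table: the bus error
 * path consults both so that a DBE taken inside module text (e.g. a
 * probing access that faults) can be fixed up instead of taking the
 * machine down.  The exact call site lives in the trap handling code,
 * outside this file.
 */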
/* Put in dbe list if necessary. */
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
const Elf_Shdr *s;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
INIT_LIST_HEAD(&me->arch.dbe_list);
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
if (strcmp("__dbe_table", secstrings + s->sh_name) != 0)
continue;
me->arch.dbe_start = (void *)s->sh_addr;
me->arch.dbe_end = (void *)s->sh_addr + s->sh_size;
spin_lock_irq(&dbe_lock);
list_add(&me->arch.dbe_list, &dbe_list);
spin_unlock_irq(&dbe_lock);
}
return 0;
}
void module_arch_cleanup(struct module *mod)
{
spin_lock_irq(&dbe_lock);
list_del(&mod->arch.dbe_list);
spin_unlock_irq(&dbe_lock);
}