dect
/
linux-2.6
Archived
13
0
Fork 0

[MIPS] Fix loads of section mismatches

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
This commit is contained in:
Ralf Baechle 2008-03-08 09:56:28 +00:00
parent 1af0eea214
commit 234fcd1484
30 changed files with 171 additions and 180 deletions

View File

@ -167,7 +167,7 @@ static inline void check_mult_sh(void)
panic(bug64hit, !R4000_WAR ? r4kwar : nowar); panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
} }
static volatile int daddi_ov __initdata = 0; static volatile int daddi_ov __cpuinitdata = 0;
asmlinkage void __init do_daddi_ov(struct pt_regs *regs) asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
{ {
@ -239,7 +239,7 @@ static inline void check_daddi(void)
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
} }
int daddiu_bug __initdata = -1; int daddiu_bug __cpuinitdata = -1;
static inline void check_daddiu(void) static inline void check_daddiu(void)
{ {

View File

@ -550,7 +550,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c)
} }
} }
static char unknown_isa[] __initdata = KERN_ERR \ static char unknown_isa[] __cpuinitdata = KERN_ERR \
"Unsupported ISA type, c0.config0: %d."; "Unsupported ISA type, c0.config0: %d.";
static inline unsigned int decode_config0(struct cpuinfo_mips *c) static inline unsigned int decode_config0(struct cpuinfo_mips *c)
@ -656,7 +656,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
return config3 & MIPS_CONF_M; return config3 & MIPS_CONF_M;
} }
static void __init decode_configs(struct cpuinfo_mips *c) static void __cpuinit decode_configs(struct cpuinfo_mips *c)
{ {
/* MIPS32 or MIPS64 compliant CPU. */ /* MIPS32 or MIPS64 compliant CPU. */
c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER | c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
@ -814,7 +814,7 @@ const char *__cpu_name[NR_CPUS];
/* /*
* Name a CPU * Name a CPU
*/ */
static __init const char *cpu_to_name(struct cpuinfo_mips *c) static __cpuinit const char *cpu_to_name(struct cpuinfo_mips *c)
{ {
const char *name = NULL; const char *name = NULL;
@ -896,7 +896,7 @@ static __init const char *cpu_to_name(struct cpuinfo_mips *c)
return name; return name;
} }
__init void cpu_probe(void) __cpuinit void cpu_probe(void)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
@ -959,7 +959,7 @@ __init void cpu_probe(void)
c->srsets = 1; c->srsets = 1;
} }
__init void cpu_report(void) __cpuinit void cpu_report(void)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;

View File

@ -195,7 +195,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
j start_kernel j start_kernel
END(kernel_entry) END(kernel_entry)
__INIT __CPUINIT
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* /*

View File

@ -1306,7 +1306,7 @@ int cp0_compare_irq;
int cp0_perfcount_irq; int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq); EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
void __init per_cpu_trap_init(void) void __cpuinit per_cpu_trap_init(void)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0; unsigned int status_set = ST0_CU0;
@ -1423,11 +1423,12 @@ void __init set_handler(unsigned long offset, void *addr, unsigned long size)
flush_icache_range(ebase + offset, ebase + offset + size); flush_icache_range(ebase + offset, ebase + offset + size);
} }
static char panic_null_cerr[] __initdata = static char panic_null_cerr[] __cpuinitdata =
"Trying to set NULL cache error exception handler"; "Trying to set NULL cache error exception handler";
/* Install uncached CPU exception handler */ /* Install uncached CPU exception handler */
void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size) void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
unsigned long size)
{ {
#ifdef CONFIG_32BIT #ifdef CONFIG_32BIT
unsigned long uncached_ebase = KSEG1ADDR(ebase); unsigned long uncached_ebase = KSEG1ADDR(ebase);

View File

@ -36,7 +36,7 @@
* values, so we can avoid sharing the same stack area between a cached * values, so we can avoid sharing the same stack area between a cached
* and the uncached mode. * and the uncached mode.
*/ */
unsigned long __init run_uncached(void *func) unsigned long __cpuinit run_uncached(void *func)
{ {
register long sp __asm__("$sp"); register long sp __asm__("$sp");
register long ret __asm__("$2"); register long ret __asm__("$2");

View File

@ -146,7 +146,7 @@ void __init plat_perf_setup(void)
} }
} }
unsigned int __init get_c0_compare_int(void) unsigned int __cpuinit get_c0_compare_int(void)
{ {
#ifdef MSC01E_INT_BASE #ifdef MSC01E_INT_BASE
if (cpu_has_veic) { if (cpu_has_veic) {

View File

@ -83,7 +83,7 @@ static void mips_timer_dispatch(void)
} }
unsigned __init get_c0_compare_int(void) unsigned __cpuinit get_c0_compare_int(void)
{ {
#ifdef MSC01E_INT_BASE #ifdef MSC01E_INT_BASE
if (cpu_has_veic) { if (cpu_has_veic) {

View File

@ -307,7 +307,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
r3k_flush_dcache_range(start, start + size); r3k_flush_dcache_range(start, start + size);
} }
void __init r3k_cache_init(void) void __cpuinit r3k_cache_init(void)
{ {
extern void build_clear_page(void); extern void build_clear_page(void);
extern void build_copy_page(void); extern void build_copy_page(void);

View File

@ -93,7 +93,7 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
blast_dcache32_page(addr); blast_dcache32_page(addr);
} }
static void __init r4k_blast_dcache_page_setup(void) static void __cpuinit r4k_blast_dcache_page_setup(void)
{ {
unsigned long dc_lsize = cpu_dcache_line_size(); unsigned long dc_lsize = cpu_dcache_line_size();
@ -107,7 +107,7 @@ static void __init r4k_blast_dcache_page_setup(void)
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr); static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
static void __init r4k_blast_dcache_page_indexed_setup(void) static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{ {
unsigned long dc_lsize = cpu_dcache_line_size(); unsigned long dc_lsize = cpu_dcache_line_size();
@ -121,7 +121,7 @@ static void __init r4k_blast_dcache_page_indexed_setup(void)
static void (* r4k_blast_dcache)(void); static void (* r4k_blast_dcache)(void);
static void __init r4k_blast_dcache_setup(void) static void __cpuinit r4k_blast_dcache_setup(void)
{ {
unsigned long dc_lsize = cpu_dcache_line_size(); unsigned long dc_lsize = cpu_dcache_line_size();
@ -206,7 +206,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
static void (* r4k_blast_icache_page)(unsigned long addr); static void (* r4k_blast_icache_page)(unsigned long addr);
static void __init r4k_blast_icache_page_setup(void) static void __cpuinit r4k_blast_icache_page_setup(void)
{ {
unsigned long ic_lsize = cpu_icache_line_size(); unsigned long ic_lsize = cpu_icache_line_size();
@ -223,7 +223,7 @@ static void __init r4k_blast_icache_page_setup(void)
static void (* r4k_blast_icache_page_indexed)(unsigned long addr); static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
static void __init r4k_blast_icache_page_indexed_setup(void) static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{ {
unsigned long ic_lsize = cpu_icache_line_size(); unsigned long ic_lsize = cpu_icache_line_size();
@ -247,7 +247,7 @@ static void __init r4k_blast_icache_page_indexed_setup(void)
static void (* r4k_blast_icache)(void); static void (* r4k_blast_icache)(void);
static void __init r4k_blast_icache_setup(void) static void __cpuinit r4k_blast_icache_setup(void)
{ {
unsigned long ic_lsize = cpu_icache_line_size(); unsigned long ic_lsize = cpu_icache_line_size();
@ -268,7 +268,7 @@ static void __init r4k_blast_icache_setup(void)
static void (* r4k_blast_scache_page)(unsigned long addr); static void (* r4k_blast_scache_page)(unsigned long addr);
static void __init r4k_blast_scache_page_setup(void) static void __cpuinit r4k_blast_scache_page_setup(void)
{ {
unsigned long sc_lsize = cpu_scache_line_size(); unsigned long sc_lsize = cpu_scache_line_size();
@ -286,7 +286,7 @@ static void __init r4k_blast_scache_page_setup(void)
static void (* r4k_blast_scache_page_indexed)(unsigned long addr); static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
static void __init r4k_blast_scache_page_indexed_setup(void) static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{ {
unsigned long sc_lsize = cpu_scache_line_size(); unsigned long sc_lsize = cpu_scache_line_size();
@ -304,7 +304,7 @@ static void __init r4k_blast_scache_page_indexed_setup(void)
static void (* r4k_blast_scache)(void); static void (* r4k_blast_scache)(void);
static void __init r4k_blast_scache_setup(void) static void __cpuinit r4k_blast_scache_setup(void)
{ {
unsigned long sc_lsize = cpu_scache_line_size(); unsigned long sc_lsize = cpu_scache_line_size();
@ -691,11 +691,11 @@ static inline void rm7k_erratum31(void)
} }
} }
static char *way_string[] __initdata = { NULL, "direct mapped", "2-way", static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
"3-way", "4-way", "5-way", "6-way", "7-way", "8-way" "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
}; };
static void __init probe_pcache(void) static void __cpuinit probe_pcache(void)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config(); unsigned int config = read_c0_config();
@ -1016,7 +1016,7 @@ static void __init probe_pcache(void)
* executes in KSEG1 space or else you will crash and burn badly. You have * executes in KSEG1 space or else you will crash and burn badly. You have
* been warned. * been warned.
*/ */
static int __init probe_scache(void) static int __cpuinit probe_scache(void)
{ {
unsigned long flags, addr, begin, end, pow2; unsigned long flags, addr, begin, end, pow2;
unsigned int config = read_c0_config(); unsigned int config = read_c0_config();
@ -1095,7 +1095,7 @@ extern int r5k_sc_init(void);
extern int rm7k_sc_init(void); extern int rm7k_sc_init(void);
extern int mips_sc_init(void); extern int mips_sc_init(void);
static void __init setup_scache(void) static void __cpuinit setup_scache(void)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config(); unsigned int config = read_c0_config();
@ -1206,7 +1206,7 @@ void au1x00_fixup_config_od(void)
} }
} }
static void __init coherency_setup(void) static void __cpuinit coherency_setup(void)
{ {
change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT); change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
@ -1238,7 +1238,7 @@ static void __init coherency_setup(void)
} }
} }
void __init r4k_cache_init(void) void __cpuinit r4k_cache_init(void)
{ {
extern void build_clear_page(void); extern void build_clear_page(void);
extern void build_copy_page(void); extern void build_copy_page(void);

View File

@ -329,7 +329,7 @@ static __init void tx39_probe_cache(void)
} }
} }
void __init tx39_cache_init(void) void __cpuinit tx39_cache_init(void)
{ {
extern void build_clear_page(void); extern void build_clear_page(void);
extern void build_copy_page(void); extern void build_copy_page(void);

View File

@ -127,9 +127,10 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
} }
} }
static char cache_panic[] __initdata = "Yeee, unsupported cache architecture."; static char cache_panic[] __cpuinitdata =
"Yeee, unsupported cache architecture.";
void __init cpu_cache_init(void) void __devinit cpu_cache_init(void)
{ {
if (cpu_has_3k_cache) { if (cpu_has_3k_cache) {
extern void __weak r3k_cache_init(void); extern void __weak r3k_cache_init(void);

View File

@ -34,8 +34,6 @@
* is changed. * is changed.
*/ */
__INIT
.set mips64 .set mips64
.set noreorder .set noreorder
.set noat .set noat
@ -51,6 +49,8 @@
* (0x170-0x17f) are used to preserve k0, k1, and ra. * (0x170-0x17f) are used to preserve k0, k1, and ra.
*/ */
__CPUINIT
LEAF(except_vec2_sb1) LEAF(except_vec2_sb1)
/* /*
* If this error is recoverable, we need to exit the handler * If this error is recoverable, we need to exit the handler

View File

@ -66,21 +66,21 @@ EXPORT_SYMBOL(copy_page);
* with 64-bit kernels. The prefetch offsets have been experimentally tuned * with 64-bit kernels. The prefetch offsets have been experimentally tuned
* an Origin 200. * an Origin 200.
*/ */
static int pref_offset_clear __initdata = 512; static int pref_offset_clear __cpuinitdata = 512;
static int pref_offset_copy __initdata = 256; static int pref_offset_copy __cpuinitdata = 256;
static unsigned int pref_src_mode __initdata; static unsigned int pref_src_mode __cpuinitdata;
static unsigned int pref_dst_mode __initdata; static unsigned int pref_dst_mode __cpuinitdata;
static int load_offset __initdata; static int load_offset __cpuinitdata;
static int store_offset __initdata; static int store_offset __cpuinitdata;
static unsigned int __initdata *dest, *epc; static unsigned int __cpuinitdata *dest, *epc;
static unsigned int instruction_pending; static unsigned int instruction_pending;
static union mips_instruction delayed_mi; static union mips_instruction delayed_mi;
static void __init emit_instruction(union mips_instruction mi) static void __cpuinit emit_instruction(union mips_instruction mi)
{ {
if (instruction_pending) if (instruction_pending)
*epc++ = delayed_mi.word; *epc++ = delayed_mi.word;
@ -222,7 +222,7 @@ static inline void build_cdex_p(void)
emit_instruction(mi); emit_instruction(mi);
} }
static void __init __build_store_reg(int reg) static void __cpuinit __build_store_reg(int reg)
{ {
union mips_instruction mi; union mips_instruction mi;
unsigned int width; unsigned int width;
@ -339,7 +339,7 @@ static inline void build_jr_ra(void)
flush_delay_slot_or_nop(); flush_delay_slot_or_nop();
} }
void __init build_clear_page(void) void __cpuinit build_clear_page(void)
{ {
unsigned int loop_start; unsigned int loop_start;
unsigned long off; unsigned long off;
@ -442,7 +442,7 @@ dest = label();
pr_debug("\t.set pop\n"); pr_debug("\t.set pop\n");
} }
void __init build_copy_page(void) void __cpuinit build_copy_page(void)
{ {
unsigned int loop_start; unsigned int loop_start;
unsigned long off; unsigned long off;

View File

@ -293,10 +293,10 @@ void copy_page(void *to, void *from)
EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(copy_page);
void __init build_clear_page(void) void __cpuinit build_clear_page(void)
{ {
} }
void __init build_copy_page(void) void __cpuinit build_copy_page(void)
{ {
} }

View File

@ -168,7 +168,7 @@ struct bcache_ops indy_sc_ops = {
.bc_inv = indy_sc_wback_invalidate .bc_inv = indy_sc_wback_invalidate
}; };
void __init indy_sc_init(void) void __cpuinit indy_sc_init(void)
{ {
if (indy_sc_probe()) { if (indy_sc_probe()) {
indy_sc_enable(); indy_sc_enable();

View File

@ -100,7 +100,7 @@ static inline int __init mips_sc_probe(void)
return 1; return 1;
} }
int __init mips_sc_init(void) int __cpuinit mips_sc_init(void)
{ {
int found = mips_sc_probe(); int found = mips_sc_probe();
if (found) { if (found) {
@ -109,4 +109,3 @@ int __init mips_sc_init(void)
} }
return found; return found;
} }

View File

@ -99,7 +99,7 @@ static struct bcache_ops r5k_sc_ops = {
.bc_inv = r5k_dma_cache_inv_sc .bc_inv = r5k_dma_cache_inv_sc
}; };
void __init r5k_sc_init(void) void __cpuinit r5k_sc_init(void)
{ {
if (r5k_sc_probe()) { if (r5k_sc_probe()) {
r5k_sc_enable(); r5k_sc_enable();

View File

@ -128,7 +128,7 @@ struct bcache_ops rm7k_sc_ops = {
.bc_inv = rm7k_sc_inv .bc_inv = rm7k_sc_inv
}; };
void __init rm7k_sc_init(void) void __cpuinit rm7k_sc_init(void)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
unsigned int config = read_c0_config(); unsigned int config = read_c0_config();

View File

@ -281,7 +281,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
} }
} }
void __init tlb_init(void) void __cpuinit tlb_init(void)
{ {
local_flush_tlb_all(); local_flush_tlb_all();

View File

@ -388,7 +388,7 @@ void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
* lifetime of the system * lifetime of the system
*/ */
static int temp_tlb_entry __initdata; static int temp_tlb_entry __cpuinitdata;
__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask) unsigned long entryhi, unsigned long pagemask)
@ -427,7 +427,7 @@ out:
return ret; return ret;
} }
static void __init probe_tlb(unsigned long config) static void __cpuinit probe_tlb(unsigned long config)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
unsigned int reg; unsigned int reg;
@ -455,7 +455,7 @@ static void __init probe_tlb(unsigned long config)
c->tlbsize = ((reg >> 25) & 0x3f) + 1; c->tlbsize = ((reg >> 25) & 0x3f) + 1;
} }
static int __initdata ntlb = 0; static int __cpuinitdata ntlb = 0;
static int __init set_ntlb(char *str) static int __init set_ntlb(char *str)
{ {
get_option(&str, &ntlb); get_option(&str, &ntlb);
@ -464,7 +464,7 @@ static int __init set_ntlb(char *str)
__setup("ntlb=", set_ntlb); __setup("ntlb=", set_ntlb);
void __init tlb_init(void) void __cpuinit tlb_init(void)
{ {
unsigned int config = read_c0_config(); unsigned int config = read_c0_config();

View File

@ -214,14 +214,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
local_irq_restore(flags); local_irq_restore(flags);
} }
static void __init probe_tlb(unsigned long config) static void __cpuinit probe_tlb(unsigned long config)
{ {
struct cpuinfo_mips *c = &current_cpu_data; struct cpuinfo_mips *c = &current_cpu_data;
c->tlbsize = 3 * 128; /* 3 sets each 128 entries */ c->tlbsize = 3 * 128; /* 3 sets each 128 entries */
} }
void __init tlb_init(void) void __cpuinit tlb_init(void)
{ {
unsigned int config = read_c0_config(); unsigned int config = read_c0_config();
unsigned long status; unsigned long status;

View File

@ -60,7 +60,7 @@ static inline int __maybe_unused r10000_llsc_war(void)
* why; it's not an issue caused by the core RTL. * why; it's not an issue caused by the core RTL.
* *
*/ */
static int __init m4kc_tlbp_war(void) static int __cpuinit m4kc_tlbp_war(void)
{ {
return (current_cpu_data.processor_id & 0xffff00) == return (current_cpu_data.processor_id & 0xffff00) ==
(PRID_COMP_MIPS | PRID_IMP_4KC); (PRID_COMP_MIPS | PRID_IMP_4KC);
@ -144,16 +144,16 @@ static inline void dump_handler(const u32 *handler, int count)
* We deliberately chose a buffer size of 128, so we won't scribble * We deliberately chose a buffer size of 128, so we won't scribble
* over anything important on overflow before we panic. * over anything important on overflow before we panic.
*/ */
static u32 tlb_handler[128] __initdata; static u32 tlb_handler[128] __cpuinitdata;
/* simply assume worst case size for labels and relocs */ /* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __initdata; static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __initdata; static struct uasm_reloc relocs[128] __cpuinitdata;
/* /*
* The R3000 TLB handler is simple. * The R3000 TLB handler is simple.
*/ */
static void __init build_r3000_tlb_refill_handler(void) static void __cpuinit build_r3000_tlb_refill_handler(void)
{ {
long pgdc = (long)pgd_current; long pgdc = (long)pgd_current;
u32 *p; u32 *p;
@ -197,7 +197,7 @@ static void __init build_r3000_tlb_refill_handler(void)
* other one.To keep things simple, we first assume linear space, * other one.To keep things simple, we first assume linear space,
* then we relocate it to the final handler layout as needed. * then we relocate it to the final handler layout as needed.
*/ */
static u32 final_handler[64] __initdata; static u32 final_handler[64] __cpuinitdata;
/* /*
* Hazards * Hazards
@ -221,7 +221,7 @@ static u32 final_handler[64] __initdata;
* *
* As if we MIPS hackers wouldn't know how to nop pipelines happy ... * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
*/ */
static void __init __maybe_unused build_tlb_probe_entry(u32 **p) static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{ {
switch (current_cpu_type()) { switch (current_cpu_type()) {
/* Found by experiment: R4600 v2.0 needs this, too. */ /* Found by experiment: R4600 v2.0 needs this, too. */
@ -245,7 +245,7 @@ static void __init __maybe_unused build_tlb_probe_entry(u32 **p)
*/ */
enum tlb_write_entry { tlb_random, tlb_indexed }; enum tlb_write_entry { tlb_random, tlb_indexed };
static void __init build_tlb_write_entry(u32 **p, struct uasm_label **l, static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, struct uasm_reloc **r,
enum tlb_write_entry wmode) enum tlb_write_entry wmode)
{ {
@ -389,7 +389,7 @@ static void __init build_tlb_write_entry(u32 **p, struct uasm_label **l,
* TMP and PTR are scratch. * TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pmd entry. * TMP will be clobbered, PTR will hold the pmd entry.
*/ */
static void __init static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int tmp, unsigned int ptr) unsigned int tmp, unsigned int ptr)
{ {
@ -450,7 +450,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* BVADDR is the faulting address, PTR is scratch. * BVADDR is the faulting address, PTR is scratch.
* PTR will hold the pgd for vmalloc. * PTR will hold the pgd for vmalloc.
*/ */
static void __init static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int bvaddr, unsigned int ptr) unsigned int bvaddr, unsigned int ptr)
{ {
@ -522,7 +522,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* TMP and PTR are scratch. * TMP and PTR are scratch.
* TMP will be clobbered, PTR will hold the pgd entry. * TMP will be clobbered, PTR will hold the pgd entry.
*/ */
static void __init __maybe_unused static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{ {
long pgdc = (long)pgd_current; long pgdc = (long)pgd_current;
@ -557,7 +557,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
#endif /* !CONFIG_64BIT */ #endif /* !CONFIG_64BIT */
static void __init build_adjust_context(u32 **p, unsigned int ctx) static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{ {
unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@ -583,7 +583,7 @@ static void __init build_adjust_context(u32 **p, unsigned int ctx)
uasm_i_andi(p, ctx, ctx, mask); uasm_i_andi(p, ctx, ctx, mask);
} }
static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{ {
/* /*
* Bug workaround for the Nevada. It seems as if under certain * Bug workaround for the Nevada. It seems as if under certain
@ -608,7 +608,7 @@ static void __init build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
} }
static void __init build_update_entries(u32 **p, unsigned int tmp, static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
unsigned int ptep) unsigned int ptep)
{ {
/* /*
@ -651,7 +651,7 @@ static void __init build_update_entries(u32 **p, unsigned int tmp,
#endif #endif
} }
static void __init build_r4000_tlb_refill_handler(void) static void __cpuinit build_r4000_tlb_refill_handler(void)
{ {
u32 *p = tlb_handler; u32 *p = tlb_handler;
struct uasm_label *l = labels; struct uasm_label *l = labels;
@ -783,7 +783,7 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
static void __init static void __cpuinit
iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr) iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
@ -803,7 +803,7 @@ iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
#endif #endif
} }
static void __init static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
unsigned int mode) unsigned int mode)
{ {
@ -863,7 +863,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
* the page table where this PTE is located, PTE will be re-loaded * the page table where this PTE is located, PTE will be re-loaded
* with it's original value. * with it's original value.
*/ */
static void __init static void __cpuinit
build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r, build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid) unsigned int pte, unsigned int ptr, enum label_id lid)
{ {
@ -874,7 +874,7 @@ build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
} }
/* Make PTE valid, store result in PTR. */ /* Make PTE valid, store result in PTR. */
static void __init static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr) unsigned int ptr)
{ {
@ -887,7 +887,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
* Check if PTE can be written to, if not branch to LABEL. Regardless * Check if PTE can be written to, if not branch to LABEL. Regardless
* restore PTE with value from PTR when done. * restore PTE with value from PTR when done.
*/ */
static void __init static void __cpuinit
build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r, build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid) unsigned int pte, unsigned int ptr, enum label_id lid)
{ {
@ -900,7 +900,7 @@ build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
/* Make PTE writable, update software status bits as well, then store /* Make PTE writable, update software status bits as well, then store
* at PTR. * at PTR.
*/ */
static void __init static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr) unsigned int ptr)
{ {
@ -914,7 +914,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
* Check if PTE can be modified, if not branch to LABEL. Regardless * Check if PTE can be modified, if not branch to LABEL. Regardless
* restore PTE with value from PTR when done. * restore PTE with value from PTR when done.
*/ */
static void __init static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r, build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
unsigned int pte, unsigned int ptr, enum label_id lid) unsigned int pte, unsigned int ptr, enum label_id lid)
{ {
@ -931,7 +931,7 @@ build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
* This places the pte into ENTRYLO0 and writes it with tlbwi. * This places the pte into ENTRYLO0 and writes it with tlbwi.
* Then it returns. * Then it returns.
*/ */
static void __init static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{ {
uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@ -947,7 +947,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
* may have the probe fail bit set as a result of a trap on a * may have the probe fail bit set as a result of a trap on a
* kseg2 access, i.e. without refill. Then it returns. * kseg2 access, i.e. without refill. Then it returns.
*/ */
static void __init static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int pte, struct uasm_reloc **r, unsigned int pte,
unsigned int tmp) unsigned int tmp)
@ -965,7 +965,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
uasm_i_rfe(p); /* branch delay */ uasm_i_rfe(p); /* branch delay */
} }
static void __init static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
unsigned int ptr) unsigned int ptr)
{ {
@ -985,7 +985,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
uasm_i_tlbp(p); /* load delay */ uasm_i_tlbp(p); /* load delay */
} }
static void __init build_r3000_tlb_load_handler(void) static void __cpuinit build_r3000_tlb_load_handler(void)
{ {
u32 *p = handle_tlbl; u32 *p = handle_tlbl;
struct uasm_label *l = labels; struct uasm_label *l = labels;
@ -1015,7 +1015,7 @@ static void __init build_r3000_tlb_load_handler(void)
dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
} }
static void __init build_r3000_tlb_store_handler(void) static void __cpuinit build_r3000_tlb_store_handler(void)
{ {
u32 *p = handle_tlbs; u32 *p = handle_tlbs;
struct uasm_label *l = labels; struct uasm_label *l = labels;
@ -1045,7 +1045,7 @@ static void __init build_r3000_tlb_store_handler(void)
dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
} }
static void __init build_r3000_tlb_modify_handler(void) static void __cpuinit build_r3000_tlb_modify_handler(void)
{ {
u32 *p = handle_tlbm; u32 *p = handle_tlbm;
struct uasm_label *l = labels; struct uasm_label *l = labels;
@ -1078,7 +1078,7 @@ static void __init build_r3000_tlb_modify_handler(void)
/* /*
* R4000 style TLB load/store/modify handlers. * R4000 style TLB load/store/modify handlers.
*/ */
static void __init static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int pte, struct uasm_reloc **r, unsigned int pte,
unsigned int ptr) unsigned int ptr)
@ -1103,7 +1103,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
build_tlb_probe_entry(p); build_tlb_probe_entry(p);
} }
static void __init static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
struct uasm_reloc **r, unsigned int tmp, struct uasm_reloc **r, unsigned int tmp,
unsigned int ptr) unsigned int ptr)
@ -1120,7 +1120,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
#endif #endif
} }
static void __init build_r4000_tlb_load_handler(void) static void __cpuinit build_r4000_tlb_load_handler(void)
{ {
u32 *p = handle_tlbl; u32 *p = handle_tlbl;
struct uasm_label *l = labels; struct uasm_label *l = labels;
@ -1160,7 +1160,7 @@ static void __init build_r4000_tlb_load_handler(void)
dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl)); dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
} }
static void __init build_r4000_tlb_store_handler(void) static void __cpuinit build_r4000_tlb_store_handler(void)
{ {
u32 *p = handle_tlbs; u32 *p = handle_tlbs;
struct uasm_label *l = labels; struct uasm_label *l = labels;
@ -1191,7 +1191,7 @@ static void __init build_r4000_tlb_store_handler(void)
dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs)); dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
} }
static void __init build_r4000_tlb_modify_handler(void) static void __cpuinit build_r4000_tlb_modify_handler(void)
{ {
u32 *p = handle_tlbm; u32 *p = handle_tlbm;
struct uasm_label *l = labels; struct uasm_label *l = labels;
@ -1223,7 +1223,7 @@ static void __init build_r4000_tlb_modify_handler(void)
dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm)); dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
} }
void __init build_tlb_refill_handler(void) void __cpuinit build_tlb_refill_handler(void)
{ {
/* /*
* The refill handler is generated per-CPU, multi-node systems * The refill handler is generated per-CPU, multi-node systems
@ -1269,7 +1269,7 @@ void __init build_tlb_refill_handler(void)
} }
} }
void __init flush_tlb_handlers(void) void __cpuinit flush_tlb_handlers(void)
{ {
flush_icache_range((unsigned long)handle_tlbl, flush_icache_range((unsigned long)handle_tlbl,
(unsigned long)handle_tlbl + sizeof(handle_tlbl)); (unsigned long)handle_tlbl + sizeof(handle_tlbl));

View File

@ -82,7 +82,7 @@ struct insn {
| (e) << RE_SH \ | (e) << RE_SH \
| (f) << FUNC_SH) | (f) << FUNC_SH)
static struct insn insn_table[] __initdata = { static struct insn insn_table[] __cpuinitdata = {
{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD }, { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
@ -135,7 +135,7 @@ static struct insn insn_table[] __initdata = {
#undef M #undef M
static inline __init u32 build_rs(u32 arg) static inline __cpuinit u32 build_rs(u32 arg)
{ {
if (arg & ~RS_MASK) if (arg & ~RS_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n"); printk(KERN_WARNING "Micro-assembler field overflow\n");
@ -143,7 +143,7 @@ static inline __init u32 build_rs(u32 arg)
return (arg & RS_MASK) << RS_SH; return (arg & RS_MASK) << RS_SH;
} }
static inline __init u32 build_rt(u32 arg) static inline __cpuinit u32 build_rt(u32 arg)
{ {
if (arg & ~RT_MASK) if (arg & ~RT_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n"); printk(KERN_WARNING "Micro-assembler field overflow\n");
@ -151,7 +151,7 @@ static inline __init u32 build_rt(u32 arg)
return (arg & RT_MASK) << RT_SH; return (arg & RT_MASK) << RT_SH;
} }
static inline __init u32 build_rd(u32 arg) static inline __cpuinit u32 build_rd(u32 arg)
{ {
if (arg & ~RD_MASK) if (arg & ~RD_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n"); printk(KERN_WARNING "Micro-assembler field overflow\n");
@ -159,7 +159,7 @@ static inline __init u32 build_rd(u32 arg)
return (arg & RD_MASK) << RD_SH; return (arg & RD_MASK) << RD_SH;
} }
static inline __init u32 build_re(u32 arg) static inline __cpuinit u32 build_re(u32 arg)
{ {
if (arg & ~RE_MASK) if (arg & ~RE_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n"); printk(KERN_WARNING "Micro-assembler field overflow\n");
@ -167,7 +167,7 @@ static inline __init u32 build_re(u32 arg)
return (arg & RE_MASK) << RE_SH; return (arg & RE_MASK) << RE_SH;
} }
static inline __init u32 build_simm(s32 arg) static inline __cpuinit u32 build_simm(s32 arg)
{ {
if (arg > 0x7fff || arg < -0x8000) if (arg > 0x7fff || arg < -0x8000)
printk(KERN_WARNING "Micro-assembler field overflow\n"); printk(KERN_WARNING "Micro-assembler field overflow\n");
@ -175,7 +175,7 @@ static inline __init u32 build_simm(s32 arg)
return arg & 0xffff; return arg & 0xffff;
} }
static inline __init u32 build_uimm(u32 arg) static inline __cpuinit u32 build_uimm(u32 arg)
{ {
if (arg & ~IMM_MASK) if (arg & ~IMM_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n"); printk(KERN_WARNING "Micro-assembler field overflow\n");
@ -183,7 +183,7 @@ static inline __init u32 build_uimm(u32 arg)
return arg & IMM_MASK; return arg & IMM_MASK;
} }
static inline __init u32 build_bimm(s32 arg) static inline __cpuinit u32 build_bimm(s32 arg)
{ {
if (arg > 0x1ffff || arg < -0x20000) if (arg > 0x1ffff || arg < -0x20000)
printk(KERN_WARNING "Micro-assembler field overflow\n"); printk(KERN_WARNING "Micro-assembler field overflow\n");
@ -194,7 +194,7 @@ static inline __init u32 build_bimm(s32 arg)
return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
} }
static inline __init u32 build_jimm(u32 arg) static inline __cpuinit u32 build_jimm(u32 arg)
{ {
if (arg & ~((JIMM_MASK) << 2)) if (arg & ~((JIMM_MASK) << 2))
printk(KERN_WARNING "Micro-assembler field overflow\n"); printk(KERN_WARNING "Micro-assembler field overflow\n");
@ -202,7 +202,7 @@ static inline __init u32 build_jimm(u32 arg)
return (arg >> 2) & JIMM_MASK; return (arg >> 2) & JIMM_MASK;
} }
static inline __init u32 build_func(u32 arg) static inline __cpuinit u32 build_func(u32 arg)
{ {
if (arg & ~FUNC_MASK) if (arg & ~FUNC_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n"); printk(KERN_WARNING "Micro-assembler field overflow\n");
@ -210,7 +210,7 @@ static inline __init u32 build_func(u32 arg)
return arg & FUNC_MASK; return arg & FUNC_MASK;
} }
static inline __init u32 build_set(u32 arg) static inline __cpuinit u32 build_set(u32 arg)
{ {
if (arg & ~SET_MASK) if (arg & ~SET_MASK)
printk(KERN_WARNING "Micro-assembler field overflow\n"); printk(KERN_WARNING "Micro-assembler field overflow\n");
@ -222,7 +222,7 @@ static inline __init u32 build_set(u32 arg)
* The order of opcode arguments is implicitly left to right, * The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM. * starting with RS and ending with FUNC or IMM.
*/ */
static void __init build_insn(u32 **buf, enum opcode opc, ...) static void __cpuinit build_insn(u32 **buf, enum opcode opc, ...)
{ {
struct insn *ip = NULL; struct insn *ip = NULL;
unsigned int i; unsigned int i;
@ -375,14 +375,14 @@ I_u3u1u2(_xor)
I_u2u1u3(_xori) I_u2u1u3(_xori)
/* Handle labels. */ /* Handle labels. */
void __init uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{ {
(*lab)->addr = addr; (*lab)->addr = addr;
(*lab)->lab = lid; (*lab)->lab = lid;
(*lab)++; (*lab)++;
} }
int __init uasm_in_compat_space_p(long addr) int __cpuinit uasm_in_compat_space_p(long addr)
{ {
/* Is this address in 32bit compat space? */ /* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
@ -392,7 +392,7 @@ int __init uasm_in_compat_space_p(long addr)
#endif #endif
} }
int __init uasm_rel_highest(long val) int __cpuinit uasm_rel_highest(long val)
{ {
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
@ -401,7 +401,7 @@ int __init uasm_rel_highest(long val)
#endif #endif
} }
int __init uasm_rel_higher(long val) int __cpuinit uasm_rel_higher(long val)
{ {
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
@ -410,17 +410,17 @@ int __init uasm_rel_higher(long val)
#endif #endif
} }
int __init uasm_rel_hi(long val) int __cpuinit uasm_rel_hi(long val)
{ {
return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
} }
int __init uasm_rel_lo(long val) int __cpuinit uasm_rel_lo(long val)
{ {
return ((val & 0xffff) ^ 0x8000) - 0x8000; return ((val & 0xffff) ^ 0x8000) - 0x8000;
} }
void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr) void __cpuinit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{ {
if (!uasm_in_compat_space_p(addr)) { if (!uasm_in_compat_space_p(addr)) {
uasm_i_lui(buf, rs, uasm_rel_highest(addr)); uasm_i_lui(buf, rs, uasm_rel_highest(addr));
@ -436,7 +436,7 @@ void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
uasm_i_lui(buf, rs, uasm_rel_hi(addr)); uasm_i_lui(buf, rs, uasm_rel_hi(addr));
} }
void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr) void __cpuinit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
{ {
UASM_i_LA_mostly(buf, rs, addr); UASM_i_LA_mostly(buf, rs, addr);
if (uasm_rel_lo(addr)) { if (uasm_rel_lo(addr)) {
@ -448,7 +448,7 @@ void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr)
} }
/* Handle relocations. */ /* Handle relocations. */
void __init void __cpuinit
uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid) uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
{ {
(*rel)->addr = addr; (*rel)->addr = addr;
@ -457,7 +457,7 @@ uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
(*rel)++; (*rel)++;
} }
static inline void __init static inline void __cpuinit
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{ {
long laddr = (long)lab->addr; long laddr = (long)lab->addr;
@ -474,7 +474,7 @@ __resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
} }
} }
void __init void __cpuinit
uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{ {
struct uasm_label *l; struct uasm_label *l;
@ -485,7 +485,7 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
__resolve_relocs(rel, l); __resolve_relocs(rel, l);
} }
void __init void __cpuinit
uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off) uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
{ {
for (; rel->lab != UASM_LABEL_INVALID; rel++) for (; rel->lab != UASM_LABEL_INVALID; rel++)
@ -493,7 +493,7 @@ uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
rel->addr += off; rel->addr += off;
} }
void __init void __cpuinit
uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off) uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
{ {
for (; lab->lab != UASM_LABEL_INVALID; lab++) for (; lab->lab != UASM_LABEL_INVALID; lab++)
@ -501,7 +501,7 @@ uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
lab->addr += off; lab->addr += off;
} }
void __init void __cpuinit
uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
u32 *end, u32 *target) u32 *end, u32 *target)
{ {
@ -513,7 +513,7 @@ uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
uasm_move_labels(lab, first, end, off); uasm_move_labels(lab, first, end, off);
} }
int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr) int __cpuinit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
{ {
for (; rel->lab != UASM_LABEL_INVALID; rel++) { for (; rel->lab != UASM_LABEL_INVALID; rel++) {
if (rel->addr == addr if (rel->addr == addr
@ -526,49 +526,49 @@ int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
} }
/* Convenience functions for labeled branches. */ /* Convenience functions for labeled branches. */
void __init void __cpuinit
uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
uasm_i_bltz(p, reg, 0); uasm_i_bltz(p, reg, 0);
} }
void __init void __cpuinit
uasm_il_b(u32 **p, struct uasm_reloc **r, int lid) uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
uasm_i_b(p, 0); uasm_i_b(p, 0);
} }
void __init void __cpuinit
uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
uasm_i_beqz(p, reg, 0); uasm_i_beqz(p, reg, 0);
} }
void __init void __cpuinit
uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
uasm_i_beqzl(p, reg, 0); uasm_i_beqzl(p, reg, 0);
} }
void __init void __cpuinit
uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
uasm_i_bnez(p, reg, 0); uasm_i_bnez(p, reg, 0);
} }
void __init void __cpuinit
uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);
uasm_i_bgezl(p, reg, 0); uasm_i_bgezl(p, reg, 0);
} }
void __init void __cpuinit
uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{ {
uasm_r_mips_pc16(r, *p, lid); uasm_r_mips_pc16(r, *p, lid);

View File

@ -11,38 +11,38 @@
#include <linux/types.h> #include <linux/types.h>
#define Ip_u1u2u3(op) \ #define Ip_u1u2u3(op) \
void __init \ void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u2u1u3(op) \ #define Ip_u2u1u3(op) \
void __init \ void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u3u1u2(op) \ #define Ip_u3u1u2(op) \
void __init \ void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
#define Ip_u1u2s3(op) \ #define Ip_u1u2s3(op) \
void __init \ void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u2s3u1(op) \ #define Ip_u2s3u1(op) \
void __init \ void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c) uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c)
#define Ip_u2u1s3(op) \ #define Ip_u2u1s3(op) \
void __init \ void __cpuinit \
uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c)
#define Ip_u1u2(op) \ #define Ip_u1u2(op) \
void __init uasm_i##op(u32 **buf, unsigned int a, unsigned int b) void __cpuinit uasm_i##op(u32 **buf, unsigned int a, unsigned int b)
#define Ip_u1s2(op) \ #define Ip_u1s2(op) \
void __init uasm_i##op(u32 **buf, unsigned int a, signed int b) void __cpuinit uasm_i##op(u32 **buf, unsigned int a, signed int b)
#define Ip_u1(op) void __init uasm_i##op(u32 **buf, unsigned int a) #define Ip_u1(op) void __cpuinit uasm_i##op(u32 **buf, unsigned int a)
#define Ip_0(op) void __init uasm_i##op(u32 **buf) #define Ip_0(op) void __cpuinit uasm_i##op(u32 **buf)
Ip_u2u1s3(_addiu); Ip_u2u1s3(_addiu);
Ip_u3u1u2(_addu); Ip_u3u1u2(_addu);
@ -98,19 +98,19 @@ struct uasm_label {
int lab; int lab;
}; };
void __init uasm_build_label(struct uasm_label **lab, u32 *addr, int lid); void __cpuinit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid);
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
int __init uasm_in_compat_space_p(long addr); int uasm_in_compat_space_p(long addr);
int __init uasm_rel_highest(long val); int uasm_rel_highest(long val);
int __init uasm_rel_higher(long val); int uasm_rel_higher(long val);
#endif #endif
int __init uasm_rel_hi(long val); int uasm_rel_hi(long val);
int __init uasm_rel_lo(long val); int uasm_rel_lo(long val);
void __init UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr); void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr);
void __init UASM_i_LA(u32 **buf, unsigned int rs, long addr); void UASM_i_LA(u32 **buf, unsigned int rs, long addr);
#define UASM_L_LA(lb) \ #define UASM_L_LA(lb) \
static inline void __init uasm_l##lb(struct uasm_label **lab, u32 *addr) \ static inline void __cpuinit uasm_l##lb(struct uasm_label **lab, u32 *addr) \
{ \ { \
uasm_build_label(lab, addr, label##lb); \ uasm_build_label(lab, addr, label##lb); \
} }
@ -164,29 +164,19 @@ struct uasm_reloc {
/* This is zero so we can use zeroed label arrays. */ /* This is zero so we can use zeroed label arrays. */
#define UASM_LABEL_INVALID 0 #define UASM_LABEL_INVALID 0
void __init uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid); void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid);
void __init void uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab);
uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab); void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off);
void __init void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off);
uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off); void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab,
void __init u32 *first, u32 *end, u32 *target);
uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off); int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
void __init
uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
u32 *end, u32 *target);
int __init uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr);
/* Convenience functions for labeled branches. */ /* Convenience functions for labeled branches. */
void __init void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
void __init uasm_il_b(u32 **p, struct uasm_reloc **r, int lid); void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void __init void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void __init void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void __init
uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void __init
uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
void __init
uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);

View File

@ -40,7 +40,7 @@ int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];
extern struct pci_ops bridge_pci_ops; extern struct pci_ops bridge_pci_ops;
int __init bridge_probe(nasid_t nasid, int widget_id, int masterwid) int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
{ {
unsigned long offset = NODE_OFFSET(nasid); unsigned long offset = NODE_OFFSET(nasid);
struct bridge_controller *bc; struct bridge_controller *bc;

View File

@ -260,7 +260,7 @@ static void pcibios_fixup_device_resources(struct pci_dev *dev,
} }
} }
void pcibios_fixup_bus(struct pci_bus *bus) void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{ {
/* Propagate hose info into the subordinate devices. */ /* Propagate hose info into the subordinate devices. */

View File

@ -53,7 +53,7 @@ extern void pcibr_setup(cnodeid_t);
extern void xtalk_probe_node(cnodeid_t nid); extern void xtalk_probe_node(cnodeid_t nid);
static void __init per_hub_init(cnodeid_t cnode) static void __cpuinit per_hub_init(cnodeid_t cnode)
{ {
struct hub_data *hub = hub_data(cnode); struct hub_data *hub = hub_data(cnode);
nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);

View File

@ -285,7 +285,7 @@ void __cpuinit cpu_time_init(void)
set_c0_status(SRB_TIMOCLK); set_c0_status(SRB_TIMOCLK);
} }
void __init hub_rtc_init(cnodeid_t cnode) void __cpuinit hub_rtc_init(cnodeid_t cnode)
{ {
/* /*
* We only need to initialize the current node. * We only need to initialize the current node.

View File

@ -22,7 +22,7 @@
extern int bridge_probe(nasid_t nasid, int widget, int masterwid); extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
static int __init probe_one_port(nasid_t nasid, int widget, int masterwid) static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid)
{ {
widgetreg_t widget_id; widgetreg_t widget_id;
xwidget_part_num_t partnum; xwidget_part_num_t partnum;
@ -46,7 +46,7 @@ static int __init probe_one_port(nasid_t nasid, int widget, int masterwid)
return 0; return 0;
} }
static int __init xbow_probe(nasid_t nasid) static int __cpuinit xbow_probe(nasid_t nasid)
{ {
lboard_t *brd; lboard_t *brd;
klxbow_t *xbow_p; klxbow_t *xbow_p;
@ -99,7 +99,7 @@ static int __init xbow_probe(nasid_t nasid)
return 0; return 0;
} }
void __init xtalk_probe_node(cnodeid_t nid) void __cpuinit xtalk_probe_node(cnodeid_t nid)
{ {
volatile u64 hubreg; volatile u64 hubreg;
nasid_t nasid; nasid_t nasid;

View File

@ -93,7 +93,7 @@ extern void (*flush_data_cache_page)(unsigned long addr);
clear_bit(PG_dcache_dirty, &(page)->flags) clear_bit(PG_dcache_dirty, &(page)->flags)
/* Run kernel code uncached, useful for cache probing functions. */ /* Run kernel code uncached, useful for cache probing functions. */
unsigned long __init run_uncached(void *func); unsigned long run_uncached(void *func);
extern void *kmap_coherent(struct page *page, unsigned long addr); extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void); extern void kunmap_coherent(void);