dect / linux-2.6

Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (31 commits)
  [MIPS] Remove duplicate ISA DMA code for 0 DMA channel case.
  [MIPS] Remove unused definition of cpu_to_lelongp()
  [MIPS] Remove userspace proofing from <asm/bitops.h>.
  [MIPS] Remove old junk left from old atomic_lock.
  [MIPS] Use conditional traps for BUG_ON on MIPS II and better.
  [MIPS] mips HPT cleanup: make clocksource_mips public
  [MIPS] do_IRQ cleanup
  [MIPS] Avoid duplicate D-cache flush on R4000SC / R4400SC and MC variants.
  [MIPS] Remove redundant r4k_blast_icache() calls
  [MIPS] Work around bogus gcc warnings.
  [MIPS] Fix double inclusions
  [MIPS] use generic_handle_irq, handle_level_irq, handle_percpu_irq
  [MIPS] IRQ cleanups
  [MIPS] mips hpt cleanup: get rid of mips_hpt_init
  [MIPS] PB1200: Remove duplicate definitions
  [MIPS] Fix alignment hole in struct cache_desc; shrink struct.
  [MIPS] Oprofile: kernel support for the R10000.
  [MIPS] Remove unused R10000 performance counter definitions.
  [MIPS] Add support for kexec
  [MIPS] Don't print presence of WAIT instruction on bootup.
  ...
Linus Torvalds 2006-12-01 16:44:02 -08:00
commit c3e59d1e89
99 changed files with 1077 additions and 2123 deletions
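Most of the board-level IRQ churn in the diffs below follows a single pattern: the old .startup/.shutdown/.enable/.disable hooks of struct irq_chip are dropped in favour of .mask/.unmask/.mask_ack, and descriptors are registered through set_irq_chip() or set_irq_chip_and_handler(..., handle_level_irq) instead of poking irq_desc[] by hand. The fragment below is only an illustrative sketch of the converted form, assuming the 2.6.19-era genirq API; the my_* names are invented for the example and do not appear anywhere in this merge.

#include <linux/init.h>
#include <linux/irq.h>

/* Placeholder controller callbacks -- not from this merge. */
static void my_mask_irq(unsigned int irq)
{
	/* write the controller's mask bit for this line */
}

static void my_unmask_irq(unsigned int irq)
{
	/* clear the mask bit again */
}

static struct irq_chip my_irq_chip = {
	.typename = "MY-PIC",
	.ack      = my_mask_irq,	/* level-triggered: ack by masking */
	.mask     = my_mask_irq,
	.mask_ack = my_mask_irq,
	.unmask   = my_unmask_irq,
};

void __init my_irq_init(int base, int count)
{
	int i;

	/* replaces the old hand-rolled irq_desc[i].chip = ... loops */
	for (i = base; i < base + count; i++)
		set_irq_chip_and_handler(i, &my_irq_chip, handle_level_irq);
}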

View File

@ -266,8 +266,8 @@ config MIPS_MALTA
select BOOT_ELF32
select HAVE_STD_PC_SERIAL_PORT
select DMA_NONCOHERENT
select IRQ_CPU
select GENERIC_ISA_DMA
select IRQ_CPU
select HW_HAS_PCI
select I8259
select MIPS_BOARDS_GEN
@ -534,7 +534,7 @@ config SGI_IP22
select HW_HAS_EISA
select IP22_CPU_SCACHE
select IRQ_CPU
select NO_ISA if ISA
select GENERIC_ISA_DMA_SUPPORT_BROKEN
select SWAP_IO_SPACE
select SYS_HAS_CPU_R4X00
select SYS_HAS_CPU_R5000
@ -766,6 +766,23 @@ config TOSHIBA_RBTX4938
endchoice
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
depends on EXPERIMENTAL
help
kexec is a system call that implements the ability to shut down your
current kernel, and to start another kernel. It is like a reboot
but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
The name comes from the similarity to the exec system call.
It is an ongoing process to be certain the hardware in a machine
is properly shut down, so do not be surprised if this code does not
initially work for you. It may help to enable device hotplugging
support. As of this writing the exact hardware interface is
strongly in flux, so no good recommendation can be made.
source "arch/mips/ddb5xxx/Kconfig"
source "arch/mips/gt64120/ev64120/Kconfig"
source "arch/mips/jazz/Kconfig"
@ -864,6 +881,9 @@ config MIPS_NILE4
config MIPS_DISABLE_OBSOLETE_IDE
bool
config GENERIC_ISA_DMA_SUPPORT_BROKEN
bool
#
# Endianness selection. Sufficiently obscure that many users don't know what to
# answer, so we try hard to limit the available choices. Also the use of a
@ -1835,13 +1855,11 @@ source "drivers/pci/Kconfig"
config ISA
bool
config NO_ISA
bool
config EISA
bool "EISA support"
depends on HW_HAS_EISA
select ISA
select GENERIC_ISA_DMA
---help---
The Extended Industry Standard Architecture (EISA) bus was
developed as an open alternative to the IBM MicroChannel bus.
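The new CONFIG_KEXEC entry added to this Kconfig above describes kexec only in prose, so a concrete illustration may help; the userspace probe below is a hedged sketch, not part of the merge. It assumes an o32 MIPS userland and takes the syscall number 4311 from the scall32-o32.S hunk later in this commit (sys_kexec_load is the slot after sys_get_robust_list, which is annotated as 4310). With zero segments the call loads nothing, but be aware that, run as root, it also discards any previously staged kexec image.

/* Hypothetical probe: is kexec_load wired up on this kernel? */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define NR_KEXEC_LOAD_O32 4311	/* assumed from the o32 syscall table in this commit */

int main(void)
{
	/* entry = 0, nr_segments = 0, segments = NULL, flags = 0 */
	long ret = syscall(NR_KEXEC_LOAD_O32, 0UL, 0UL, (void *)0, 0UL);

	if (ret == -1 && errno == ENOSYS)
		printf("kexec_load not present (CONFIG_KEXEC off or pre-kexec kernel)\n");
	else if (ret == -1 && errno == EPERM)
		printf("kexec_load present, but CAP_SYS_BOOT is required\n");
	else
		printf("kexec_load returned %ld\n", ret);
	return 0;
}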

View File

@ -63,9 +63,7 @@ cflags-y += -mabi=64
ifdef CONFIG_BUILD_ELF64
cflags-y += $(call cc-option,-mno-explicit-relocs)
else
# -msym32 can not be used for modules since they are loaded into XKSEG
CFLAGS_MODULE += $(call cc-option,-mno-explicit-relocs)
CFLAGS_KERNEL += $(call cc-option,-msym32)
cflags-y += $(call cc-option,-msym32)
endif
endif

View File

@ -70,7 +70,6 @@ extern irq_cpustat_t irq_stat [NR_CPUS];
extern void mips_timer_interrupt(void);
static void setup_local_irq(unsigned int irq, int type, int int_req);
static unsigned int startup_irq(unsigned int irq);
static void end_irq(unsigned int irq_nr);
static inline void mask_and_ack_level_irq(unsigned int irq_nr);
static inline void mask_and_ack_rise_edge_irq(unsigned int irq_nr);
@ -84,20 +83,6 @@ void (*board_init_irq)(void);
static DEFINE_SPINLOCK(irq_lock);
static unsigned int startup_irq(unsigned int irq_nr)
{
local_enable_irq(irq_nr);
return 0;
}
static void shutdown_irq(unsigned int irq_nr)
{
local_disable_irq(irq_nr);
return;
}
inline void local_enable_irq(unsigned int irq_nr)
{
if (irq_nr > AU1000_LAST_INTC0_INT) {
@ -249,41 +234,37 @@ void restore_local_and_enable(int controller, unsigned long mask)
static struct irq_chip rise_edge_irq_type = {
.typename = "Au1000 Rise Edge",
.startup = startup_irq,
.shutdown = shutdown_irq,
.enable = local_enable_irq,
.disable = local_disable_irq,
.ack = mask_and_ack_rise_edge_irq,
.mask = local_disable_irq,
.mask_ack = mask_and_ack_rise_edge_irq,
.unmask = local_enable_irq,
.end = end_irq,
};
static struct irq_chip fall_edge_irq_type = {
.typename = "Au1000 Fall Edge",
.startup = startup_irq,
.shutdown = shutdown_irq,
.enable = local_enable_irq,
.disable = local_disable_irq,
.ack = mask_and_ack_fall_edge_irq,
.mask = local_disable_irq,
.mask_ack = mask_and_ack_fall_edge_irq,
.unmask = local_enable_irq,
.end = end_irq,
};
static struct irq_chip either_edge_irq_type = {
.typename = "Au1000 Rise or Fall Edge",
.startup = startup_irq,
.shutdown = shutdown_irq,
.enable = local_enable_irq,
.disable = local_disable_irq,
.ack = mask_and_ack_either_edge_irq,
.mask = local_disable_irq,
.mask_ack = mask_and_ack_either_edge_irq,
.unmask = local_enable_irq,
.end = end_irq,
};
static struct irq_chip level_irq_type = {
.typename = "Au1000 Level",
.startup = startup_irq,
.shutdown = shutdown_irq,
.enable = local_enable_irq,
.disable = local_disable_irq,
.ack = mask_and_ack_level_irq,
.mask = local_disable_irq,
.mask_ack = mask_and_ack_level_irq,
.unmask = local_enable_irq,
.end = end_irq,
};
@ -328,31 +309,31 @@ static void setup_local_irq(unsigned int irq_nr, int type, int int_req)
au_writel(1<<(irq_nr-32), IC1_CFG2CLR);
au_writel(1<<(irq_nr-32), IC1_CFG1CLR);
au_writel(1<<(irq_nr-32), IC1_CFG0SET);
irq_desc[irq_nr].chip = &rise_edge_irq_type;
set_irq_chip(irq_nr, &rise_edge_irq_type);
break;
case INTC_INT_FALL_EDGE: /* 0:1:0 */
au_writel(1<<(irq_nr-32), IC1_CFG2CLR);
au_writel(1<<(irq_nr-32), IC1_CFG1SET);
au_writel(1<<(irq_nr-32), IC1_CFG0CLR);
irq_desc[irq_nr].chip = &fall_edge_irq_type;
set_irq_chip(irq_nr, &fall_edge_irq_type);
break;
case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
au_writel(1<<(irq_nr-32), IC1_CFG2CLR);
au_writel(1<<(irq_nr-32), IC1_CFG1SET);
au_writel(1<<(irq_nr-32), IC1_CFG0SET);
irq_desc[irq_nr].chip = &either_edge_irq_type;
set_irq_chip(irq_nr, &either_edge_irq_type);
break;
case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
au_writel(1<<(irq_nr-32), IC1_CFG2SET);
au_writel(1<<(irq_nr-32), IC1_CFG1CLR);
au_writel(1<<(irq_nr-32), IC1_CFG0SET);
irq_desc[irq_nr].chip = &level_irq_type;
set_irq_chip(irq_nr, &level_irq_type);
break;
case INTC_INT_LOW_LEVEL: /* 1:1:0 */
au_writel(1<<(irq_nr-32), IC1_CFG2SET);
au_writel(1<<(irq_nr-32), IC1_CFG1SET);
au_writel(1<<(irq_nr-32), IC1_CFG0CLR);
irq_desc[irq_nr].chip = &level_irq_type;
set_irq_chip(irq_nr, &level_irq_type);
break;
case INTC_INT_DISABLED: /* 0:0:0 */
au_writel(1<<(irq_nr-32), IC1_CFG0CLR);
@ -380,31 +361,31 @@ static void setup_local_irq(unsigned int irq_nr, int type, int int_req)
au_writel(1<<irq_nr, IC0_CFG2CLR);
au_writel(1<<irq_nr, IC0_CFG1CLR);
au_writel(1<<irq_nr, IC0_CFG0SET);
irq_desc[irq_nr].chip = &rise_edge_irq_type;
set_irq_chip(irq_nr, &rise_edge_irq_type);
break;
case INTC_INT_FALL_EDGE: /* 0:1:0 */
au_writel(1<<irq_nr, IC0_CFG2CLR);
au_writel(1<<irq_nr, IC0_CFG1SET);
au_writel(1<<irq_nr, IC0_CFG0CLR);
irq_desc[irq_nr].chip = &fall_edge_irq_type;
set_irq_chip(irq_nr, &fall_edge_irq_type);
break;
case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
au_writel(1<<irq_nr, IC0_CFG2CLR);
au_writel(1<<irq_nr, IC0_CFG1SET);
au_writel(1<<irq_nr, IC0_CFG0SET);
irq_desc[irq_nr].chip = &either_edge_irq_type;
set_irq_chip(irq_nr, &either_edge_irq_type);
break;
case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
au_writel(1<<irq_nr, IC0_CFG2SET);
au_writel(1<<irq_nr, IC0_CFG1CLR);
au_writel(1<<irq_nr, IC0_CFG0SET);
irq_desc[irq_nr].chip = &level_irq_type;
set_irq_chip(irq_nr, &level_irq_type);
break;
case INTC_INT_LOW_LEVEL: /* 1:1:0 */
au_writel(1<<irq_nr, IC0_CFG2SET);
au_writel(1<<irq_nr, IC0_CFG1SET);
au_writel(1<<irq_nr, IC0_CFG0CLR);
irq_desc[irq_nr].chip = &level_irq_type;
set_irq_chip(irq_nr, &level_irq_type);
break;
case INTC_INT_DISABLED: /* 0:0:0 */
au_writel(1<<irq_nr, IC0_CFG0CLR);

View File

@ -55,7 +55,7 @@
#endif
extern void _board_init_irq(void);
extern void (*board_init_irq)(void);
extern void (*board_init_irq)(void);
void board_reset (void)
{
@ -151,11 +151,7 @@ void __init board_setup(void)
#endif
/* Setup Pb1200 External Interrupt Controller */
{
extern void (*board_init_irq)(void);
extern void _board_init_irq(void);
board_init_irq = _board_init_irq;
}
board_init_irq = _board_init_irq;
}
int

View File

@ -45,25 +45,22 @@ static inline void galileo_irq(void)
{
unsigned int mask, pending, devfn;
mask = GALILEO_INL(GT_INTRMASK_OFS);
pending = GALILEO_INL(GT_INTRCAUSE_OFS) & mask;
mask = GT_READ(GT_INTRMASK_OFS);
pending = GT_READ(GT_INTRCAUSE_OFS) & mask;
if (pending & GALILEO_INTR_T0EXP) {
GALILEO_OUTL(~GALILEO_INTR_T0EXP, GT_INTRCAUSE_OFS);
if (pending & GT_INTR_T0EXP_MSK) {
GT_WRITE(GT_INTRCAUSE_OFS, ~GT_INTR_T0EXP_MSK);
do_IRQ(COBALT_GALILEO_IRQ);
} else if (pending & GALILEO_INTR_RETRY_CTR) {
devfn = GALILEO_INL(GT_PCI0_CFGADDR_OFS) >> 8;
GALILEO_OUTL(~GALILEO_INTR_RETRY_CTR, GT_INTRCAUSE_OFS);
printk(KERN_WARNING "Galileo: PCI retry count exceeded (%02x.%u)\n",
PCI_SLOT(devfn), PCI_FUNC(devfn));
} else if (pending & GT_INTR_RETRYCTR0_MSK) {
devfn = GT_READ(GT_PCI0_CFGADDR_OFS) >> 8;
GT_WRITE(GT_INTRCAUSE_OFS, ~GT_INTR_RETRYCTR0_MSK);
printk(KERN_WARNING
"Galileo: PCI retry count exceeded (%02x.%u)\n",
PCI_SLOT(devfn), PCI_FUNC(devfn));
} else {
GALILEO_OUTL(mask & ~pending, GT_INTRMASK_OFS);
printk(KERN_WARNING "Galileo: masking unexpected interrupt %08x\n", pending);
GT_WRITE(GT_INTRMASK_OFS, mask & ~pending);
printk(KERN_WARNING
"Galileo: masking unexpected interrupt %08x\n", pending);
}
}
@ -104,7 +101,7 @@ void __init arch_init_irq(void)
* Mask all Galileo interrupts. The Galileo
* handler is set in cobalt_timer_setup()
*/
GALILEO_OUTL(0, GT_INTRMASK_OFS);
GT_WRITE(GT_INTRMASK_OFS, 0);
init_i8259_irqs(); /* 0 ... 15 */
mips_cpu_irq_init(COBALT_CPU_IRQ); /* 16 ... 23 */

View File

@ -51,23 +51,23 @@ const char *get_system_type(void)
void __init plat_timer_setup(struct irqaction *irq)
{
/* Load timer value for HZ (TCLK is 50MHz) */
GALILEO_OUTL(50*1000*1000 / HZ, GT_TC0_OFS);
GT_WRITE(GT_TC0_OFS, 50*1000*1000 / HZ);
/* Enable timer */
GALILEO_OUTL(GALILEO_ENTC0 | GALILEO_SELTC0, GT_TC_CONTROL_OFS);
GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
/* Register interrupt */
setup_irq(COBALT_GALILEO_IRQ, irq);
/* Enable interrupt */
GALILEO_OUTL(GALILEO_INTR_T0EXP | GALILEO_INL(GT_INTRMASK_OFS), GT_INTRMASK_OFS);
GT_WRITE(GT_INTRMASK_OFS, GT_INTR_T0EXP_MSK | GT_READ(GT_INTRMASK_OFS));
}
extern struct pci_ops gt64111_pci_ops;
static struct resource cobalt_mem_resource = {
.start = GT64111_MEM_BASE,
.end = GT64111_MEM_END,
.start = GT_DEF_PCI0_MEM0_BASE,
.end = GT_DEF_PCI0_MEM0_BASE + GT_DEF_PCI0_MEM0_SIZE - 1,
.name = "PCI memory",
.flags = IORESOURCE_MEM
};
@ -115,7 +115,7 @@ static struct pci_controller cobalt_pci_controller = {
.mem_resource = &cobalt_mem_resource,
.mem_offset = 0,
.io_resource = &cobalt_io_resource,
.io_offset = 0 - GT64111_IO_BASE
.io_offset = 0 - GT_DEF_PCI0_IO_BASE,
};
void __init plat_mem_setup(void)
@ -128,7 +128,7 @@ void __init plat_mem_setup(void)
_machine_halt = cobalt_machine_halt;
pm_power_off = cobalt_machine_power_off;
set_io_port_base(CKSEG1ADDR(GT64111_IO_BASE));
set_io_port_base(CKSEG1ADDR(GT_DEF_PCI0_IO_BASE));
/* I/O port resource must include UART and LCD/buttons */
ioport_resource.end = 0x0fffffff;
@ -139,7 +139,7 @@ void __init plat_mem_setup(void)
/* Read the cobalt id register out of the PCI config space */
PCI_CFG_SET(devfn, (VIA_COBALT_BRD_ID_REG & ~0x3));
cobalt_board_id = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
cobalt_board_id = GT_READ(GT_PCI0_CFGDATA_OFS);
cobalt_board_id >>= ((VIA_COBALT_BRD_ID_REG & 3) * 8);
cobalt_board_id = VIA_COBALT_BRD_REG_to_ID(cobalt_board_id);

View File

@ -53,14 +53,6 @@ vrc5477_irq_disable(unsigned int irq)
ll_vrc5477_irq_disable(irq - vrc5477_irq_base);
}
static unsigned int vrc5477_irq_startup(unsigned int irq)
{
vrc5477_irq_enable(irq);
return 0;
}
#define vrc5477_irq_shutdown vrc5477_irq_disable
static void
vrc5477_irq_ack(unsigned int irq)
{
@ -91,11 +83,10 @@ vrc5477_irq_end(unsigned int irq)
struct irq_chip vrc5477_irq_controller = {
.typename = "vrc5477_irq",
.startup = vrc5477_irq_startup,
.shutdown = vrc5477_irq_shutdown,
.enable = vrc5477_irq_enable,
.disable = vrc5477_irq_disable,
.ack = vrc5477_irq_ack,
.mask = vrc5477_irq_disable,
.mask_ack = vrc5477_irq_ack,
.unmask = vrc5477_irq_enable,
.end = vrc5477_irq_end
};
@ -103,12 +94,8 @@ void __init vrc5477_irq_init(u32 irq_base)
{
u32 i;
for (i= irq_base; i< irq_base+ NUM_5477_IRQ; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
irq_desc[i].chip = &vrc5477_irq_controller;
}
for (i= irq_base; i< irq_base+ NUM_5477_IRQ; i++)
set_irq_chip(i, &vrc5477_irq_controller);
vrc5477_irq_base = irq_base;
}

View File

@ -18,7 +18,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/addrspace.h>
@ -231,13 +230,10 @@ irqreturn_t dec_ecc_be_interrupt(int irq, void *dev_id)
static inline void dec_kn02_be_init(void)
{
volatile u32 *csr = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CSR);
unsigned long flags;
kn0x_erraddr = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_ERRADDR);
kn0x_chksyn = (void *)CKSEG1ADDR(KN02_SLOT_BASE + KN02_CHKSYN);
spin_lock_irqsave(&kn02_lock, flags);
/* Preset write-only bits of the Control Register cache. */
cached_kn02_csr = *csr | KN02_CSR_LEDS;
@ -247,8 +243,6 @@ static inline void dec_kn02_be_init(void)
cached_kn02_csr |= KN02_CSR_CORRECT;
*csr = cached_kn02_csr;
iob();
spin_unlock_irqrestore(&kn02_lock, flags);
}
static inline void dec_kn03_be_init(void)

View File

@ -267,7 +267,7 @@ handle_it:
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
PTR_LA ra, ret_from_irq
j do_IRQ
j dec_irq_dispatch
nop
#ifdef CONFIG_32BIT

View File

@ -13,7 +13,6 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/dec/ioasic.h>
@ -21,8 +20,6 @@
#include <asm/dec/ioasic_ints.h>
static DEFINE_SPINLOCK(ioasic_lock);
static int ioasic_irq_base;
@ -52,65 +49,31 @@ static inline void clear_ioasic_irq(unsigned int irq)
ioasic_write(IO_REG_SIR, sir);
}
static inline void enable_ioasic_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&ioasic_lock, flags);
unmask_ioasic_irq(irq);
spin_unlock_irqrestore(&ioasic_lock, flags);
}
static inline void disable_ioasic_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&ioasic_lock, flags);
mask_ioasic_irq(irq);
spin_unlock_irqrestore(&ioasic_lock, flags);
}
static inline unsigned int startup_ioasic_irq(unsigned int irq)
{
enable_ioasic_irq(irq);
return 0;
}
#define shutdown_ioasic_irq disable_ioasic_irq
static inline void ack_ioasic_irq(unsigned int irq)
{
spin_lock(&ioasic_lock);
mask_ioasic_irq(irq);
spin_unlock(&ioasic_lock);
fast_iob();
}
static inline void end_ioasic_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
enable_ioasic_irq(irq);
unmask_ioasic_irq(irq);
}
static struct irq_chip ioasic_irq_type = {
.typename = "IO-ASIC",
.startup = startup_ioasic_irq,
.shutdown = shutdown_ioasic_irq,
.enable = enable_ioasic_irq,
.disable = disable_ioasic_irq,
.ack = ack_ioasic_irq,
.mask = mask_ioasic_irq,
.mask_ack = ack_ioasic_irq,
.unmask = unmask_ioasic_irq,
.end = end_ioasic_irq,
};
#define startup_ioasic_dma_irq startup_ioasic_irq
#define unmask_ioasic_dma_irq unmask_ioasic_irq
#define shutdown_ioasic_dma_irq shutdown_ioasic_irq
#define enable_ioasic_dma_irq enable_ioasic_irq
#define disable_ioasic_dma_irq disable_ioasic_irq
#define mask_ioasic_dma_irq mask_ioasic_irq
#define ack_ioasic_dma_irq ack_ioasic_irq
@ -123,11 +86,10 @@ static inline void end_ioasic_dma_irq(unsigned int irq)
static struct irq_chip ioasic_dma_irq_type = {
.typename = "IO-ASIC-DMA",
.startup = startup_ioasic_dma_irq,
.shutdown = shutdown_ioasic_dma_irq,
.enable = enable_ioasic_dma_irq,
.disable = disable_ioasic_dma_irq,
.ack = ack_ioasic_dma_irq,
.mask = mask_ioasic_dma_irq,
.mask_ack = ack_ioasic_dma_irq,
.unmask = unmask_ioasic_dma_irq,
.end = end_ioasic_dma_irq,
};
@ -140,18 +102,12 @@ void __init init_ioasic_irqs(int base)
ioasic_write(IO_REG_SIMR, 0);
fast_iob();
for (i = base; i < base + IO_INR_DMA; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
irq_desc[i].chip = &ioasic_irq_type;
}
for (; i < base + IO_IRQ_LINES; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
irq_desc[i].chip = &ioasic_dma_irq_type;
}
for (i = base; i < base + IO_INR_DMA; i++)
set_irq_chip_and_handler(i, &ioasic_irq_type,
handle_level_irq);
for (; i < base + IO_IRQ_LINES; i++)
set_irq_chip_and_handler(i, &ioasic_dma_irq_type,
handle_level_irq);
ioasic_irq_base = base;
}

View File

@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/dec/kn02.h>
@ -29,7 +28,6 @@
* There is no default value -- it has to be initialized.
*/
u32 cached_kn02_csr;
DEFINE_SPINLOCK(kn02_lock);
static int kn02_irq_base;
@ -53,54 +51,24 @@ static inline void mask_kn02_irq(unsigned int irq)
*csr = cached_kn02_csr;
}
static inline void enable_kn02_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&kn02_lock, flags);
unmask_kn02_irq(irq);
spin_unlock_irqrestore(&kn02_lock, flags);
}
static inline void disable_kn02_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&kn02_lock, flags);
mask_kn02_irq(irq);
spin_unlock_irqrestore(&kn02_lock, flags);
}
static unsigned int startup_kn02_irq(unsigned int irq)
{
enable_kn02_irq(irq);
return 0;
}
#define shutdown_kn02_irq disable_kn02_irq
static void ack_kn02_irq(unsigned int irq)
{
spin_lock(&kn02_lock);
mask_kn02_irq(irq);
spin_unlock(&kn02_lock);
iob();
}
static void end_kn02_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
enable_kn02_irq(irq);
unmask_kn02_irq(irq);
}
static struct irq_chip kn02_irq_type = {
.typename = "KN02-CSR",
.startup = startup_kn02_irq,
.shutdown = shutdown_kn02_irq,
.enable = enable_kn02_irq,
.disable = disable_kn02_irq,
.ack = ack_kn02_irq,
.mask = mask_kn02_irq,
.mask_ack = ack_kn02_irq,
.unmask = unmask_kn02_irq,
.end = end_kn02_irq,
};
@ -109,22 +77,15 @@ void __init init_kn02_irqs(int base)
{
volatile u32 *csr = (volatile u32 *)CKSEG1ADDR(KN02_SLOT_BASE +
KN02_CSR);
unsigned long flags;
int i;
/* Mask interrupts. */
spin_lock_irqsave(&kn02_lock, flags);
cached_kn02_csr &= ~KN02_CSR_IOINTEN;
*csr = cached_kn02_csr;
iob();
spin_unlock_irqrestore(&kn02_lock, flags);
for (i = base; i < base + KN02_IRQ_LINES; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
irq_desc[i].chip = &kn02_irq_type;
}
for (i = base; i < base + KN02_IRQ_LINES; i++)
set_irq_chip_and_handler(i, &kn02_irq_type, handle_level_irq);
kn02_irq_base = base;
}

View File

@ -761,3 +761,9 @@ void __init arch_init_irq(void)
if (dec_interrupt[DEC_IRQ_HALT] >= 0)
setup_irq(dec_interrupt[DEC_IRQ_HALT], &haltirq);
}
asmlinkage unsigned int dec_irq_dispatch(unsigned int irq)
{
do_IRQ(irq);
return 0;
}

View File

@ -151,7 +151,7 @@ static void dec_timer_ack(void)
CMOS_READ(RTC_REG_C); /* Ack the RTC interrupt. */
}
static unsigned int dec_ioasic_hpt_read(void)
static cycle_t dec_ioasic_hpt_read(void)
{
/*
* The free-running counter is 32-bit which is good for about
@ -171,7 +171,7 @@ void __init dec_time_init(void)
if (!cpu_has_counter && IOASIC)
/* For pre-R4k systems we use the I/O ASIC's counter. */
mips_hpt_read = dec_ioasic_hpt_read;
clocksource_mips.read = dec_ioasic_hpt_read;
/* Set up the rate of periodic DS1287 interrupts. */
CMOS_WRITE(RTC_REF_CLCK_32KHZ | (16 - __ffs(HZ)), RTC_REG_A);
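Together with the jmr3927 and kernel/time.c hunks further down, the change above shows the shape of the "make clocksource_mips public" cleanup: a platform now supplies a cycle_t-returning counter read and assigns it to the exported clocksource_mips instead of the old mips_hpt_read hook. A minimal sketch of that pattern follows; the my_* names and the counter address are invented for illustration and are not part of this merge.

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/types.h>
#include <asm/addrspace.h>

extern struct clocksource clocksource_mips;	/* made non-static by this series */
extern unsigned int mips_hpt_frequency;

/* Hypothetical free-running 32-bit counter; the register address is a placeholder. */
static cycle_t my_hpt_read(void)
{
	return *(volatile u32 *)CKSEG1ADDR(0x1f000100);
}

void __init my_plat_time_init(void)
{
	clocksource_mips.read = my_hpt_read;	/* was: mips_hpt_read = my_hpt_read */
	mips_hpt_frequency = 50 * 1000 * 1000;	/* counter ticks per second, board specific */
}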

View File

@ -56,22 +56,6 @@ static void emma2rh_irq_disable(unsigned int irq)
ll_emma2rh_irq_disable(irq - emma2rh_irq_base);
}
static unsigned int emma2rh_irq_startup(unsigned int irq)
{
emma2rh_irq_enable(irq);
return 0;
}
#define emma2rh_irq_shutdown emma2rh_irq_disable
static void emma2rh_irq_ack(unsigned int irq)
{
/* disable interrupt - some handler will re-enable the irq
* and if the interrupt is leveled, we will have infinite loop
*/
ll_emma2rh_irq_disable(irq - emma2rh_irq_base);
}
static void emma2rh_irq_end(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
@ -80,25 +64,20 @@ static void emma2rh_irq_end(unsigned int irq)
struct irq_chip emma2rh_irq_controller = {
.typename = "emma2rh_irq",
.startup = emma2rh_irq_startup,
.shutdown = emma2rh_irq_shutdown,
.enable = emma2rh_irq_enable,
.disable = emma2rh_irq_disable,
.ack = emma2rh_irq_ack,
.ack = emma2rh_irq_disable,
.mask = emma2rh_irq_disable,
.mask_ack = emma2rh_irq_disable,
.unmask = emma2rh_irq_enable,
.end = emma2rh_irq_end,
.set_affinity = NULL /* no affinity stuff for UP */
};
void emma2rh_irq_init(u32 irq_base)
{
u32 i;
for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
irq_desc[i].chip = &emma2rh_irq_controller;
}
for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ; i++)
set_irq_chip_and_handler(i, &emma2rh_irq_controller,
handle_level_irq);
emma2rh_irq_base = irq_base;
}

View File

@ -48,19 +48,6 @@ static void emma2rh_sw_irq_disable(unsigned int irq)
ll_emma2rh_sw_irq_disable(irq - emma2rh_sw_irq_base);
}
static unsigned int emma2rh_sw_irq_startup(unsigned int irq)
{
emma2rh_sw_irq_enable(irq);
return 0;
}
#define emma2rh_sw_irq_shutdown emma2rh_sw_irq_disable
static void emma2rh_sw_irq_ack(unsigned int irq)
{
ll_emma2rh_sw_irq_disable(irq - emma2rh_sw_irq_base);
}
static void emma2rh_sw_irq_end(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
@ -69,25 +56,20 @@ static void emma2rh_sw_irq_end(unsigned int irq)
struct irq_chip emma2rh_sw_irq_controller = {
.typename = "emma2rh_sw_irq",
.startup = emma2rh_sw_irq_startup,
.shutdown = emma2rh_sw_irq_shutdown,
.enable = emma2rh_sw_irq_enable,
.disable = emma2rh_sw_irq_disable,
.ack = emma2rh_sw_irq_ack,
.ack = emma2rh_sw_irq_disable,
.mask = emma2rh_sw_irq_disable,
.mask_ack = emma2rh_sw_irq_disable,
.unmask = emma2rh_sw_irq_enable,
.end = emma2rh_sw_irq_end,
.set_affinity = NULL,
};
void emma2rh_sw_irq_init(u32 irq_base)
{
u32 i;
for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ_SW; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 2;
irq_desc[i].chip = &emma2rh_sw_irq_controller;
}
for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ_SW; i++)
set_irq_chip_and_handler(i, &emma2rh_sw_irq_controller,
handle_level_irq);
emma2rh_sw_irq_base = irq_base;
}
@ -126,14 +108,6 @@ static void emma2rh_gpio_irq_disable(unsigned int irq)
ll_emma2rh_gpio_irq_disable(irq - emma2rh_gpio_irq_base);
}
static unsigned int emma2rh_gpio_irq_startup(unsigned int irq)
{
emma2rh_gpio_irq_enable(irq);
return 0;
}
#define emma2rh_gpio_irq_shutdown emma2rh_gpio_irq_disable
static void emma2rh_gpio_irq_ack(unsigned int irq)
{
irq -= emma2rh_gpio_irq_base;
@ -149,25 +123,19 @@ static void emma2rh_gpio_irq_end(unsigned int irq)
struct irq_chip emma2rh_gpio_irq_controller = {
.typename = "emma2rh_gpio_irq",
.startup = emma2rh_gpio_irq_startup,
.shutdown = emma2rh_gpio_irq_shutdown,
.enable = emma2rh_gpio_irq_enable,
.disable = emma2rh_gpio_irq_disable,
.ack = emma2rh_gpio_irq_ack,
.mask = emma2rh_gpio_irq_disable,
.mask_ack = emma2rh_gpio_irq_ack,
.unmask = emma2rh_gpio_irq_enable,
.end = emma2rh_gpio_irq_end,
.set_affinity = NULL,
};
void emma2rh_gpio_irq_init(u32 irq_base)
{
u32 i;
for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ_GPIO; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 2;
irq_desc[i].chip = &emma2rh_gpio_irq_controller;
}
for (i = irq_base; i < irq_base + NUM_EMMA2RH_IRQ_GPIO; i++)
set_irq_chip(i, &emma2rh_gpio_irq_controller);
emma2rh_gpio_irq_base = irq_base;
}

View File

@ -66,38 +66,21 @@ asmlinkage void plat_irq_dispatch(void)
static void disable_ev64120_irq(unsigned int irq_nr)
{
unsigned long flags;
local_irq_save(flags);
if (irq_nr >= 8) { // All PCI interrupts are on line 5 or 2
clear_c0_status(9 << 10);
} else {
clear_c0_status(1 << (irq_nr + 8));
}
local_irq_restore(flags);
}
static void enable_ev64120_irq(unsigned int irq_nr)
{
unsigned long flags;
local_irq_save(flags);
if (irq_nr >= 8) // All PCI interrupts are on line 5 or 2
set_c0_status(9 << 10);
else
set_c0_status(1 << (irq_nr + 8));
local_irq_restore(flags);
}
static unsigned int startup_ev64120_irq(unsigned int irq)
{
enable_ev64120_irq(irq);
return 0; /* Never anything pending */
}
#define shutdown_ev64120_irq disable_ev64120_irq
#define mask_and_ack_ev64120_irq disable_ev64120_irq
static void end_ev64120_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@ -106,13 +89,11 @@ static void end_ev64120_irq(unsigned int irq)
static struct irq_chip ev64120_irq_type = {
.typename = "EV64120",
.startup = startup_ev64120_irq,
.shutdown = shutdown_ev64120_irq,
.enable = enable_ev64120_irq,
.disable = disable_ev64120_irq,
.ack = mask_and_ack_ev64120_irq,
.ack = disable_ev64120_irq,
.mask = disable_ev64120_irq,
.mask_ack = disable_ev64120_irq,
.unmask = enable_ev64120_irq,
.end = end_ev64120_irq,
.set_affinity = NULL
};
void gt64120_irq_setup(void)
@ -122,8 +103,6 @@ void gt64120_irq_setup(void)
*/
clear_c0_status(ST0_IM);
local_irq_disable();
/*
* Enable timer. Other interrupts will be enabled as they are
* registered.
@ -133,16 +112,5 @@ void gt64120_irq_setup(void)
void __init arch_init_irq(void)
{
int i;
/* Let's initialize our IRQ descriptors */
for (i = 0; i < NR_IRQS; i++) {
irq_desc[i].status = 0;
irq_desc[i].chip = &no_irq_chip;
irq_desc[i].action = NULL;
irq_desc[i].depth = 0;
spin_lock_init(&irq_desc[i].lock);
}
gt64120_irq_setup();
}

View File

@ -28,14 +28,6 @@ static void enable_r4030_irq(unsigned int irq)
spin_unlock_irqrestore(&r4030_lock, flags);
}
static unsigned int startup_r4030_irq(unsigned int irq)
{
enable_r4030_irq(irq);
return 0; /* never anything pending */
}
#define shutdown_r4030_irq disable_r4030_irq
void disable_r4030_irq(unsigned int irq)
{
unsigned int mask = ~(1 << (irq - JAZZ_PARALLEL_IRQ));
@ -47,8 +39,6 @@ void disable_r4030_irq(unsigned int irq)
spin_unlock_irqrestore(&r4030_lock, flags);
}
#define mask_and_ack_r4030_irq disable_r4030_irq
static void end_r4030_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@ -57,11 +47,10 @@ static void end_r4030_irq(unsigned int irq)
static struct irq_chip r4030_irq_type = {
.typename = "R4030",
.startup = startup_r4030_irq,
.shutdown = shutdown_r4030_irq,
.enable = enable_r4030_irq,
.disable = disable_r4030_irq,
.ack = mask_and_ack_r4030_irq,
.ack = disable_r4030_irq,
.mask = disable_r4030_irq,
.mask_ack = disable_r4030_irq,
.unmask = enable_r4030_irq,
.end = end_r4030_irq,
};
@ -69,12 +58,8 @@ void __init init_r4030_ints(void)
{
int i;
for (i = JAZZ_PARALLEL_IRQ; i <= JAZZ_TIMER_IRQ; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
irq_desc[i].chip = &r4030_irq_type;
}
for (i = JAZZ_PARALLEL_IRQ; i <= JAZZ_TIMER_IRQ; i++)
set_irq_chip_and_handler(i, &r4030_irq_type, handle_level_irq);
r4030_write_reg16(JAZZ_IO_IRQ_ENABLE, 0);
r4030_read_reg16(JAZZ_IO_IRQ_SOURCE); /* clear pending IRQs */

View File

@ -90,17 +90,6 @@ static unsigned char irc_level[TX3927_NUM_IR] = {
static void jmr3927_irq_disable(unsigned int irq_nr);
static void jmr3927_irq_enable(unsigned int irq_nr);
static DEFINE_SPINLOCK(jmr3927_irq_lock);
static unsigned int jmr3927_irq_startup(unsigned int irq)
{
jmr3927_irq_enable(irq);
return 0;
}
#define jmr3927_irq_shutdown jmr3927_irq_disable
static void jmr3927_irq_ack(unsigned int irq)
{
if (irq == JMR3927_IRQ_IRC_TMR0)
@ -118,9 +107,7 @@ static void jmr3927_irq_end(unsigned int irq)
static void jmr3927_irq_disable(unsigned int irq_nr)
{
struct tb_irq_space* sp;
unsigned long flags;
spin_lock_irqsave(&jmr3927_irq_lock, flags);
for (sp = tb_irq_spaces; sp; sp = sp->next) {
if (sp->start_irqno <= irq_nr &&
irq_nr < sp->start_irqno + sp->nr_irqs) {
@ -130,15 +117,12 @@ static void jmr3927_irq_disable(unsigned int irq_nr)
break;
}
}
spin_unlock_irqrestore(&jmr3927_irq_lock, flags);
}
static void jmr3927_irq_enable(unsigned int irq_nr)
{
struct tb_irq_space* sp;
unsigned long flags;
spin_lock_irqsave(&jmr3927_irq_lock, flags);
for (sp = tb_irq_spaces; sp; sp = sp->next) {
if (sp->start_irqno <= irq_nr &&
irq_nr < sp->start_irqno + sp->nr_irqs) {
@ -148,7 +132,6 @@ static void jmr3927_irq_enable(unsigned int irq_nr)
break;
}
}
spin_unlock_irqrestore(&jmr3927_irq_lock, flags);
}
/*
@ -457,11 +440,10 @@ void __init arch_init_irq(void)
static struct irq_chip jmr3927_irq_controller = {
.typename = "jmr3927_irq",
.startup = jmr3927_irq_startup,
.shutdown = jmr3927_irq_shutdown,
.enable = jmr3927_irq_enable,
.disable = jmr3927_irq_disable,
.ack = jmr3927_irq_ack,
.mask = jmr3927_irq_disable,
.mask_ack = jmr3927_irq_ack,
.unmask = jmr3927_irq_enable,
.end = jmr3927_irq_end,
};
@ -469,12 +451,8 @@ void jmr3927_irq_init(u32 irq_base)
{
u32 i;
for (i= irq_base; i< irq_base + JMR3927_NR_IRQ_IRC + JMR3927_NR_IRQ_IOC; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
irq_desc[i].chip = &jmr3927_irq_controller;
}
for (i= irq_base; i< irq_base + JMR3927_NR_IRQ_IRC + JMR3927_NR_IRQ_IOC; i++)
set_irq_chip(i, &jmr3927_irq_controller);
jmr3927_irq_base = irq_base;
}

View File

@ -170,7 +170,7 @@ static void jmr3927_machine_power_off(void)
while (1);
}
static unsigned int jmr3927_hpt_read(void)
static cycle_t jmr3927_hpt_read(void)
{
/* We assume this function is called with xtime_lock held. */
return jiffies * (JMR3927_TIMER_CLK / HZ) + jmr3927_tmrptr->trr;
@ -182,7 +182,7 @@ extern void rtc_ds1742_init(unsigned long base);
#endif
static void __init jmr3927_time_init(void)
{
mips_hpt_read = jmr3927_hpt_read;
clocksource_mips.read = jmr3927_hpt_read;
mips_hpt_frequency = JMR3927_TIMER_CLK;
#ifdef USE_RTC_DS1742
if (jmr3927_have_nvram()) {

View File

@ -45,7 +45,6 @@ obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
obj-$(CONFIG_NO_ISA) += dma-no-isa.o
obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
@ -67,6 +66,8 @@ obj-$(CONFIG_64BIT) += cpu-bugs64.o
obj-$(CONFIG_I8253) += i8253.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
EXTRA_AFLAGS := $(CFLAGS)

View File

@ -110,9 +110,8 @@ static inline void check_wait(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
printk("Checking for 'wait' instruction... ");
if (nowait) {
printk (" disabled.\n");
printk("Wait instruction disabled.\n");
return;
}
@ -120,11 +119,9 @@ static inline void check_wait(void)
case CPU_R3081:
case CPU_R3081E:
cpu_wait = r3081_wait;
printk(" available.\n");
break;
case CPU_TX3927:
cpu_wait = r39xx_wait;
printk(" available.\n");
break;
case CPU_R4200:
/* case CPU_R4300: */
@ -146,33 +143,23 @@ static inline void check_wait(void)
case CPU_74K:
case CPU_PR4450:
cpu_wait = r4k_wait;
printk(" available.\n");
break;
case CPU_TX49XX:
cpu_wait = r4k_wait_irqoff;
printk(" available.\n");
break;
case CPU_AU1000:
case CPU_AU1100:
case CPU_AU1500:
case CPU_AU1550:
case CPU_AU1200:
if (allow_au1k_wait) {
if (allow_au1k_wait)
cpu_wait = au1k_wait;
printk(" available.\n");
} else
printk(" unavailable.\n");
break;
case CPU_RM9000:
if ((c->processor_id & 0x00ff) >= 0x40) {
if ((c->processor_id & 0x00ff) >= 0x40)
cpu_wait = r4k_wait;
printk(" available.\n");
} else {
printk(" unavailable.\n");
}
break;
default:
printk(" unavailable.\n");
break;
}
}

View File

@ -1,28 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004 by Ralf Baechle
*
* Dummy ISA DMA functions for systems that don't have ISA but share drivers
* with ISA such as legacy free PCI.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
DEFINE_SPINLOCK(dma_spin_lock);
int request_dma(unsigned int dmanr, const char * device_id)
{
return -EINVAL;
}
void free_dma(unsigned int dmanr)
{
}
EXPORT_SYMBOL(dma_spin_lock);
EXPORT_SYMBOL(request_dma);
EXPORT_SYMBOL(free_dma);

View File

@ -19,6 +19,7 @@
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/page.h>
#define PANIC_PIC(msg) \
.set push; \
@ -378,6 +379,68 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER dsp dsp sti silent /* #26 */
BUILD_HANDLER reserved reserved sti verbose /* others */
.align 5
LEAF(handle_ri_rdhwr_vivt)
#ifdef CONFIG_MIPS_MT_SMTC
PANIC_PIC("handle_ri_rdhwr_vivt called")
#else
.set push
.set noat
.set noreorder
/* check if TLB contains an entry for EPC */
MFC0 k1, CP0_ENTRYHI
andi k1, 0xff /* ASID_MASK */
MFC0 k0, CP0_EPC
PTR_SRL k0, PAGE_SHIFT + 1
PTR_SLL k0, PAGE_SHIFT + 1
or k1, k0
MTC0 k1, CP0_ENTRYHI
mtc0_tlbw_hazard
tlbp
tlb_probe_hazard
mfc0 k1, CP0_INDEX
.set pop
bltz k1, handle_ri /* slow path */
/* fall thru */
#endif
END(handle_ri_rdhwr_vivt)
LEAF(handle_ri_rdhwr)
.set push
.set noat
.set noreorder
/* 0x7c03e83b: rdhwr v1,$29 */
MFC0 k1, CP0_EPC
lui k0, 0x7c03
lw k1, (k1)
ori k0, 0xe83b
.set reorder
bne k0, k1, handle_ri /* if not ours */
/* The insn is rdhwr. No need to check CAUSE.BD here. */
get_saved_sp /* k1 := current_thread_info */
.set noreorder
MFC0 k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
ori k1, _THREAD_MASK
xori k1, _THREAD_MASK
LONG_L v1, TI_TP_VALUE(k1)
LONG_ADDIU k0, 4
jr k0
rfe
#else
LONG_ADDIU k0, 4 /* stall on $k0 */
MTC0 k0, CP0_EPC
/* I hope three instructions between MTC0 and ERET are enough... */
ori k1, _THREAD_MASK
xori k1, _THREAD_MASK
LONG_L v1, TI_TP_VALUE(k1)
.set mips3
eret
.set mips0
#endif
.set pop
END(handle_ri_rdhwr)
#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

View File

@ -250,6 +250,9 @@ NESTED(smp_bootstrap, 16, sp)
*/
page swapper_pg_dir, _PGD_ORDER
#ifdef CONFIG_64BIT
#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64)
page module_pg_dir, _PGD_ORDER
#endif
page invalid_pmd_table, _PMD_ORDER
#endif
page invalid_pte_table, _PTE_ORDER

View File

@ -40,21 +40,10 @@ static void end_8259A_irq (unsigned int irq)
enable_8259A_irq(irq);
}
#define shutdown_8259A_irq disable_8259A_irq
void mask_and_ack_8259A(unsigned int);
static unsigned int startup_8259A_irq(unsigned int irq)
{
enable_8259A_irq(irq);
return 0; /* never anything pending */
}
static struct irq_chip i8259A_irq_type = {
.typename = "XT-PIC",
.startup = startup_8259A_irq,
.shutdown = shutdown_8259A_irq,
.enable = enable_8259A_irq,
.disable = disable_8259A_irq,
.ack = mask_and_ack_8259A,
@ -120,7 +109,7 @@ int i8259A_irq_pending(unsigned int irq)
void make_8259A_irq(unsigned int irq)
{
disable_irq_nosync(irq);
irq_desc[irq].chip = &i8259A_irq_type;
set_irq_chip(irq, &i8259A_irq_type);
enable_irq(irq);
}
@ -323,12 +312,8 @@ void __init init_i8259_irqs (void)
init_8259A(0);
for (i = 0; i < 16; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
irq_desc[i].chip = &i8259A_irq_type;
}
for (i = 0; i < 16; i++)
set_irq_chip(i, &i8259A_irq_type);
setup_irq(2, &irq2);
}

View File

@ -44,31 +44,6 @@ static inline void unmask_msc_irq(unsigned int irq)
MSCIC_WRITE(MSC01_IC_ENAH, 1<<(irq - irq_base - 32));
}
/*
* Enables the IRQ on SOC-it
*/
static void enable_msc_irq(unsigned int irq)
{
unmask_msc_irq(irq);
}
/*
* Initialize the IRQ on SOC-it
*/
static unsigned int startup_msc_irq(unsigned int irq)
{
unmask_msc_irq(irq);
return 0;
}
/*
* Disables the IRQ on SOC-it
*/
static void disable_msc_irq(unsigned int irq)
{
mask_msc_irq(irq);
}
/*
* Masks and ACKs an IRQ
*/
@ -136,25 +111,23 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
(irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
}
#define shutdown_msc_irq disable_msc_irq
struct irq_chip msc_levelirq_type = {
.typename = "SOC-it-Level",
.startup = startup_msc_irq,
.shutdown = shutdown_msc_irq,
.enable = enable_msc_irq,
.disable = disable_msc_irq,
.ack = level_mask_and_ack_msc_irq,
.mask = mask_msc_irq,
.mask_ack = level_mask_and_ack_msc_irq,
.unmask = unmask_msc_irq,
.eoi = unmask_msc_irq,
.end = end_msc_irq,
};
struct irq_chip msc_edgeirq_type = {
.typename = "SOC-it-Edge",
.startup =startup_msc_irq,
.shutdown = shutdown_msc_irq,
.enable = enable_msc_irq,
.disable = disable_msc_irq,
.ack = edge_mask_and_ack_msc_irq,
.mask = mask_msc_irq,
.mask_ack = edge_mask_and_ack_msc_irq,
.unmask = unmask_msc_irq,
.eoi = unmask_msc_irq,
.end = end_msc_irq,
};
@ -175,14 +148,14 @@ void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq)
switch (imp->im_type) {
case MSC01_IRQ_EDGE:
irq_desc[base+n].chip = &msc_edgeirq_type;
set_irq_chip(base+n, &msc_edgeirq_type);
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
else
MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
break;
case MSC01_IRQ_LEVEL:
irq_desc[base+n].chip = &msc_levelirq_type;
set_irq_chip(base+n, &msc_levelirq_type);
if (cpu_has_veic)
MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
else

View File

@ -66,39 +66,6 @@ static inline void unmask_mv64340_irq(unsigned int irq)
}
}
/*
* Enables the IRQ on Marvell Chip
*/
static void enable_mv64340_irq(unsigned int irq)
{
unmask_mv64340_irq(irq);
}
/*
* Initialize the IRQ on Marvell Chip
*/
static unsigned int startup_mv64340_irq(unsigned int irq)
{
unmask_mv64340_irq(irq);
return 0;
}
/*
* Disables the IRQ on Marvell Chip
*/
static void disable_mv64340_irq(unsigned int irq)
{
mask_mv64340_irq(irq);
}
/*
* Masks and ACKs an IRQ
*/
static void mask_and_ack_mv64340_irq(unsigned int irq)
{
mask_mv64340_irq(irq);
}
/*
* End IRQ processing
*/
@ -133,15 +100,12 @@ void ll_mv64340_irq(void)
do_IRQ(ls1bit32(irq_src_high) + irq_base + 32);
}
#define shutdown_mv64340_irq disable_mv64340_irq
struct irq_chip mv64340_irq_type = {
.typename = "MV-64340",
.startup = startup_mv64340_irq,
.shutdown = shutdown_mv64340_irq,
.enable = enable_mv64340_irq,
.disable = disable_mv64340_irq,
.ack = mask_and_ack_mv64340_irq,
.ack = mask_mv64340_irq,
.mask = mask_mv64340_irq,
.mask_ack = mask_mv64340_irq,
.unmask = unmask_mv64340_irq,
.end = end_mv64340_irq,
};
@ -149,13 +113,9 @@ void __init mv64340_irq_init(unsigned int base)
{
int i;
/* Reset irq handlers pointers to NULL */
for (i = base; i < base + 64; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 2;
irq_desc[i].chip = &mv64340_irq_type;
}
for (i = base; i < base + 64; i++)
set_irq_chip_and_handler(i, &mv64340_irq_type,
handle_level_irq);
irq_base = base;
}

View File

@ -29,42 +29,6 @@ static inline void mask_rm7k_irq(unsigned int irq)
clear_c0_intcontrol(0x100 << (irq - irq_base));
}
static inline void rm7k_cpu_irq_enable(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
unmask_rm7k_irq(irq);
local_irq_restore(flags);
}
static void rm7k_cpu_irq_disable(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
mask_rm7k_irq(irq);
local_irq_restore(flags);
}
static unsigned int rm7k_cpu_irq_startup(unsigned int irq)
{
rm7k_cpu_irq_enable(irq);
return 0;
}
#define rm7k_cpu_irq_shutdown rm7k_cpu_irq_disable
/*
* While we ack the interrupt, interrupts are disabled and thus we don't need
* to deal with concurrency issues. Same for rm7k_cpu_irq_end.
*/
static void rm7k_cpu_irq_ack(unsigned int irq)
{
mask_rm7k_irq(irq);
}
static void rm7k_cpu_irq_end(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
@ -73,11 +37,10 @@ static void rm7k_cpu_irq_end(unsigned int irq)
static struct irq_chip rm7k_irq_controller = {
.typename = "RM7000",
.startup = rm7k_cpu_irq_startup,
.shutdown = rm7k_cpu_irq_shutdown,
.enable = rm7k_cpu_irq_enable,
.disable = rm7k_cpu_irq_disable,
.ack = rm7k_cpu_irq_ack,
.ack = mask_rm7k_irq,
.mask = mask_rm7k_irq,
.mask_ack = mask_rm7k_irq,
.unmask = unmask_rm7k_irq,
.end = rm7k_cpu_irq_end,
};
@ -87,12 +50,9 @@ void __init rm7k_cpu_irq_init(int base)
clear_c0_intcontrol(0x00000f00); /* Mask all */
for (i = base; i < base + 4; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
irq_desc[i].chip = &rm7k_irq_controller;
}
for (i = base; i < base + 4; i++)
set_irq_chip_and_handler(i, &rm7k_irq_controller,
handle_level_irq);
irq_base = base;
}

View File

@ -48,15 +48,6 @@ static void rm9k_cpu_irq_disable(unsigned int irq)
local_irq_restore(flags);
}
static unsigned int rm9k_cpu_irq_startup(unsigned int irq)
{
rm9k_cpu_irq_enable(irq);
return 0;
}
#define rm9k_cpu_irq_shutdown rm9k_cpu_irq_disable
/*
* Performance counter interrupts are global on all processors.
*/
@ -89,16 +80,6 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
}
/*
* While we ack the interrupt, interrupts are disabled and thus we don't need
* to deal with concurrency issues. Same for rm9k_cpu_irq_end.
*/
static void rm9k_cpu_irq_ack(unsigned int irq)
{
mask_rm9k_irq(irq);
}
static void rm9k_cpu_irq_end(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
@ -107,11 +88,10 @@ static void rm9k_cpu_irq_end(unsigned int irq)
static struct irq_chip rm9k_irq_controller = {
.typename = "RM9000",
.startup = rm9k_cpu_irq_startup,
.shutdown = rm9k_cpu_irq_shutdown,
.enable = rm9k_cpu_irq_enable,
.disable = rm9k_cpu_irq_disable,
.ack = rm9k_cpu_irq_ack,
.ack = mask_rm9k_irq,
.mask = mask_rm9k_irq,
.mask_ack = mask_rm9k_irq,
.unmask = unmask_rm9k_irq,
.end = rm9k_cpu_irq_end,
};
@ -119,9 +99,10 @@ static struct irq_chip rm9k_perfcounter_irq = {
.typename = "RM9000",
.startup = rm9k_perfcounter_irq_startup,
.shutdown = rm9k_perfcounter_irq_shutdown,
.enable = rm9k_cpu_irq_enable,
.disable = rm9k_cpu_irq_disable,
.ack = rm9k_cpu_irq_ack,
.ack = mask_rm9k_irq,
.mask = mask_rm9k_irq,
.mask_ack = mask_rm9k_irq,
.unmask = unmask_rm9k_irq,
.end = rm9k_cpu_irq_end,
};
@ -135,15 +116,13 @@ void __init rm9k_cpu_irq_init(int base)
clear_c0_intcontrol(0x0000f000); /* Mask all */
for (i = base; i < base + 4; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
irq_desc[i].chip = &rm9k_irq_controller;
}
for (i = base; i < base + 4; i++)
set_irq_chip_and_handler(i, &rm9k_irq_controller,
handle_level_irq);
rm9000_perfcount_irq = base + 1;
irq_desc[rm9000_perfcount_irq].chip = &rm9k_perfcounter_irq;
set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
handle_level_irq);
irq_base = base;
}

View File

@ -88,25 +88,6 @@ atomic_t irq_err_count;
unsigned long irq_hwmask[NR_IRQS];
#endif /* CONFIG_MIPS_MT_SMTC */
#undef do_IRQ
/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
asmlinkage unsigned int do_IRQ(unsigned int irq)
{
irq_enter();
__DO_IRQ_SMTC_HOOK();
__do_IRQ(irq);
irq_exit();
return 1;
}
/*
* Generic, controller-independent functions:
*/
@ -172,19 +153,6 @@ __setup("nokgdb", nokgdb);
void __init init_IRQ(void)
{
int i;
for (i = 0; i < NR_IRQS; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
irq_desc[i].chip = &no_irq_chip;
spin_lock_init(&irq_desc[i].lock);
#ifdef CONFIG_MIPS_MT_SMTC
irq_hwmask[i] = 0;
#endif /* CONFIG_MIPS_MT_SMTC */
}
arch_init_irq();
#ifdef CONFIG_KGDB

View File

@ -50,44 +50,6 @@ static inline void mask_mips_irq(unsigned int irq)
irq_disable_hazard();
}
static inline void mips_cpu_irq_enable(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
unmask_mips_irq(irq);
back_to_back_c0_hazard();
local_irq_restore(flags);
}
static void mips_cpu_irq_disable(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
mask_mips_irq(irq);
back_to_back_c0_hazard();
local_irq_restore(flags);
}
static unsigned int mips_cpu_irq_startup(unsigned int irq)
{
mips_cpu_irq_enable(irq);
return 0;
}
#define mips_cpu_irq_shutdown mips_cpu_irq_disable
/*
* While we ack the interrupt, interrupts are disabled and thus we don't need
* to deal with concurrency issues. Same for mips_cpu_irq_end.
*/
static void mips_cpu_irq_ack(unsigned int irq)
{
mask_mips_irq(irq);
}
static void mips_cpu_irq_end(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
@ -96,11 +58,11 @@ static void mips_cpu_irq_end(unsigned int irq)
static struct irq_chip mips_cpu_irq_controller = {
.typename = "MIPS",
.startup = mips_cpu_irq_startup,
.shutdown = mips_cpu_irq_shutdown,
.enable = mips_cpu_irq_enable,
.disable = mips_cpu_irq_disable,
.ack = mips_cpu_irq_ack,
.ack = mask_mips_irq,
.mask = mask_mips_irq,
.mask_ack = mask_mips_irq,
.unmask = unmask_mips_irq,
.eoi = unmask_mips_irq,
.end = mips_cpu_irq_end,
};
@ -110,8 +72,6 @@ static struct irq_chip mips_cpu_irq_controller = {
#define unmask_mips_mt_irq unmask_mips_irq
#define mask_mips_mt_irq mask_mips_irq
#define mips_mt_cpu_irq_enable mips_cpu_irq_enable
#define mips_mt_cpu_irq_disable mips_cpu_irq_disable
static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
{
@ -119,13 +79,11 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
evpe(vpflags);
mips_mt_cpu_irq_enable(irq);
unmask_mips_mt_irq(irq);
return 0;
}
#define mips_mt_cpu_irq_shutdown mips_mt_cpu_irq_disable
/*
* While we ack the interrupt, interrupts are disabled and thus we don't need
* to deal with concurrency issues. Same for mips_cpu_irq_end.
@ -143,10 +101,11 @@ static void mips_mt_cpu_irq_ack(unsigned int irq)
static struct irq_chip mips_mt_cpu_irq_controller = {
.typename = "MIPS",
.startup = mips_mt_cpu_irq_startup,
.shutdown = mips_mt_cpu_irq_shutdown,
.enable = mips_mt_cpu_irq_enable,
.disable = mips_mt_cpu_irq_disable,
.ack = mips_mt_cpu_irq_ack,
.mask = mask_mips_mt_irq,
.mask_ack = mips_mt_cpu_irq_ack,
.unmask = unmask_mips_mt_irq,
.eoi = unmask_mips_mt_irq,
.end = mips_mt_cpu_irq_end,
};
@ -163,19 +122,12 @@ void __init mips_cpu_irq_init(int irq_base)
* leave them uninitialized for other processors.
*/
if (cpu_has_mipsmt)
for (i = irq_base; i < irq_base + 2; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
irq_desc[i].chip = &mips_mt_cpu_irq_controller;
}
for (i = irq_base; i < irq_base + 2; i++)
set_irq_chip(i, &mips_mt_cpu_irq_controller);
for (i = irq_base + 2; i < irq_base + 8; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL;
irq_desc[i].depth = 1;
irq_desc[i].chip = &mips_cpu_irq_controller;
}
for (i = irq_base + 2; i < irq_base + 8; i++)
set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
handle_level_irq);
mips_cpu_irq_base = irq_base;
}

View File

@ -0,0 +1,85 @@
/*
* machine_kexec.c for kexec
* Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
const extern unsigned char relocate_new_kernel[];
const extern unsigned int relocate_new_kernel_size;
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
int
machine_kexec_prepare(struct kimage *kimage)
{
return 0;
}
void
machine_kexec_cleanup(struct kimage *kimage)
{
}
void
machine_shutdown(void)
{
}
void
machine_crash_shutdown(struct pt_regs *regs)
{
}
void
machine_kexec(struct kimage *image)
{
unsigned long reboot_code_buffer;
unsigned long entry;
unsigned long *ptr;
reboot_code_buffer =
(unsigned long)page_address(image->control_code_page);
kexec_start_address = image->start;
kexec_indirection_page = (unsigned long)phys_to_virt(image->head & PAGE_MASK);
memcpy((void*)reboot_code_buffer, relocate_new_kernel,
relocate_new_kernel_size);
/*
* The generic kexec code builds a page list with physical
* addresses. They are directly accessible through KSEG0 (or
* CKSEG0 or XKPHYS on a 64-bit system), hence the
* phys_to_virt() call.
*/
for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
ptr = (entry & IND_INDIRECTION) ?
phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
*ptr & IND_DESTINATION)
*ptr = (unsigned long)phys_to_virt(*ptr);
}
/*
* we do not want to be bothered.
*/
local_irq_disable();
flush_icache_range(reboot_code_buffer,
reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
printk("Will call new kernel at %08x\n", image->start);
printk("Bye ...\n");
flush_cache_all();
((void (*)(void))reboot_code_buffer)();
}

View File

@ -29,6 +29,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/pgtable.h> /* MODULE_START */
struct mips_hi16 {
struct mips_hi16 *next;
@ -43,9 +44,23 @@ static DEFINE_SPINLOCK(dbe_lock);
void *module_alloc(unsigned long size)
{
#ifdef MODULE_START
struct vm_struct *area;
size = PAGE_ALIGN(size);
if (!size)
return NULL;
area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
if (!area)
return NULL;
return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
#else
if (size == 0)
return NULL;
return vmalloc(size);
#endif
}
/* Free memory returned from module_alloc */

View File

@ -0,0 +1,80 @@
/*
* relocate_kernel.S for kexec
* Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/page.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/addrspace.h>
.globl relocate_new_kernel
relocate_new_kernel:
PTR_L s0, kexec_indirection_page
PTR_L s1, kexec_start_address
process_entry:
PTR_L s2, (s0)
PTR_ADD s0, s0, SZREG
/* destination page */
and s3, s2, 0x1
beq s3, zero, 1f
and s4, s2, ~0x1 /* store destination addr in s4 */
move a0, s4
b process_entry
1:
/* indirection page, update s0 */
and s3, s2, 0x2
beq s3, zero, 1f
and s0, s2, ~0x2
b process_entry
1:
/* done page */
and s3, s2, 0x4
beq s3, zero, 1f
b done
1:
/* source page */
and s3, s2, 0x8
beq s3, zero, process_entry
and s2, s2, ~0x8
li s6, (1 << PAGE_SHIFT) / SZREG
copy_word:
/* copy page word by word */
REG_L s5, (s2)
REG_S s5, (s4)
INT_ADD s4, s4, SZREG
INT_ADD s2, s2, SZREG
INT_SUB s6, s6, 1
beq s6, zero, process_entry
b copy_word
b process_entry
done:
/* jump to kexec_start_address */
j s1
.globl kexec_start_address
kexec_start_address:
.long 0x0
.globl kexec_indirection_page
kexec_indirection_page:
.long 0x0
relocate_new_kernel_end:
.globl relocate_new_kernel_size
relocate_new_kernel_size:
.long relocate_new_kernel_end - relocate_new_kernel

View File

@ -653,7 +653,7 @@ einval: li v0, -EINVAL
sys sys_move_pages 6
sys sys_set_robust_list 2
sys sys_get_robust_list 3 /* 4310 */
sys sys_ni_syscall 0
sys sys_kexec_load 4
sys sys_getcpu 3
sys sys_epoll_pwait 6
.endm

View File

@ -468,6 +468,6 @@ sys_call_table:
PTR sys_move_pages
PTR sys_set_robust_list
PTR sys_get_robust_list
PTR sys_ni_syscall /* 5270 */
PTR sys_kexec_load /* 5270 */
PTR sys_getcpu
PTR sys_epoll_pwait

View File

@ -394,6 +394,6 @@ EXPORT(sysn32_call_table)
PTR sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list
PTR sys_ni_syscall
PTR compat_sys_kexec_load
PTR sys_getcpu
PTR sys_epoll_pwait

View File

@ -516,7 +516,7 @@ sys_call_table:
PTR compat_sys_move_pages
PTR compat_sys_set_robust_list
PTR compat_sys_get_robust_list /* 4310 */
PTR sys_ni_syscall
PTR compat_sys_kexec_load
PTR sys_getcpu
PTR sys_epoll_pwait
.size sys_call_table,.-sys_call_table

View File

@ -145,13 +145,12 @@ static int __init rd_start_early(char *p)
unsigned long start = memparse(p, &p);
#ifdef CONFIG_64BIT
/* HACK: Guess if the sign extension was forgotten */
if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
start |= 0xffffffff00000000UL;
/* Guess if the sign extension was forgotten by bootloader */
if (start < XKPHYS)
start = (int)start;
#endif
initrd_start = start;
initrd_end += start;
return 0;
}
early_param("rd_start", rd_start_early);
@ -159,41 +158,64 @@ early_param("rd_start", rd_start_early);
static int __init rd_size_early(char *p)
{
initrd_end += memparse(p, &p);
return 0;
}
early_param("rd_size", rd_size_early);
/* it returns the next free pfn after initrd */
static unsigned long __init init_initrd(void)
{
unsigned long tmp, end, size;
unsigned long end;
u32 *initrd_header;
ROOT_DEV = Root_RAM0;
/*
* Board specific code or command line parser should have
* already set up initrd_start and initrd_end. In these cases
* perform sanity checks and use them if all looks good.
*/
size = initrd_end - initrd_start;
if (initrd_end == 0 || size == 0) {
initrd_start = 0;
initrd_end = 0;
} else
return initrd_end;
if (initrd_start && initrd_end > initrd_start)
goto sanitize;
end = (unsigned long)&_end;
tmp = PAGE_ALIGN(end) - sizeof(u32) * 2;
if (tmp < end)
tmp += PAGE_SIZE;
/*
* See if initrd has been added to the kernel image by
* arch/mips/boot/addinitrd.c. In that case a header is
* prepended to initrd and is made up of 8 bytes. The first
* word is a magic number and the second one is the size of
* initrd. Initrd start must be page aligned in any case.
*/
initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
if (initrd_header[0] != 0x494E5244)
goto disable;
initrd_start = (unsigned long)(initrd_header + 2);
initrd_end = initrd_start + initrd_header[1];
initrd_header = (u32 *)tmp;
if (initrd_header[0] == 0x494E5244) {
initrd_start = (unsigned long)&initrd_header[2];
initrd_end = initrd_start + initrd_header[1];
sanitize:
if (initrd_start & ~PAGE_MASK) {
printk(KERN_ERR "initrd start must be page aligned\n");
goto disable;
}
return initrd_end;
if (initrd_start < PAGE_OFFSET) {
printk(KERN_ERR "initrd start < PAGE_OFFSET\n");
goto disable;
}
/*
* Sanitize initrd addresses. For example, firmware
* can't guess whether it needs to pass them as
* 64-bit values if the kernel has been built as pure
* 32-bit. We also need to switch from KSEG0 to XKPHYS
* addresses now, so the code can safely use __pa().
*/
end = __pa(initrd_end);
initrd_end = (unsigned long)__va(end);
initrd_start = (unsigned long)__va(__pa(initrd_start));
ROOT_DEV = Root_RAM0;
return PFN_UP(end);
disable:
initrd_start = 0;
initrd_end = 0;
return 0;
}
static void __init finalize_initrd(void)
@ -204,12 +226,12 @@ static void __init finalize_initrd(void)
printk(KERN_INFO "Initrd not found or empty");
goto disable;
}
if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
printk("Initrd extends beyond end of memory");
goto disable;
}
reserve_bootmem(CPHYSADDR(initrd_start), size);
reserve_bootmem(__pa(initrd_start), size);
initrd_below_start_ok = 1;
printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
@ -259,8 +281,7 @@ static void __init bootmem_init(void)
* not selected. Once that done we can determine the low bound
* of usable memory.
*/
reserved_end = init_initrd();
reserved_end = PFN_UP(CPHYSADDR(max(reserved_end, (unsigned long)&_end)));
reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end)));
/*
* Find the highest page frame number we have available.
@ -432,10 +453,10 @@ static void __init resource_init(void)
if (UNCAC_BASE != IO_BASE)
return;
code_resource.start = virt_to_phys(&_text);
code_resource.end = virt_to_phys(&_etext) - 1;
data_resource.start = virt_to_phys(&_etext);
data_resource.end = virt_to_phys(&_edata) - 1;
code_resource.start = __pa_symbol(&_text);
code_resource.end = __pa_symbol(&_etext) - 1;
data_resource.start = __pa_symbol(&_etext);
data_resource.end = __pa_symbol(&_edata) - 1;
/*
* Request address space for all standard RAM.

View File

@ -17,7 +17,6 @@
*/
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

View File

@ -278,7 +278,9 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
/* need to mark IPI's as IRQ_PER_CPU */
irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
}
/*


@ -1009,6 +1009,7 @@ void setup_cross_vpe_interrupts(void)
setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
}
/*


@ -11,7 +11,6 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/clocksource.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@ -83,17 +82,11 @@ static void null_timer_ack(void) { /* nothing */ }
/*
* Null high precision timer functions for systems lacking one.
*/
static unsigned int null_hpt_read(void)
static cycle_t null_hpt_read(void)
{
return 0;
}
static void __init null_hpt_init(void)
{
/* nothing */
}
/*
* Timer ack for an R4k-compatible timer of a known frequency.
*/
@ -118,7 +111,7 @@ static void c0_timer_ack(void)
/*
* High precision timer functions for a R4k-compatible timer.
*/
static unsigned int c0_hpt_read(void)
static cycle_t c0_hpt_read(void)
{
return read_c0_count();
}
@ -132,9 +125,6 @@ static void __init c0_hpt_timer_init(void)
int (*mips_timer_state)(void);
void (*mips_timer_ack)(void);
unsigned int (*mips_hpt_read)(void);
void (*mips_hpt_init)(void) __initdata = null_hpt_init;
unsigned int mips_hpt_mask = 0xffffffff;
/* last time when xtime and rtc are sync'ed up */
static long last_rtc_update;
@ -276,8 +266,7 @@ static struct irqaction timer_irqaction = {
static unsigned int __init calibrate_hpt(void)
{
u64 frequency;
u32 hpt_start, hpt_end, hpt_count, hz;
cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;
const int loops = HZ / 10;
int log_2_loops = 0;
@ -303,28 +292,23 @@ static unsigned int __init calibrate_hpt(void)
* during the calculated number of periods between timer
* interrupts.
*/
hpt_start = mips_hpt_read();
hpt_start = clocksource_mips.read();
do {
while (mips_timer_state());
while (!mips_timer_state());
} while (--i);
hpt_end = mips_hpt_read();
hpt_end = clocksource_mips.read();
hpt_count = (hpt_end - hpt_start) & mips_hpt_mask;
hpt_count = (hpt_end - hpt_start) & clocksource_mips.mask;
hz = HZ;
frequency = (u64)hpt_count * (u64)hz;
frequency = hpt_count * hz;
return frequency >> log_2_loops;
}
static cycle_t read_mips_hpt(void)
{
return (cycle_t)mips_hpt_read();
}
static struct clocksource clocksource_mips = {
struct clocksource clocksource_mips = {
.name = "MIPS",
.read = read_mips_hpt,
.mask = 0xffffffff,
.is_continuous = 1,
};
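/*
 * A minimal usage sketch, assuming a hypothetical board: with
 * clocksource_mips public, a platform overrides the counter hook
 * directly (compare the ip27/bcm1480/sb1250 hunks further down).
 * The example_* names and EXAMPLE_* constants do not exist in the tree.
 */
static cycle_t example_hpt_read(void)
{
	/* hypothetical memory-mapped counter register */
	return (cycle_t) __raw_readl(EXAMPLE_COUNTER_REG);
}

void __init example_time_init(void)
{
	clocksource_mips.read = example_hpt_read;
	mips_hpt_frequency = EXAMPLE_HPT_FREQ;
}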
@ -333,7 +317,7 @@ static void __init init_mips_clocksource(void)
u64 temp;
u32 shift;
if (!mips_hpt_frequency || mips_hpt_read == null_hpt_read)
if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read)
return;
/* Calculate a somewhat reasonable rating value */
@ -347,7 +331,6 @@ static void __init init_mips_clocksource(void)
}
clocksource_mips.shift = shift;
clocksource_mips.mult = (u32)temp;
clocksource_mips.mask = mips_hpt_mask;
clocksource_register(&clocksource_mips);
}
@ -367,32 +350,36 @@ void __init time_init(void)
-xtime.tv_sec, -xtime.tv_nsec);
/* Choose appropriate high precision timer routines. */
if (!cpu_has_counter && !mips_hpt_read)
if (!cpu_has_counter && !clocksource_mips.read)
/* No high precision timer -- sorry. */
mips_hpt_read = null_hpt_read;
clocksource_mips.read = null_hpt_read;
else if (!mips_hpt_frequency && !mips_timer_state) {
/* A high precision timer of unknown frequency. */
if (!mips_hpt_read)
if (!clocksource_mips.read)
/* No external high precision timer -- use R4k. */
mips_hpt_read = c0_hpt_read;
clocksource_mips.read = c0_hpt_read;
} else {
/* We know counter frequency. Or we can get it. */
if (!mips_hpt_read) {
if (!clocksource_mips.read) {
/* No external high precision timer -- use R4k. */
mips_hpt_read = c0_hpt_read;
clocksource_mips.read = c0_hpt_read;
if (!mips_timer_state) {
/* No external timer interrupt -- use R4k. */
mips_hpt_init = c0_hpt_timer_init;
mips_timer_ack = c0_timer_ack;
/* Calculate cache parameters. */
cycles_per_jiffy =
(mips_hpt_frequency + HZ / 2) / HZ;
/*
* This sets up the high precision
* timer for the first interrupt.
*/
c0_hpt_timer_init();
}
}
if (!mips_hpt_frequency)
mips_hpt_frequency = calibrate_hpt();
/* Calculate cache parameters. */
cycles_per_jiffy = (mips_hpt_frequency + HZ / 2) / HZ;
/* Report the high precision timer rate for a reference. */
printk("Using %u.%03u MHz high precision timer.\n",
((mips_hpt_frequency + 500) / 1000) / 1000,
@ -403,9 +390,6 @@ void __init time_init(void)
/* No timer interrupt ack (e.g. i8254). */
mips_timer_ack = null_timer_ack;
/* This sets up the high precision timer for the first interrupt. */
mips_hpt_init();
/*
* Call board specific timer interrupt setup.
*


@ -54,6 +54,8 @@ extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
@ -397,19 +399,6 @@ asmlinkage void do_be(struct pt_regs *regs)
force_sig(SIGBUS, current);
}
static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
{
unsigned int __user *epc;
epc = (unsigned int __user *) regs->cp0_epc +
((regs->cp0_cause & CAUSEF_BD) != 0);
if (!get_user(*opcode, epc))
return 0;
force_sig(SIGSEGV, current);
return 1;
}
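/*
 * Note: the helper removed above is replaced at each call site by a
 * direct get_user(opcode, (unsigned int __user *) exception_epc(regs));
 * exception_epc() performs the same branch-delay slot (CAUSEF_BD)
 * adjustment, and the SIGSEGV is now raised at the callers'
 * out_sigsegv labels instead of inside the helper.
 */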
/*
* ll/sc emulation
*/
@ -544,8 +533,8 @@ static inline int simulate_llsc(struct pt_regs *regs)
{
unsigned int opcode;
if (unlikely(get_insn_opcode(regs, &opcode)))
return -EFAULT;
if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
if ((opcode & OPCODE) == LL) {
simulate_ll(regs, opcode);
@ -557,6 +546,10 @@ static inline int simulate_llsc(struct pt_regs *regs)
}
return -EFAULT; /* Strange things going on ... */
out_sigsegv:
force_sig(SIGSEGV, current);
return -EFAULT;
}
/*
@ -569,8 +562,8 @@ static inline int simulate_rdhwr(struct pt_regs *regs)
struct thread_info *ti = task_thread_info(current);
unsigned int opcode;
if (unlikely(get_insn_opcode(regs, &opcode)))
return -EFAULT;
if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
if (unlikely(compute_return_epc(regs)))
return -EFAULT;
@ -589,6 +582,10 @@ static inline int simulate_rdhwr(struct pt_regs *regs)
/* Not ours. */
return -EFAULT;
out_sigsegv:
force_sig(SIGSEGV, current);
return -EFAULT;
}
asmlinkage void do_ov(struct pt_regs *regs)
@ -672,10 +669,8 @@ asmlinkage void do_bp(struct pt_regs *regs)
unsigned int opcode, bcode;
siginfo_t info;
die_if_kernel("Break instruction in kernel code", regs);
if (get_insn_opcode(regs, &opcode))
return;
if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/*
* There is the ancient bug in the MIPS assemblers that the break
@ -696,6 +691,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
switch (bcode) {
case BRK_OVERFLOW << 10:
case BRK_DIVZERO << 10:
die_if_kernel("Break instruction in kernel code", regs);
if (bcode == (BRK_DIVZERO << 10))
info.si_code = FPE_INTDIV;
else
@ -705,9 +701,16 @@ asmlinkage void do_bp(struct pt_regs *regs)
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
case BRK_BUG:
die("Kernel bug detected", regs);
break;
default:
die_if_kernel("Break instruction in kernel code", regs);
force_sig(SIGTRAP, current);
}
return;

out_sigsegv:
force_sig(SIGSEGV, current);
}
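/*
 * What feeds the new BRK_BUG case, as a rough sketch rather than a
 * quote of include/asm-mips/bug.h: a MIPS BUG() is a break instruction
 * carrying BRK_BUG in its code field, roughly
 *
 *	__asm__ __volatile__("break %0" : : "i" (BRK_BUG));
 *
 * and a conditional-trap BUG_ON() (caught by the matching case in
 * do_tr() below) looks roughly like
 *
 *	__asm__ __volatile__("tne $0, %0, %1"
 *			     : : "r" (condition), "i" (BRK_BUG));
 */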
asmlinkage void do_tr(struct pt_regs *regs)
@ -715,10 +718,8 @@ asmlinkage void do_tr(struct pt_regs *regs)
unsigned int opcode, tcode = 0;
siginfo_t info;
die_if_kernel("Trap instruction in kernel code", regs);
if (get_insn_opcode(regs, &opcode))
return;
if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/* Immediate versions don't provide a code. */
if (!(opcode & OPCODE))
@ -733,6 +734,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
switch (tcode) {
case BRK_OVERFLOW:
case BRK_DIVZERO:
die_if_kernel("Trap instruction in kernel code", regs);
if (tcode == BRK_DIVZERO)
info.si_code = FPE_INTDIV;
else
@ -742,9 +744,16 @@ asmlinkage void do_tr(struct pt_regs *regs)
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
case BRK_BUG:
die("Kernel bug detected", regs);
break;
default:
die_if_kernel("Trap instruction in kernel code", regs);
force_sig(SIGTRAP, current);
}
return;

out_sigsegv:
force_sig(SIGSEGV, current);
}
asmlinkage void do_ri(struct pt_regs *regs)
@ -1423,6 +1432,15 @@ void __init set_uncached_handler (unsigned long offset, void *addr, unsigned lon
memcpy((void *)(uncached_ebase + offset), addr, size);
}
static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
rdhwr_noopt = 1;
return 1;
}
__setup("rdhwr_noopt", set_rdhwr_noopt);
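/*
 * Note: booting with "rdhwr_noopt" on the kernel command line keeps the
 * plain handle_ri exception handler; otherwise vector 10 below is
 * pointed at the optimized handle_ri_rdhwr path (or its _vivt variant
 * when the CPU has a virtually tagged I-cache).
 */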
void __init trap_init(void)
{
extern char except_vec3_generic, except_vec3_r4000;
@ -1502,7 +1520,9 @@ void __init trap_init(void)
set_except_vector(8, handle_sys);
set_except_vector(9, handle_bp);
set_except_vector(10, handle_ri);
set_except_vector(10, rdhwr_noopt ? handle_ri :
(cpu_has_vtag_icache ?
handle_ri_rdhwr_vivt : handle_ri_rdhwr));
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);


@ -36,33 +36,14 @@ static volatile int lasat_int_mask_shift;
void disable_lasat_irq(unsigned int irq_nr)
{
unsigned long flags;
local_irq_save(flags);
*lasat_int_mask &= ~(1 << irq_nr) << lasat_int_mask_shift;
local_irq_restore(flags);
}
void enable_lasat_irq(unsigned int irq_nr)
{
unsigned long flags;
local_irq_save(flags);
*lasat_int_mask |= (1 << irq_nr) << lasat_int_mask_shift;
local_irq_restore(flags);
}
static unsigned int startup_lasat_irq(unsigned int irq)
{
enable_lasat_irq(irq);
return 0; /* never anything pending */
}
#define shutdown_lasat_irq disable_lasat_irq
#define mask_and_ack_lasat_irq disable_lasat_irq
static void end_lasat_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@ -71,11 +52,10 @@ static void end_lasat_irq(unsigned int irq)
static struct irq_chip lasat_irq_type = {
.typename = "Lasat",
.startup = startup_lasat_irq,
.shutdown = shutdown_lasat_irq,
.enable = enable_lasat_irq,
.disable = disable_lasat_irq,
.ack = mask_and_ack_lasat_irq,
.ack = disable_lasat_irq,
.mask = disable_lasat_irq,
.mask_ack = disable_lasat_irq,
.unmask = enable_lasat_irq,
.end = end_lasat_irq,
};
@ -152,10 +132,6 @@ void __init arch_init_irq(void)
panic("arch_init_irq: mips_machtype incorrect");
}
for (i = 0; i <= LASATINT_END; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
irq_desc[i].chip = &lasat_irq_type;
}
for (i = 0; i <= LASATINT_END; i++)
set_irq_chip_and_handler(i, &lasat_irq_type, handle_level_irq);
}
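/*
 * Note: set_irq_chip_and_handler(i, chip, handle_level_irq) is the
 * genirq shorthand for set_irq_chip(i, chip) followed by
 * set_irq_handler(i, handle_level_irq); it replaces the open-coded
 * irq_desc[] field initialisation removed here and in the other boards
 * below.
 */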


@ -62,16 +62,6 @@ void enable_atlas_irq(unsigned int irq_nr)
iob();
}
static unsigned int startup_atlas_irq(unsigned int irq)
{
enable_atlas_irq(irq);
return 0; /* never anything pending */
}
#define shutdown_atlas_irq disable_atlas_irq
#define mask_and_ack_atlas_irq disable_atlas_irq
static void end_atlas_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@ -80,11 +70,11 @@ static void end_atlas_irq(unsigned int irq)
static struct irq_chip atlas_irq_type = {
.typename = "Atlas",
.startup = startup_atlas_irq,
.shutdown = shutdown_atlas_irq,
.enable = enable_atlas_irq,
.disable = disable_atlas_irq,
.ack = mask_and_ack_atlas_irq,
.ack = disable_atlas_irq,
.mask = disable_atlas_irq,
.mask_ack = disable_atlas_irq,
.unmask = enable_atlas_irq,
.eoi = enable_atlas_irq,
.end = end_atlas_irq,
};
@ -217,13 +207,8 @@ static inline void init_atlas_irqs (int base)
*/
atlas_hw0_icregs->intrsten = 0xffffffff;
for (i = ATLAS_INT_BASE; i <= ATLAS_INT_END; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
irq_desc[i].chip = &atlas_irq_type;
spin_lock_init(&irq_desc[i].lock);
}
for (i = ATLAS_INT_BASE; i <= ATLAS_INT_END; i++)
set_irq_chip_and_handler(i, &atlas_irq_type, handle_level_irq);
}
static struct irqaction atlasirq = {


@ -288,6 +288,7 @@ void __init plat_timer_setup(struct irqaction *irq)
The effect is that the int remains disabled on the second cpu.
Mark the interrupt with IRQ_PER_CPU to avoid any confusion */
irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
#endif
/* to generate the first timer interrupt */


@ -3,6 +3,9 @@
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/timex.h>
#include <asm/mipsregs.h>
#include <asm/ptrace.h>
@ -10,24 +13,14 @@
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/timex.h>
#include <asm/mipsregs.h>
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <asm/mc146818-time.h>
#include <asm/msc01_ic.h>
#include <asm/smp.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/prom.h>
#include <asm/mips-boards/simint.h>
#include <asm/mc146818-time.h>
#include <asm/smp.h>
unsigned long cpu_khz;
@ -203,7 +196,8 @@ void __init plat_timer_setup(struct irqaction *irq)
on separate cpu's the first one tries to handle the second interrupt.
The effect is that the int remains disabled on the second cpu.
Mark the interrupt with IRQ_PER_CPU to avoid any confusion */
irq_desc[mips_cpu_timer_irq].status |= IRQ_PER_CPU;
irq_desc[mips_cpu_timer_irq].flags |= IRQ_PER_CPU;
set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
#endif
/* to generate the first timer interrupt */


@ -323,7 +323,6 @@ static void __init r4k_blast_scache_setup(void)
static inline void local_r4k_flush_cache_all(void * args)
{
r4k_blast_dcache();
r4k_blast_icache();
}
static void r4k_flush_cache_all(void)
@ -359,21 +358,19 @@ static void r4k___flush_cache_all(void)
static inline void local_r4k_flush_cache_range(void * args)
{
struct vm_area_struct *vma = args;
int exec;
if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
return;
exec = vma->vm_flags & VM_EXEC;
if (cpu_has_dc_aliases || exec)
r4k_blast_dcache();
if (exec)
r4k_blast_icache();
r4k_blast_dcache();
}
static void r4k_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (!cpu_has_dc_aliases)
return;
r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}
@ -384,18 +381,21 @@ static inline void local_r4k_flush_cache_mm(void * args)
if (!cpu_context(smp_processor_id(), mm))
return;
r4k_blast_dcache();
r4k_blast_icache();
/*
* Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
* only flush the primary caches but R10000 and R12000 behave sane ...
* R4000SC and R4400SC indexed S-cache ops also invalidate primary
* caches, so we can bail out early.
*/
if (current_cpu_data.cputype == CPU_R4000SC ||
current_cpu_data.cputype == CPU_R4000MC ||
current_cpu_data.cputype == CPU_R4400SC ||
current_cpu_data.cputype == CPU_R4400MC)
current_cpu_data.cputype == CPU_R4400MC) {
r4k_blast_scache();
return;
}
r4k_blast_dcache();
}
static void r4k_flush_cache_mm(struct mm_struct *mm)


@ -60,6 +60,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
*/
if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
goto vmalloc_fault;
#ifdef MODULE_START
if (unlikely(address >= MODULE_START && address < MODULE_END))
goto vmalloc_fault;
#endif
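/*
 * Note: when MODULE_START is defined, module space has its own page
 * directory (module_pg_dir, initialised in pagetable_init() further
 * down), so faults in that range are sent down the same vmalloc_fault
 * path as ordinary vmalloc addresses.
 */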
/*
* If we're in an interrupt or have no user


@ -90,9 +90,9 @@ unsigned long setup_zero_pages(void)
if (!empty_zero_page)
panic("Oh boy, that early out of memory?");
page = virt_to_page(empty_zero_page);
page = virt_to_page((void *)empty_zero_page);
split_page(page, order);
while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
SetPageReserved(page);
page++;
}
@ -443,15 +443,18 @@ void __init mem_init(void)
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
unsigned long addr;
unsigned long pfn;
for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
memset((void *)addr, 0xcc, PAGE_SIZE);
free_page(addr);
for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
struct page *page = pfn_to_page(pfn);
void *addr = phys_to_virt(PFN_PHYS(pfn));
ClearPageReserved(page);
init_page_count(page);
memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
__free_page(page);
totalram_pages++;
}
printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
@ -460,12 +463,9 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_64BIT
/* Switch from KSEG0 to XKPHYS addresses */
start = (unsigned long)phys_to_virt(CPHYSADDR(start));
end = (unsigned long)phys_to_virt(CPHYSADDR(end));
#endif
free_init_pages("initrd memory", start, end);
free_init_pages("initrd memory",
virt_to_phys((void *)start),
virt_to_phys((void *)end));
}
#endif
@ -473,17 +473,13 @@ extern unsigned long prom_free_prom_memory(void);
void free_initmem(void)
{
unsigned long start, end, freed;
unsigned long freed;
freed = prom_free_prom_memory();
if (freed)
printk(KERN_INFO "Freeing firmware memory: %ldk freed\n",freed);
start = (unsigned long)(&__init_begin);
end = (unsigned long)(&__init_end);
#ifdef CONFIG_64BIT
start = PAGE_OFFSET | CPHYSADDR(start);
end = PAGE_OFFSET | CPHYSADDR(end);
#endif
free_init_pages("unused kernel memory", start, end);
free_init_pages("unused kernel memory",
__pa_symbol(&__init_begin),
__pa_symbol(&__init_end));
}


@ -58,6 +58,9 @@ void __init pagetable_init(void)
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
#ifdef MODULE_START
pgd_init((unsigned long)module_pg_dir);
#endif
pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
pgd_base = swapper_pg_dir;


@ -423,6 +423,9 @@ enum label_id {
label_invalid,
label_second_part,
label_leave,
#ifdef MODULE_START
label_module_alloc,
#endif
label_vmalloc,
label_vmalloc_done,
label_tlbw_hazard,
@ -455,6 +458,9 @@ static __init void build_label(struct label **lab, u32 *addr,
L_LA(_second_part)
L_LA(_leave)
#ifdef MODULE_START
L_LA(_module_alloc)
#endif
L_LA(_vmalloc)
L_LA(_vmalloc_done)
L_LA(_tlbw_hazard)
@ -686,6 +692,13 @@ static void __init il_bgezl(u32 **p, struct reloc **r, unsigned int reg,
i_bgezl(p, reg, 0);
}
static void __init __attribute__((unused))
il_bgez(u32 **p, struct reloc **r, unsigned int reg, enum label_id l)
{
r_mips_pc16(r, *p, l);
i_bgez(p, reg, 0);
}
/* The only general purpose registers allowed in TLB handlers. */
#define K0 26
#define K1 27
@ -970,7 +983,11 @@ build_get_pmde64(u32 **p, struct label **l, struct reloc **r,
* The vmalloc handling is not in the hotpath.
*/
i_dmfc0(p, tmp, C0_BADVADDR);
#ifdef MODULE_START
il_bltz(p, r, tmp, label_module_alloc);
#else
il_bltz(p, r, tmp, label_vmalloc);
#endif
/* No i_nop needed here, since the next insn doesn't touch TMP. */
#ifdef CONFIG_SMP
@ -1023,8 +1040,46 @@ build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r,
{
long swpd = (long)swapper_pg_dir;
#ifdef MODULE_START
long modd = (long)module_pg_dir;
l_module_alloc(l, *p);
/*
* Assumption:
* VMALLOC_START >= 0xc000000000000000UL
* MODULE_START >= 0xe000000000000000UL
*/
i_SLL(p, ptr, bvaddr, 2);
il_bgez(p, r, ptr, label_vmalloc);
if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START)) {
i_lui(p, ptr, rel_hi(MODULE_START)); /* delay slot */
} else {
/* unlikely configuration */
i_nop(p); /* delay slot */
i_LA(p, ptr, MODULE_START);
}
i_dsubu(p, bvaddr, bvaddr, ptr);
if (in_compat_space_p(modd) && !rel_lo(modd)) {
il_b(p, r, label_vmalloc_done);
i_lui(p, ptr, rel_hi(modd));
} else {
i_LA_mostly(p, ptr, modd);
il_b(p, r, label_vmalloc_done);
i_daddiu(p, ptr, ptr, rel_lo(modd));
}
l_vmalloc(l, *p);
if (in_compat_space_p(MODULE_START) && !rel_lo(MODULE_START) &&
MODULE_START << 32 == VMALLOC_START)
i_dsll32(p, ptr, ptr, 0); /* typical case */
else
i_LA(p, ptr, VMALLOC_START);
#else
l_vmalloc(l, *p);
i_LA(p, ptr, VMALLOC_START);
#endif
i_dsubu(p, bvaddr, bvaddr, ptr);
if (in_compat_space_p(swpd) && !rel_lo(swpd)) {


@ -65,39 +65,6 @@ static inline void unmask_cpci_irq(unsigned int irq)
value = OCELOT_FPGA_READ(INTMASK);
}
/*
* Enables the IRQ in the FPGA
*/
static void enable_cpci_irq(unsigned int irq)
{
unmask_cpci_irq(irq);
}
/*
* Initialize the IRQ in the FPGA
*/
static unsigned int startup_cpci_irq(unsigned int irq)
{
unmask_cpci_irq(irq);
return 0;
}
/*
* Disables the IRQ in the FPGA
*/
static void disable_cpci_irq(unsigned int irq)
{
mask_cpci_irq(irq);
}
/*
* Masks and ACKs an IRQ
*/
static void mask_and_ack_cpci_irq(unsigned int irq)
{
mask_cpci_irq(irq);
}
/*
* End IRQ processing
*/
@ -125,15 +92,12 @@ void ll_cpci_irq(void)
do_IRQ(ls1bit8(irq_src) + CPCI_IRQ_BASE);
}
#define shutdown_cpci_irq disable_cpci_irq
struct irq_chip cpci_irq_type = {
.typename = "CPCI/FPGA",
.startup = startup_cpci_irq,
.shutdown = shutdown_cpci_irq,
.enable = enable_cpci_irq,
.disable = disable_cpci_irq,
.ack = mask_and_ack_cpci_irq,
.ack = mask_cpci_irq,
.mask = mask_cpci_irq,
.mask_ack = mask_cpci_irq,
.unmask = unmask_cpci_irq,
.end = end_cpci_irq,
};
@ -141,11 +105,6 @@ void cpci_irq_init(void)
{
int i;
/* Reset irq handlers pointers to NULL */
for (i = CPCI_IRQ_BASE; i < (CPCI_IRQ_BASE + 8); i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 2;
irq_desc[i].chip = &cpci_irq_type;
}
for (i = CPCI_IRQ_BASE; i < (CPCI_IRQ_BASE + 8); i++)
set_irq_chip_and_handler(i, &cpci_irq_type, handle_level_irq);
}


@ -59,39 +59,6 @@ static inline void unmask_uart_irq(unsigned int irq)
value = OCELOT_FPGA_READ(UART_INTMASK);
}
/*
* Enables the IRQ in the FPGA
*/
static void enable_uart_irq(unsigned int irq)
{
unmask_uart_irq(irq);
}
/*
* Initialize the IRQ in the FPGA
*/
static unsigned int startup_uart_irq(unsigned int irq)
{
unmask_uart_irq(irq);
return 0;
}
/*
* Disables the IRQ in the FPGA
*/
static void disable_uart_irq(unsigned int irq)
{
mask_uart_irq(irq);
}
/*
* Masks and ACKs an IRQ
*/
static void mask_and_ack_uart_irq(unsigned int irq)
{
mask_uart_irq(irq);
}
/*
* End IRQ processing
*/
@ -118,28 +85,17 @@ void ll_uart_irq(void)
do_IRQ(ls1bit8(irq_src) + 74);
}
#define shutdown_uart_irq disable_uart_irq
struct irq_chip uart_irq_type = {
.typename = "UART/FPGA",
.startup = startup_uart_irq,
.shutdown = shutdown_uart_irq,
.enable = enable_uart_irq,
.disable = disable_uart_irq,
.ack = mask_and_ack_uart_irq,
.ack = mask_uart_irq,
.mask = mask_uart_irq,
.mask_ack = mask_uart_irq,
.unmask = unmask_uart_irq,
.end = end_uart_irq,
};
void uart_irq_init(void)
{
/* Reset irq handlers pointers to NULL */
irq_desc[80].status = IRQ_DISABLED;
irq_desc[80].action = 0;
irq_desc[80].depth = 2;
irq_desc[80].chip = &uart_irq_type;
irq_desc[81].status = IRQ_DISABLED;
irq_desc[81].action = 0;
irq_desc[81].depth = 2;
irq_desc[81].chip = &uart_irq_type;
set_irq_chip_and_handler(80, &uart_irq_type, handle_level_irq);
set_irq_chip_and_handler(81, &uart_irq_type, handle_level_irq);
}


@ -12,5 +12,6 @@ oprofile-y := $(DRIVER_OBJS) common.o
oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_RM9000) += op_model_rm9000.o


@ -83,6 +83,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
case CPU_74K:
case CPU_SB1:
case CPU_SB1A:
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
lmodel = &op_model_mipsxx_ops;
break;


@ -18,7 +18,7 @@
#define M_PERFCTL_SUPERVISOR (1UL << 2)
#define M_PERFCTL_USER (1UL << 3)
#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
#define M_PERFCTL_EVENT(event) ((event) << 5)
#define M_PERFCTL_EVENT(event) (((event) & 0x3f) << 5)
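/*
 * Note: the added "& 0x3f" clamps the event number to the 6-bit event
 * field (bits 5..10) so that an oversized value cannot spill into the
 * VPEID/MT bits defined below.
 */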
#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
@ -218,13 +218,23 @@ static inline int __n_counters(void)
static inline int n_counters(void)
{
int counters = __n_counters();
int counters;
switch (current_cpu_data.cputype) {
case CPU_R10000:
counters = 2;
break;
case CPU_R12000:
case CPU_R14000:
counters = 4;
break;
default:
counters = __n_counters();
}
#ifdef CONFIG_MIPS_MT_SMP
if (current_cpu_data.cputype == CPU_34K)
return counters >> 1;
counters >>= 1;
#endif
return counters;
}
@ -284,6 +294,18 @@ static int __init mipsxx_init(void)
op_model_mipsxx_ops.cpu_type = "mips/5K";
break;
case CPU_R10000:
if ((current_cpu_data.processor_id & 0xff) == 0x20)
op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
else
op_model_mipsxx_ops.cpu_type = "mips/r10000";
break;
case CPU_R12000:
case CPU_R14000:
op_model_mipsxx_ops.cpu_type = "mips/r12000";
break;
case CPU_SB1:
case CPU_SB1A:
op_model_mipsxx_ops.cpu_type = "mips/sb1";


@ -94,22 +94,21 @@ static void qube_raq_galileo_fixup(struct pci_dev *dev)
#if 0
if (galileo_id >= 0x10) {
/* New Galileo, assumes PCI stop line to VIA is connected. */
GALILEO_OUTL(0x4020, GT_PCI0_TOR_OFS);
GT_WRITE(GT_PCI0_TOR_OFS, 0x4020);
} else if (galileo_id == 0x1 || galileo_id == 0x2)
#endif
{
signed int timeo;
/* XXX WE MUST DO THIS ELSE GALILEO LOCKS UP! -DaveM */
timeo = GALILEO_INL(GT_PCI0_TOR_OFS);
timeo = GT_READ(GT_PCI0_TOR_OFS);
/* Old Galileo, assumes PCI STOP line to VIA is disconnected. */
GALILEO_OUTL(
GT_WRITE(GT_PCI0_TOR_OFS,
(0xff << 16) | /* retry count */
(0xff << 8) | /* timeout 1 */
0xff, /* timeout 0 */
GT_PCI0_TOR_OFS);
0xff); /* timeout 0 */
/* enable PCI retry exceeded interrupt */
GALILEO_OUTL(GALILEO_INTR_RETRY_CTR | GALILEO_INL(GT_INTRMASK_OFS), GT_INTRMASK_OFS);
GT_WRITE(GT_INTRMASK_OFS, GT_INTR_RETRYCTR0_MSK | GT_READ(GT_INTRMASK_OFS));
}
}


@ -38,18 +38,18 @@ static int gt64111_pci_read_config(struct pci_bus *bus, unsigned int devfn,
switch (size) {
case 4:
PCI_CFG_SET(devfn, where);
*val = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
*val = GT_READ(GT_PCI0_CFGDATA_OFS);
return PCIBIOS_SUCCESSFUL;
case 2:
PCI_CFG_SET(devfn, (where & ~0x3));
*val = GALILEO_INL(GT_PCI0_CFGDATA_OFS)
*val = GT_READ(GT_PCI0_CFGDATA_OFS)
>> ((where & 3) * 8);
return PCIBIOS_SUCCESSFUL;
case 1:
PCI_CFG_SET(devfn, (where & ~0x3));
*val = GALILEO_INL(GT_PCI0_CFGDATA_OFS)
*val = GT_READ(GT_PCI0_CFGDATA_OFS)
>> ((where & 3) * 8);
return PCIBIOS_SUCCESSFUL;
}
@ -68,25 +68,25 @@ static int gt64111_pci_write_config(struct pci_bus *bus, unsigned int devfn,
switch (size) {
case 4:
PCI_CFG_SET(devfn, where);
GALILEO_OUTL(val, GT_PCI0_CFGDATA_OFS);
GT_WRITE(GT_PCI0_CFGDATA_OFS, val);
return PCIBIOS_SUCCESSFUL;
case 2:
PCI_CFG_SET(devfn, (where & ~0x3));
tmp = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
tmp = GT_READ(GT_PCI0_CFGDATA_OFS);
tmp &= ~(0xffff << ((where & 0x3) * 8));
tmp |= (val << ((where & 0x3) * 8));
GALILEO_OUTL(tmp, GT_PCI0_CFGDATA_OFS);
GT_WRITE(GT_PCI0_CFGDATA_OFS, tmp);
return PCIBIOS_SUCCESSFUL;
case 1:
PCI_CFG_SET(devfn, (where & ~0x3));
tmp = GALILEO_INL(GT_PCI0_CFGDATA_OFS);
tmp = GT_READ(GT_PCI0_CFGDATA_OFS);
tmp &= ~(0xff << ((where & 0x3) * 8));
tmp |= (val << ((where & 0x3) * 8));
GALILEO_OUTL(tmp, GT_PCI0_CFGDATA_OFS);
GT_WRITE(GT_PCI0_CFGDATA_OFS, tmp);
return PCIBIOS_SUCCESSFUL;
}


@ -38,8 +38,6 @@
#include <int.h>
#include <uart.h>
static DEFINE_SPINLOCK(irq_lock);
/* default prio for interrupts */
/* first one is a no-no so therefore always prio 0 (disabled) */
static char gic_prio[PNX8550_INT_GIC_TOTINT] = {
@ -149,38 +147,6 @@ static inline void unmask_irq(unsigned int irq_nr)
}
}
#define pnx8550_disable pnx8550_ack
static void pnx8550_ack(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&irq_lock, flags);
mask_irq(irq);
spin_unlock_irqrestore(&irq_lock, flags);
}
#define pnx8550_enable pnx8550_unmask
static void pnx8550_unmask(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&irq_lock, flags);
unmask_irq(irq);
spin_unlock_irqrestore(&irq_lock, flags);
}
static unsigned int startup_irq(unsigned int irq_nr)
{
pnx8550_unmask(irq_nr);
return 0;
}
static void shutdown_irq(unsigned int irq_nr)
{
pnx8550_ack(irq_nr);
return;
}
int pnx8550_set_gic_priority(int irq, int priority)
{
int gic_irq = irq-PNX8550_INT_GIC_MIN;
@ -192,26 +158,19 @@ int pnx8550_set_gic_priority(int irq, int priority)
return prev_priority;
}
static inline void mask_and_ack_level_irq(unsigned int irq)
{
pnx8550_disable(irq);
return;
}
static void end_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) {
pnx8550_enable(irq);
unmask_irq(irq);
}
}
static struct irq_chip level_irq_type = {
.typename = "PNX Level IRQ",
.startup = startup_irq,
.shutdown = shutdown_irq,
.enable = pnx8550_enable,
.disable = pnx8550_disable,
.ack = mask_and_ack_level_irq,
.ack = mask_irq,
.mask = mask_irq,
.mask_ack = mask_irq,
.unmask = unmask_irq,
.end = end_irq,
};
@ -233,8 +192,8 @@ void __init arch_init_irq(void)
int configPR;
for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++) {
irq_desc[i].chip = &level_irq_type;
pnx8550_ack(i); /* mask the irq just in case */
set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq);
mask_irq(i); /* mask the irq just in case */
}
/* init of GIC/IPC interrupts */
@ -270,7 +229,7 @@ void __init arch_init_irq(void)
/* mask/priority is still 0 so we will not get any
* interrupts until it is unmasked */
irq_desc[i].chip = &level_irq_type;
set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq);
}
/* Priority level 0 */
@ -279,20 +238,21 @@ void __init arch_init_irq(void)
/* Set int vector table address */
PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0;
irq_desc[MIPS_CPU_GIC_IRQ].chip = &level_irq_type;
set_irq_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type,
handle_level_irq);
setup_irq(MIPS_CPU_GIC_IRQ, &gic_action);
/* init of Timer interrupts */
for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++) {
irq_desc[i].chip = &level_irq_type;
}
for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++)
set_irq_chip_and_handler(i, &level_irq_type, handle_level_irq);
/* Stop Timer 1-3 */
configPR = read_c0_config7();
configPR |= 0x00000038;
write_c0_config7(configPR);
irq_desc[MIPS_CPU_TIMER_IRQ].chip = &level_irq_type;
set_irq_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type,
handle_level_irq);
setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action);
}


@ -99,8 +99,6 @@ void prom_cpus_done(void)
*/
void prom_init_secondary(void)
{
mips_hpt_init();
set_c0_status(ST0_CO | ST0_IE | ST0_IM);
}


@ -95,16 +95,11 @@ static irqreturn_t ip22_eisa_intr(int irq, void *dev_id)
static void enable_eisa1_irq(unsigned int irq)
{
unsigned long flags;
u8 mask;
local_irq_save(flags);
mask = inb(EISA_INT1_MASK);
mask &= ~((u8) (1 << irq));
outb(mask, EISA_INT1_MASK);
local_irq_restore(flags);
}
static unsigned int startup_eisa1_irq(unsigned int irq)
@ -130,8 +125,6 @@ static void disable_eisa1_irq(unsigned int irq)
outb(mask, EISA_INT1_MASK);
}
#define shutdown_eisa1_irq disable_eisa1_irq
static void mask_and_ack_eisa1_irq(unsigned int irq)
{
disable_eisa1_irq(irq);
@ -148,25 +141,20 @@ static void end_eisa1_irq(unsigned int irq)
static struct irq_chip ip22_eisa1_irq_type = {
.typename = "IP22 EISA",
.startup = startup_eisa1_irq,
.shutdown = shutdown_eisa1_irq,
.enable = enable_eisa1_irq,
.disable = disable_eisa1_irq,
.ack = mask_and_ack_eisa1_irq,
.mask = disable_eisa1_irq,
.mask_ack = mask_and_ack_eisa1_irq,
.unmask = enable_eisa1_irq,
.end = end_eisa1_irq,
};
static void enable_eisa2_irq(unsigned int irq)
{
unsigned long flags;
u8 mask;
local_irq_save(flags);
mask = inb(EISA_INT2_MASK);
mask &= ~((u8) (1 << (irq - 8)));
outb(mask, EISA_INT2_MASK);
local_irq_restore(flags);
}
static unsigned int startup_eisa2_irq(unsigned int irq)
@ -192,8 +180,6 @@ static void disable_eisa2_irq(unsigned int irq)
outb(mask, EISA_INT2_MASK);
}
#define shutdown_eisa2_irq disable_eisa2_irq
static void mask_and_ack_eisa2_irq(unsigned int irq)
{
disable_eisa2_irq(irq);
@ -210,10 +196,10 @@ static void end_eisa2_irq(unsigned int irq)
static struct irq_chip ip22_eisa2_irq_type = {
.typename = "IP22 EISA",
.startup = startup_eisa2_irq,
.shutdown = shutdown_eisa2_irq,
.enable = enable_eisa2_irq,
.disable = disable_eisa2_irq,
.ack = mask_and_ack_eisa2_irq,
.mask = disable_eisa2_irq,
.mask_ack = mask_and_ack_eisa2_irq,
.unmask = enable_eisa2_irq,
.end = end_eisa2_irq,
};
@ -275,13 +261,10 @@ int __init ip22_eisa_init(void)
outb(0, EISA_DMA2_WRITE_SINGLE);
for (i = SGINT_EISA; i < (SGINT_EISA + EISA_MAX_IRQ); i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
if (i < (SGINT_EISA + 8))
irq_desc[i].chip = &ip22_eisa1_irq_type;
set_irq_chip(i, &ip22_eisa1_irq_type);
else
irq_desc[i].chip = &ip22_eisa2_irq_type;
set_irq_chip(i, &ip22_eisa2_irq_type);
}
/* Cannot use request_irq because of kmalloc not being ready at such


@ -40,34 +40,17 @@ extern int ip22_eisa_init(void);
static void enable_local0_irq(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
/* don't allow mappable interrupt to be enabled from setup_irq,
* we have our own way to do so */
if (irq != SGI_MAP_0_IRQ)
sgint->imask0 |= (1 << (irq - SGINT_LOCAL0));
local_irq_restore(flags);
}
static unsigned int startup_local0_irq(unsigned int irq)
{
enable_local0_irq(irq);
return 0; /* Never anything pending */
}
static void disable_local0_irq(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
sgint->imask0 &= ~(1 << (irq - SGINT_LOCAL0));
local_irq_restore(flags);
}
#define shutdown_local0_irq disable_local0_irq
#define mask_and_ack_local0_irq disable_local0_irq
static void end_local0_irq (unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@ -76,44 +59,26 @@ static void end_local0_irq (unsigned int irq)
static struct irq_chip ip22_local0_irq_type = {
.typename = "IP22 local 0",
.startup = startup_local0_irq,
.shutdown = shutdown_local0_irq,
.enable = enable_local0_irq,
.disable = disable_local0_irq,
.ack = mask_and_ack_local0_irq,
.ack = disable_local0_irq,
.mask = disable_local0_irq,
.mask_ack = disable_local0_irq,
.unmask = enable_local0_irq,
.end = end_local0_irq,
};
static void enable_local1_irq(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
/* don't allow mappable interrupt to be enabled from setup_irq,
* we have our own way to do so */
if (irq != SGI_MAP_1_IRQ)
sgint->imask1 |= (1 << (irq - SGINT_LOCAL1));
local_irq_restore(flags);
}
static unsigned int startup_local1_irq(unsigned int irq)
{
enable_local1_irq(irq);
return 0; /* Never anything pending */
}
void disable_local1_irq(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
sgint->imask1 &= ~(1 << (irq - SGINT_LOCAL1));
local_irq_restore(flags);
}
#define shutdown_local1_irq disable_local1_irq
#define mask_and_ack_local1_irq disable_local1_irq
static void end_local1_irq (unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@ -122,44 +87,26 @@ static void end_local1_irq (unsigned int irq)
static struct irq_chip ip22_local1_irq_type = {
.typename = "IP22 local 1",
.startup = startup_local1_irq,
.shutdown = shutdown_local1_irq,
.enable = enable_local1_irq,
.disable = disable_local1_irq,
.ack = mask_and_ack_local1_irq,
.ack = disable_local1_irq,
.mask = disable_local1_irq,
.mask_ack = disable_local1_irq,
.unmask = enable_local1_irq,
.end = end_local1_irq,
};
static void enable_local2_irq(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
sgint->imask0 |= (1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
sgint->cmeimask0 |= (1 << (irq - SGINT_LOCAL2));
local_irq_restore(flags);
}
static unsigned int startup_local2_irq(unsigned int irq)
{
enable_local2_irq(irq);
return 0; /* Never anything pending */
}
void disable_local2_irq(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
sgint->cmeimask0 &= ~(1 << (irq - SGINT_LOCAL2));
if (!sgint->cmeimask0)
sgint->imask0 &= ~(1 << (SGI_MAP_0_IRQ - SGINT_LOCAL0));
local_irq_restore(flags);
}
#define shutdown_local2_irq disable_local2_irq
#define mask_and_ack_local2_irq disable_local2_irq
static void end_local2_irq (unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@ -168,44 +115,26 @@ static void end_local2_irq (unsigned int irq)
static struct irq_chip ip22_local2_irq_type = {
.typename = "IP22 local 2",
.startup = startup_local2_irq,
.shutdown = shutdown_local2_irq,
.enable = enable_local2_irq,
.disable = disable_local2_irq,
.ack = mask_and_ack_local2_irq,
.ack = disable_local2_irq,
.mask = disable_local2_irq,
.mask_ack = disable_local2_irq,
.unmask = enable_local2_irq,
.end = end_local2_irq,
};
static void enable_local3_irq(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
sgint->imask1 |= (1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
sgint->cmeimask1 |= (1 << (irq - SGINT_LOCAL3));
local_irq_restore(flags);
}
static unsigned int startup_local3_irq(unsigned int irq)
{
enable_local3_irq(irq);
return 0; /* Never anything pending */
}
void disable_local3_irq(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
sgint->cmeimask1 &= ~(1 << (irq - SGINT_LOCAL3));
if (!sgint->cmeimask1)
sgint->imask1 &= ~(1 << (SGI_MAP_1_IRQ - SGINT_LOCAL1));
local_irq_restore(flags);
}
#define shutdown_local3_irq disable_local3_irq
#define mask_and_ack_local3_irq disable_local3_irq
static void end_local3_irq (unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@ -214,11 +143,10 @@ static void end_local3_irq (unsigned int irq)
static struct irq_chip ip22_local3_irq_type = {
.typename = "IP22 local 3",
.startup = startup_local3_irq,
.shutdown = shutdown_local3_irq,
.enable = enable_local3_irq,
.disable = disable_local3_irq,
.ack = mask_and_ack_local3_irq,
.ack = disable_local3_irq,
.mask = disable_local3_irq,
.mask_ack = disable_local3_irq,
.unmask = enable_local3_irq,
.end = end_local3_irq,
};
@ -430,10 +358,7 @@ void __init arch_init_irq(void)
else
handler = &ip22_local3_irq_type;
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
irq_desc[i].chip = handler;
set_irq_chip_and_handler(i, handler, handle_level_irq);
}
/* vector handler. this register the IRQ as non-sharable */


@ -332,11 +332,6 @@ static inline void disable_bridge_irq(unsigned int irq)
intr_disconnect_level(cpu, swlevel);
}
static void mask_and_ack_bridge_irq(unsigned int irq)
{
disable_bridge_irq(irq);
}
static void end_bridge_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
@ -348,18 +343,16 @@ static struct irq_chip bridge_irq_type = {
.typename = "bridge",
.startup = startup_bridge_irq,
.shutdown = shutdown_bridge_irq,
.enable = enable_bridge_irq,
.disable = disable_bridge_irq,
.ack = mask_and_ack_bridge_irq,
.ack = disable_bridge_irq,
.mask = disable_bridge_irq,
.mask_ack = disable_bridge_irq,
.unmask = enable_bridge_irq,
.end = end_bridge_irq,
};
void __devinit register_bridge_irq(unsigned int irq)
{
irq_desc[irq].status = IRQ_DISABLED;
irq_desc[irq].action = 0;
irq_desc[irq].depth = 1;
irq_desc[irq].chip = &bridge_irq_type;
set_irq_chip_and_handler(irq, &bridge_irq_type, handle_level_irq);
}
int __devinit request_bridge_irq(struct bridge_controller *bc)


@ -172,15 +172,6 @@ static __init unsigned long get_m48t35_time(void)
return mktime(year, month, date, hour, min, sec);
}
static unsigned int startup_rt_irq(unsigned int irq)
{
return 0;
}
static void shutdown_rt_irq(unsigned int irq)
{
}
static void enable_rt_irq(unsigned int irq)
{
}
@ -189,21 +180,17 @@ static void disable_rt_irq(unsigned int irq)
{
}
static void mask_and_ack_rt(unsigned int irq)
{
}
static void end_rt_irq(unsigned int irq)
{
}
static struct irq_chip rt_irq_type = {
.typename = "SN HUB RT timer",
.startup = startup_rt_irq,
.shutdown = shutdown_rt_irq,
.enable = enable_rt_irq,
.disable = disable_rt_irq,
.ack = mask_and_ack_rt,
.ack = disable_rt_irq,
.mask = disable_rt_irq,
.mask_ack = disable_rt_irq,
.unmask = enable_rt_irq,
.eoi = enable_rt_irq,
.end = end_rt_irq,
};
@ -221,10 +208,7 @@ void __init plat_timer_setup(struct irqaction *irq)
if (irqno < 0)
panic("Can't allocate interrupt number for timer interrupt");
irq_desc[irqno].status = IRQ_DISABLED;
irq_desc[irqno].action = NULL;
irq_desc[irqno].depth = 1;
irq_desc[irqno].chip = &rt_irq_type;
set_irq_chip_and_handler(irqno, &rt_irq_type, handle_percpu_irq);
/* over-write the handler, we use our own way */
irq->handler = no_action;
@ -239,14 +223,14 @@ void __init plat_timer_setup(struct irqaction *irq)
setup_irq(irqno, &rt_irqaction);
}
static unsigned int ip27_hpt_read(void)
static cycle_t ip27_hpt_read(void)
{
return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
}
void __init ip27_time_init(void)
{
mips_hpt_read = ip27_hpt_read;
clocksource_mips.read = ip27_hpt_read;
mips_hpt_frequency = CYCLES_PER_SEC;
xtime.tv_sec = get_m48t35_time();
xtime.tv_nsec = 0;


@ -113,12 +113,6 @@ static void inline flush_mace_bus(void)
* is quite different anyway.
*/
/*
* IRQ spinlock - Ralf says not to disable CPU interrupts,
* and I think he knows better.
*/
static DEFINE_SPINLOCK(ip32_irq_lock);
/* Some initial interrupts to set up */
extern irqreturn_t crime_memerr_intr(int irq, void *dev_id);
extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id);
@ -138,12 +132,6 @@ static void enable_cpu_irq(unsigned int irq)
set_c0_status(STATUSF_IP7);
}
static unsigned int startup_cpu_irq(unsigned int irq)
{
enable_cpu_irq(irq);
return 0;
}
static void disable_cpu_irq(unsigned int irq)
{
clear_c0_status(STATUSF_IP7);
@ -155,16 +143,12 @@ static void end_cpu_irq(unsigned int irq)
enable_cpu_irq (irq);
}
#define shutdown_cpu_irq disable_cpu_irq
#define mask_and_ack_cpu_irq disable_cpu_irq
static struct irq_chip ip32_cpu_interrupt = {
.typename = "IP32 CPU",
.startup = startup_cpu_irq,
.shutdown = shutdown_cpu_irq,
.enable = enable_cpu_irq,
.disable = disable_cpu_irq,
.ack = mask_and_ack_cpu_irq,
.ack = disable_cpu_irq,
.mask = disable_cpu_irq,
.mask_ack = disable_cpu_irq,
.unmask = enable_cpu_irq,
.end = end_cpu_irq,
};
@ -177,45 +161,27 @@ static uint64_t crime_mask;
static void enable_crime_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&ip32_irq_lock, flags);
crime_mask |= 1 << (irq - 1);
crime->imask = crime_mask;
spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
static unsigned int startup_crime_irq(unsigned int irq)
{
enable_crime_irq(irq);
return 0; /* This is probably not right; we could have pending irqs */
}
static void disable_crime_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&ip32_irq_lock, flags);
crime_mask &= ~(1 << (irq - 1));
crime->imask = crime_mask;
flush_crime_bus();
spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
static void mask_and_ack_crime_irq(unsigned int irq)
{
unsigned long flags;
/* Edge triggered interrupts must be cleared. */
if ((irq >= CRIME_GBE0_IRQ && irq <= CRIME_GBE3_IRQ)
|| (irq >= CRIME_RE_EMPTY_E_IRQ && irq <= CRIME_RE_IDLE_E_IRQ)
|| (irq >= CRIME_SOFT0_IRQ && irq <= CRIME_SOFT2_IRQ)) {
uint64_t crime_int;
spin_lock_irqsave(&ip32_irq_lock, flags);
crime_int = crime->hard_int;
crime_int &= ~(1 << (irq - 1));
crime->hard_int = crime_int;
spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
disable_crime_irq(irq);
}
@ -226,15 +192,12 @@ static void end_crime_irq(unsigned int irq)
enable_crime_irq(irq);
}
#define shutdown_crime_irq disable_crime_irq
static struct irq_chip ip32_crime_interrupt = {
.typename = "IP32 CRIME",
.startup = startup_crime_irq,
.shutdown = shutdown_crime_irq,
.enable = enable_crime_irq,
.disable = disable_crime_irq,
.ack = mask_and_ack_crime_irq,
.mask = disable_crime_irq,
.mask_ack = mask_and_ack_crime_irq,
.unmask = enable_crime_irq,
.end = end_crime_irq,
};
@ -248,34 +211,20 @@ static unsigned long macepci_mask;
static void enable_macepci_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&ip32_irq_lock, flags);
macepci_mask |= MACEPCI_CONTROL_INT(irq - 9);
mace->pci.control = macepci_mask;
crime_mask |= 1 << (irq - 1);
crime->imask = crime_mask;
spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
static unsigned int startup_macepci_irq(unsigned int irq)
{
enable_macepci_irq (irq);
return 0;
}
static void disable_macepci_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&ip32_irq_lock, flags);
crime_mask &= ~(1 << (irq - 1));
crime->imask = crime_mask;
flush_crime_bus();
macepci_mask &= ~MACEPCI_CONTROL_INT(irq - 9);
mace->pci.control = macepci_mask;
flush_mace_bus();
spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
static void end_macepci_irq(unsigned int irq)
@ -284,16 +233,12 @@ static void end_macepci_irq(unsigned int irq)
enable_macepci_irq(irq);
}
#define shutdown_macepci_irq disable_macepci_irq
#define mask_and_ack_macepci_irq disable_macepci_irq
static struct irq_chip ip32_macepci_interrupt = {
.typename = "IP32 MACE PCI",
.startup = startup_macepci_irq,
.shutdown = shutdown_macepci_irq,
.enable = enable_macepci_irq,
.disable = disable_macepci_irq,
.ack = mask_and_ack_macepci_irq,
.ack = disable_macepci_irq,
.mask = disable_macepci_irq,
.mask_ack = disable_macepci_irq,
.unmask = enable_macepci_irq,
.end = end_macepci_irq,
};
@ -339,7 +284,6 @@ static unsigned long maceisa_mask;
static void enable_maceisa_irq (unsigned int irq)
{
unsigned int crime_int = 0;
unsigned long flags;
DBG ("maceisa enable: %u\n", irq);
@ -355,26 +299,16 @@ static void enable_maceisa_irq (unsigned int irq)
break;
}
DBG ("crime_int %08x enabled\n", crime_int);
spin_lock_irqsave(&ip32_irq_lock, flags);
crime_mask |= crime_int;
crime->imask = crime_mask;
maceisa_mask |= 1 << (irq - 33);
mace->perif.ctrl.imask = maceisa_mask;
spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
static unsigned int startup_maceisa_irq(unsigned int irq)
{
enable_maceisa_irq(irq);
return 0;
}
static void disable_maceisa_irq(unsigned int irq)
{
unsigned int crime_int = 0;
unsigned long flags;
spin_lock_irqsave(&ip32_irq_lock, flags);
maceisa_mask &= ~(1 << (irq - 33));
if(!(maceisa_mask & MACEISA_AUDIO_INT))
crime_int |= MACE_AUDIO_INT;
@ -387,23 +321,20 @@ static void disable_maceisa_irq(unsigned int irq)
flush_crime_bus();
mace->perif.ctrl.imask = maceisa_mask;
flush_mace_bus();
spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
static void mask_and_ack_maceisa_irq(unsigned int irq)
{
unsigned long mace_int, flags;
unsigned long mace_int;
switch (irq) {
case MACEISA_PARALLEL_IRQ:
case MACEISA_SERIAL1_TDMAPR_IRQ:
case MACEISA_SERIAL2_TDMAPR_IRQ:
/* edge triggered */
spin_lock_irqsave(&ip32_irq_lock, flags);
mace_int = mace->perif.ctrl.istat;
mace_int &= ~(1 << (irq - 33));
mace->perif.ctrl.istat = mace_int;
spin_unlock_irqrestore(&ip32_irq_lock, flags);
break;
}
disable_maceisa_irq(irq);
@ -415,15 +346,12 @@ static void end_maceisa_irq(unsigned irq)
enable_maceisa_irq(irq);
}
#define shutdown_maceisa_irq disable_maceisa_irq
static struct irq_chip ip32_maceisa_interrupt = {
.typename = "IP32 MACE ISA",
.startup = startup_maceisa_irq,
.shutdown = shutdown_maceisa_irq,
.enable = enable_maceisa_irq,
.disable = disable_maceisa_irq,
.ack = mask_and_ack_maceisa_irq,
.mask = disable_maceisa_irq,
.mask_ack = mask_and_ack_maceisa_irq,
.unmask = enable_maceisa_irq,
.end = end_maceisa_irq,
};
@ -433,29 +361,15 @@ static struct irq_chip ip32_maceisa_interrupt = {
static void enable_mace_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&ip32_irq_lock, flags);
crime_mask |= 1 << (irq - 1);
crime->imask = crime_mask;
spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
static unsigned int startup_mace_irq(unsigned int irq)
{
enable_mace_irq(irq);
return 0;
}
static void disable_mace_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&ip32_irq_lock, flags);
crime_mask &= ~(1 << (irq - 1));
crime->imask = crime_mask;
flush_crime_bus();
spin_unlock_irqrestore(&ip32_irq_lock, flags);
}
static void end_mace_irq(unsigned int irq)
@ -464,16 +378,12 @@ static void end_mace_irq(unsigned int irq)
enable_mace_irq(irq);
}
#define shutdown_mace_irq disable_mace_irq
#define mask_and_ack_mace_irq disable_mace_irq
static struct irq_chip ip32_mace_interrupt = {
.typename = "IP32 MACE",
.startup = startup_mace_irq,
.shutdown = shutdown_mace_irq,
.enable = enable_mace_irq,
.disable = disable_mace_irq,
.ack = mask_and_ack_mace_irq,
.ack = disable_mace_irq,
.mask = disable_mace_irq,
.mask_ack = disable_mace_irq,
.unmask = enable_mace_irq,
.end = end_mace_irq,
};
@ -586,10 +496,7 @@ void __init arch_init_irq(void)
else
controller = &ip32_maceisa_interrupt;
irq_desc[irq].status = IRQ_DISABLED;
irq_desc[irq].action = 0;
irq_desc[irq].depth = 0;
irq_desc[irq].chip = controller;
set_irq_chip(irq, controller);
}
setup_irq(CRIME_MEMERR_IRQ, &memerr_irq);
setup_irq(CRIME_CPUERR_IRQ, &cpuerr_irq);


@ -45,11 +45,9 @@
*/
#define shutdown_bcm1480_irq disable_bcm1480_irq
static void end_bcm1480_irq(unsigned int irq);
static void enable_bcm1480_irq(unsigned int irq);
static void disable_bcm1480_irq(unsigned int irq);
static unsigned int startup_bcm1480_irq(unsigned int irq);
static void ack_bcm1480_irq(unsigned int irq);
#ifdef CONFIG_SMP
static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask);
@ -85,11 +83,10 @@ extern char sb1250_duart_present[];
static struct irq_chip bcm1480_irq_type = {
.typename = "BCM1480-IMR",
.startup = startup_bcm1480_irq,
.shutdown = shutdown_bcm1480_irq,
.enable = enable_bcm1480_irq,
.disable = disable_bcm1480_irq,
.ack = ack_bcm1480_irq,
.mask = disable_bcm1480_irq,
.mask_ack = ack_bcm1480_irq,
.unmask = enable_bcm1480_irq,
.end = end_bcm1480_irq,
#ifdef CONFIG_SMP
.set_affinity = bcm1480_set_affinity
@ -188,14 +185,6 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
/*****************************************************************************/
static unsigned int startup_bcm1480_irq(unsigned int irq)
{
bcm1480_unmask_irq(bcm1480_irq_owner[irq], irq);
return 0; /* never anything pending */
}
static void disable_bcm1480_irq(unsigned int irq)
{
bcm1480_mask_irq(bcm1480_irq_owner[irq], irq);
@ -270,16 +259,9 @@ void __init init_bcm1480_irqs(void)
{
int i;
for (i = 0; i < NR_IRQS; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
if (i < BCM1480_NR_IRQS) {
irq_desc[i].chip = &bcm1480_irq_type;
bcm1480_irq_owner[i] = 0;
} else {
irq_desc[i].chip = &no_irq_chip;
}
for (i = 0; i < BCM1480_NR_IRQS; i++) {
set_irq_chip(i, &bcm1480_irq_type);
bcm1480_irq_owner[i] = 0;
}
}


@ -94,8 +94,6 @@ void bcm1480_time_init(void)
*/
}
#include <asm/sibyte/sb1250.h>
void bcm1480_timer_interrupt(void)
{
int cpu = smp_processor_id();
@ -119,7 +117,7 @@ void bcm1480_timer_interrupt(void)
}
}
static unsigned int bcm1480_hpt_read(void)
static cycle_t bcm1480_hpt_read(void)
{
/* We assume this function is called with xtime_lock held. */
unsigned long count =
@ -129,6 +127,6 @@ static unsigned int bcm1480_hpt_read(void)
void __init bcm1480_hpt_setup(void)
{
mips_hpt_read = bcm1480_hpt_read;
clocksource_mips.read = bcm1480_hpt_read;
mips_hpt_frequency = BCM1480_HPT_VALUE;
}


@ -44,11 +44,9 @@
*/
#define shutdown_sb1250_irq disable_sb1250_irq
static void end_sb1250_irq(unsigned int irq);
static void enable_sb1250_irq(unsigned int irq);
static void disable_sb1250_irq(unsigned int irq);
static unsigned int startup_sb1250_irq(unsigned int irq);
static void ack_sb1250_irq(unsigned int irq);
#ifdef CONFIG_SMP
static void sb1250_set_affinity(unsigned int irq, cpumask_t mask);
@ -70,11 +68,10 @@ extern char sb1250_duart_present[];
static struct irq_chip sb1250_irq_type = {
.typename = "SB1250-IMR",
.startup = startup_sb1250_irq,
.shutdown = shutdown_sb1250_irq,
.enable = enable_sb1250_irq,
.disable = disable_sb1250_irq,
.ack = ack_sb1250_irq,
.mask = disable_sb1250_irq,
.mask_ack = ack_sb1250_irq,
.unmask = enable_sb1250_irq,
.end = end_sb1250_irq,
#ifdef CONFIG_SMP
.set_affinity = sb1250_set_affinity
@ -163,14 +160,6 @@ static void sb1250_set_affinity(unsigned int irq, cpumask_t mask)
/*****************************************************************************/
static unsigned int startup_sb1250_irq(unsigned int irq)
{
sb1250_unmask_irq(sb1250_irq_owner[irq], irq);
return 0; /* never anything pending */
}
static void disable_sb1250_irq(unsigned int irq)
{
sb1250_mask_irq(sb1250_irq_owner[irq], irq);
@ -239,16 +228,9 @@ void __init init_sb1250_irqs(void)
{
int i;
for (i = 0; i < NR_IRQS; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
if (i < SB1250_NR_IRQS) {
irq_desc[i].chip = &sb1250_irq_type;
sb1250_irq_owner[i] = 0;
} else {
irq_desc[i].chip = &no_irq_chip;
}
for (i = 0; i < SB1250_NR_IRQS; i++) {
set_irq_chip(i, &sb1250_irq_type);
sb1250_irq_owner[i] = 0;
}
}


@ -51,7 +51,7 @@
extern int sb1250_steal_irq(int irq);
static unsigned int sb1250_hpt_read(void);
static cycle_t sb1250_hpt_read(void);
void __init sb1250_hpt_setup(void)
{
@ -66,8 +66,8 @@ void __init sb1250_hpt_setup(void)
IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CFG)));
mips_hpt_frequency = V_SCD_TIMER_FREQ;
mips_hpt_read = sb1250_hpt_read;
mips_hpt_mask = M_SCD_TIMER_INIT;
clocksource_mips.read = sb1250_hpt_read;
clocksource_mips.mask = M_SCD_TIMER_INIT;
}
}
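/*
 * Note: a counter narrower than 32 bits also overrides
 * clocksource_mips.mask (M_SCD_TIMER_INIT here), taking over the role
 * of the removed mips_hpt_mask; boards with a full 32-bit counter keep
 * the 0xffffffff default.
 */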
@ -143,7 +143,7 @@ void sb1250_timer_interrupt(void)
* The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
* again.
*/
static unsigned int sb1250_hpt_read(void)
static cycle_t sb1250_hpt_read(void)
{
unsigned int count;


@ -11,44 +11,25 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/sni.h>
DEFINE_SPINLOCK(pciasic_lock);
static void enable_pciasic_irq(unsigned int irq)
{
unsigned int mask = 1 << (irq - PCIMT_IRQ_INT2);
unsigned long flags;
spin_lock_irqsave(&pciasic_lock, flags);
*(volatile u8 *) PCIMT_IRQSEL |= mask;
spin_unlock_irqrestore(&pciasic_lock, flags);
}
static unsigned int startup_pciasic_irq(unsigned int irq)
{
enable_pciasic_irq(irq);
return 0; /* never anything pending */
}
#define shutdown_pciasic_irq disable_pciasic_irq
void disable_pciasic_irq(unsigned int irq)
{
unsigned int mask = ~(1 << (irq - PCIMT_IRQ_INT2));
unsigned long flags;
spin_lock_irqsave(&pciasic_lock, flags);
*(volatile u8 *) PCIMT_IRQSEL &= mask;
spin_unlock_irqrestore(&pciasic_lock, flags);
}
#define mask_and_ack_pciasic_irq disable_pciasic_irq
static void end_pciasic_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
@ -57,11 +38,10 @@ static void end_pciasic_irq(unsigned int irq)
static struct irq_chip pciasic_irq_type = {
.typename = "ASIC-PCI",
.startup = startup_pciasic_irq,
.shutdown = shutdown_pciasic_irq,
.enable = enable_pciasic_irq,
.disable = disable_pciasic_irq,
.ack = mask_and_ack_pciasic_irq,
.ack = disable_pciasic_irq,
.mask = disable_pciasic_irq,
.mask_ack = disable_pciasic_irq,
.unmask = enable_pciasic_irq,
.end = end_pciasic_irq,
};
@ -178,12 +158,8 @@ asmlinkage void plat_irq_dispatch(void)
void __init init_pciasic(void)
{
unsigned long flags;
spin_lock_irqsave(&pciasic_lock, flags);
* (volatile u8 *) PCIMT_IRQSEL =
IT_EISA | IT_INTA | IT_INTB | IT_INTC | IT_INTD;
spin_unlock_irqrestore(&pciasic_lock, flags);
}
/*
@ -199,12 +175,8 @@ void __init arch_init_irq(void)
init_pciasic();
/* Actually we've got more interrupts to handle ... */
for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_ETHERNET; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
irq_desc[i].chip = &pciasic_irq_type;
}
for (i = PCIMT_IRQ_INT2; i <= PCIMT_IRQ_ETHERNET; i++)
set_irq_chip(i, &pciasic_irq_type);
change_c0_status(ST0_IM, IE_IRQ1|IE_IRQ2|IE_IRQ3|IE_IRQ4);
}


@ -64,19 +64,13 @@
#define TX4927_IRQ_NEST4 ( 1 << 9 )
#define TX4927_IRQ_CP0_INIT ( 1 << 10 )
#define TX4927_IRQ_CP0_STARTUP ( 1 << 11 )
#define TX4927_IRQ_CP0_SHUTDOWN ( 1 << 12 )
#define TX4927_IRQ_CP0_ENABLE ( 1 << 13 )
#define TX4927_IRQ_CP0_DISABLE ( 1 << 14 )
#define TX4927_IRQ_CP0_MASK ( 1 << 15 )
#define TX4927_IRQ_CP0_ENDIRQ ( 1 << 16 )
#define TX4927_IRQ_PIC_INIT ( 1 << 20 )
#define TX4927_IRQ_PIC_STARTUP ( 1 << 21 )
#define TX4927_IRQ_PIC_SHUTDOWN ( 1 << 22 )
#define TX4927_IRQ_PIC_ENABLE ( 1 << 23 )
#define TX4927_IRQ_PIC_DISABLE ( 1 << 24 )
#define TX4927_IRQ_PIC_MASK ( 1 << 25 )
#define TX4927_IRQ_PIC_ENDIRQ ( 1 << 26 )
#define TX4927_IRQ_ALL 0xffffffff
@ -87,18 +81,12 @@ static const u32 tx4927_irq_debug_flag = (TX4927_IRQ_NONE
| TX4927_IRQ_INFO
| TX4927_IRQ_WARN | TX4927_IRQ_EROR
// | TX4927_IRQ_CP0_INIT
// | TX4927_IRQ_CP0_STARTUP
// | TX4927_IRQ_CP0_SHUTDOWN
// | TX4927_IRQ_CP0_ENABLE
// | TX4927_IRQ_CP0_DISABLE
// | TX4927_IRQ_CP0_MASK
// | TX4927_IRQ_CP0_ENDIRQ
// | TX4927_IRQ_PIC_INIT
// | TX4927_IRQ_PIC_STARTUP
// | TX4927_IRQ_PIC_SHUTDOWN
// | TX4927_IRQ_PIC_ENABLE
// | TX4927_IRQ_PIC_DISABLE
// | TX4927_IRQ_PIC_MASK
// | TX4927_IRQ_PIC_ENDIRQ
// | TX4927_IRQ_INIT
// | TX4927_IRQ_NEST1
@ -124,49 +112,36 @@ static const u32 tx4927_irq_debug_flag = (TX4927_IRQ_NONE
* Forward definitions for all PICs
*/
static unsigned int tx4927_irq_cp0_startup(unsigned int irq);
static void tx4927_irq_cp0_shutdown(unsigned int irq);
static void tx4927_irq_cp0_enable(unsigned int irq);
static void tx4927_irq_cp0_disable(unsigned int irq);
static void tx4927_irq_cp0_mask_and_ack(unsigned int irq);
static void tx4927_irq_cp0_end(unsigned int irq);
static unsigned int tx4927_irq_pic_startup(unsigned int irq);
static void tx4927_irq_pic_shutdown(unsigned int irq);
static void tx4927_irq_pic_enable(unsigned int irq);
static void tx4927_irq_pic_disable(unsigned int irq);
static void tx4927_irq_pic_mask_and_ack(unsigned int irq);
static void tx4927_irq_pic_end(unsigned int irq);
/*
* Kernel structs for all PICs
*/
static DEFINE_SPINLOCK(tx4927_cp0_lock);
static DEFINE_SPINLOCK(tx4927_pic_lock);
#define TX4927_CP0_NAME "TX4927-CP0"
static struct irq_chip tx4927_irq_cp0_type = {
.typename = TX4927_CP0_NAME,
.startup = tx4927_irq_cp0_startup,
.shutdown = tx4927_irq_cp0_shutdown,
.enable = tx4927_irq_cp0_enable,
.disable = tx4927_irq_cp0_disable,
.ack = tx4927_irq_cp0_mask_and_ack,
.ack = tx4927_irq_cp0_disable,
.mask = tx4927_irq_cp0_disable,
.mask_ack = tx4927_irq_cp0_disable,
.unmask = tx4927_irq_cp0_enable,
.end = tx4927_irq_cp0_end,
.set_affinity = NULL
};
#define TX4927_PIC_NAME "TX4927-PIC"
static struct irq_chip tx4927_irq_pic_type = {
.typename = TX4927_PIC_NAME,
.startup = tx4927_irq_pic_startup,
.shutdown = tx4927_irq_pic_shutdown,
.enable = tx4927_irq_pic_enable,
.disable = tx4927_irq_pic_disable,
.ack = tx4927_irq_pic_mask_and_ack,
.ack = tx4927_irq_pic_disable,
.mask = tx4927_irq_pic_disable,
.mask_ack = tx4927_irq_pic_disable,
.unmask = tx4927_irq_pic_enable,
.end = tx4927_irq_pic_end,
.set_affinity = NULL
};
#define TX4927_PIC_ACTION(s) { no_action, 0, CPU_MASK_NONE, s, NULL, NULL }
@ -211,8 +186,6 @@ tx4927_irq_cp0_modify(unsigned cp0_reg, unsigned clr_bits, unsigned set_bits)
break;
}
}
return;
}
static void __init tx4927_irq_cp0_init(void)
@ -222,71 +195,23 @@ static void __init tx4927_irq_cp0_init(void)
TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_INIT, "beg=%d end=%d\n",
TX4927_IRQ_CP0_BEG, TX4927_IRQ_CP0_END);
for (i = TX4927_IRQ_CP0_BEG; i <= TX4927_IRQ_CP0_END; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
irq_desc[i].chip = &tx4927_irq_cp0_type;
}
return;
}
static unsigned int tx4927_irq_cp0_startup(unsigned int irq)
{
TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_STARTUP, "irq=%d \n", irq);
tx4927_irq_cp0_enable(irq);
return (0);
}
static void tx4927_irq_cp0_shutdown(unsigned int irq)
{
TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_SHUTDOWN, "irq=%d \n", irq);
tx4927_irq_cp0_disable(irq);
return;
for (i = TX4927_IRQ_CP0_BEG; i <= TX4927_IRQ_CP0_END; i++)
set_irq_chip_and_handler(i, &tx4927_irq_cp0_type,
handle_level_irq);
}
static void tx4927_irq_cp0_enable(unsigned int irq)
{
unsigned long flags;
TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_ENABLE, "irq=%d \n", irq);
spin_lock_irqsave(&tx4927_cp0_lock, flags);
tx4927_irq_cp0_modify(CCP0_STATUS, 0, tx4927_irq_cp0_mask(irq));
spin_unlock_irqrestore(&tx4927_cp0_lock, flags);
return;
}
static void tx4927_irq_cp0_disable(unsigned int irq)
{
unsigned long flags;
TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_DISABLE, "irq=%d \n", irq);
spin_lock_irqsave(&tx4927_cp0_lock, flags);
tx4927_irq_cp0_modify(CCP0_STATUS, tx4927_irq_cp0_mask(irq), 0);
spin_unlock_irqrestore(&tx4927_cp0_lock, flags);
return;
}
static void tx4927_irq_cp0_mask_and_ack(unsigned int irq)
{
TX4927_IRQ_DPRINTK(TX4927_IRQ_CP0_MASK, "irq=%d \n", irq);
tx4927_irq_cp0_disable(irq);
return;
}
static void tx4927_irq_cp0_end(unsigned int irq)
@ -296,8 +221,6 @@ static void tx4927_irq_cp0_end(unsigned int irq)
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
tx4927_irq_cp0_enable(irq);
}
return;
}
/*
@ -418,94 +341,39 @@ static void tx4927_irq_pic_modify(unsigned pic_reg, unsigned clr_bits,
val &= (~clr_bits);
val |= (set_bits);
TX4927_WR(pic_reg, val);
return;
}
static void __init tx4927_irq_pic_init(void)
{
unsigned long flags;
int i;
TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_INIT, "beg=%d end=%d\n",
TX4927_IRQ_PIC_BEG, TX4927_IRQ_PIC_END);
for (i = TX4927_IRQ_PIC_BEG; i <= TX4927_IRQ_PIC_END; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 2;
irq_desc[i].chip = &tx4927_irq_pic_type;
}
for (i = TX4927_IRQ_PIC_BEG; i <= TX4927_IRQ_PIC_END; i++)
set_irq_chip_and_handler(i, &tx4927_irq_pic_type,
handle_level_irq);
setup_irq(TX4927_IRQ_NEST_PIC_ON_CP0, &tx4927_irq_pic_action);
spin_lock_irqsave(&tx4927_pic_lock, flags);
TX4927_WR(0xff1ff640, 0x6); /* irq level mask -- only accept highest */
TX4927_WR(0xff1ff600, TX4927_RD(0xff1ff600) | 0x1); /* irq enable */
spin_unlock_irqrestore(&tx4927_pic_lock, flags);
return;
}
static unsigned int tx4927_irq_pic_startup(unsigned int irq)
{
TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_STARTUP, "irq=%d\n", irq);
tx4927_irq_pic_enable(irq);
return (0);
}
static void tx4927_irq_pic_shutdown(unsigned int irq)
{
TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_SHUTDOWN, "irq=%d\n", irq);
tx4927_irq_pic_disable(irq);
return;
}
static void tx4927_irq_pic_enable(unsigned int irq)
{
unsigned long flags;
TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_ENABLE, "irq=%d\n", irq);
spin_lock_irqsave(&tx4927_pic_lock, flags);
tx4927_irq_pic_modify(tx4927_irq_pic_addr(irq), 0,
tx4927_irq_pic_mask(irq));
spin_unlock_irqrestore(&tx4927_pic_lock, flags);
return;
}
static void tx4927_irq_pic_disable(unsigned int irq)
{
unsigned long flags;
TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_DISABLE, "irq=%d\n", irq);
spin_lock_irqsave(&tx4927_pic_lock, flags);
tx4927_irq_pic_modify(tx4927_irq_pic_addr(irq),
tx4927_irq_pic_mask(irq), 0);
spin_unlock_irqrestore(&tx4927_pic_lock, flags);
return;
}
static void tx4927_irq_pic_mask_and_ack(unsigned int irq)
{
TX4927_IRQ_DPRINTK(TX4927_IRQ_PIC_MASK, "irq=%d\n", irq);
tx4927_irq_pic_disable(irq);
return;
}
static void tx4927_irq_pic_end(unsigned int irq)
@ -515,8 +383,6 @@ static void tx4927_irq_pic_end(unsigned int irq)
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
tx4927_irq_pic_enable(irq);
}
return;
}
/*
@ -533,8 +399,6 @@ void __init tx4927_irq_init(void)
tx4927_irq_pic_init();
TX4927_IRQ_DPRINTK(TX4927_IRQ_INIT, "+\n");
return;
}
static int tx4927_irq_nested(void)

View File

@ -151,16 +151,11 @@ JP7 is not bus master -- do NOT use -- only 4 pci bus master's allowed -- SouthB
#define TOSHIBA_RBTX4927_IRQ_EROR ( 1 << 2 )
#define TOSHIBA_RBTX4927_IRQ_IOC_INIT ( 1 << 10 )
#define TOSHIBA_RBTX4927_IRQ_IOC_STARTUP ( 1 << 11 )
#define TOSHIBA_RBTX4927_IRQ_IOC_SHUTDOWN ( 1 << 12 )
#define TOSHIBA_RBTX4927_IRQ_IOC_ENABLE ( 1 << 13 )
#define TOSHIBA_RBTX4927_IRQ_IOC_DISABLE ( 1 << 14 )
#define TOSHIBA_RBTX4927_IRQ_IOC_MASK ( 1 << 15 )
#define TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ ( 1 << 16 )
#define TOSHIBA_RBTX4927_IRQ_ISA_INIT ( 1 << 20 )
#define TOSHIBA_RBTX4927_IRQ_ISA_STARTUP ( 1 << 21 )
#define TOSHIBA_RBTX4927_IRQ_ISA_SHUTDOWN ( 1 << 22 )
#define TOSHIBA_RBTX4927_IRQ_ISA_ENABLE ( 1 << 23 )
#define TOSHIBA_RBTX4927_IRQ_ISA_DISABLE ( 1 << 24 )
#define TOSHIBA_RBTX4927_IRQ_ISA_MASK ( 1 << 25 )
@ -175,15 +170,10 @@ static const u32 toshiba_rbtx4927_irq_debug_flag =
(TOSHIBA_RBTX4927_IRQ_NONE | TOSHIBA_RBTX4927_IRQ_INFO |
TOSHIBA_RBTX4927_IRQ_WARN | TOSHIBA_RBTX4927_IRQ_EROR
// | TOSHIBA_RBTX4927_IRQ_IOC_INIT
// | TOSHIBA_RBTX4927_IRQ_IOC_STARTUP
// | TOSHIBA_RBTX4927_IRQ_IOC_SHUTDOWN
// | TOSHIBA_RBTX4927_IRQ_IOC_ENABLE
// | TOSHIBA_RBTX4927_IRQ_IOC_DISABLE
// | TOSHIBA_RBTX4927_IRQ_IOC_MASK
// | TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ
// | TOSHIBA_RBTX4927_IRQ_ISA_INIT
// | TOSHIBA_RBTX4927_IRQ_ISA_STARTUP
// | TOSHIBA_RBTX4927_IRQ_ISA_SHUTDOWN
// | TOSHIBA_RBTX4927_IRQ_ISA_ENABLE
// | TOSHIBA_RBTX4927_IRQ_ISA_DISABLE
// | TOSHIBA_RBTX4927_IRQ_ISA_MASK
@ -231,35 +221,25 @@ extern void disable_8259A_irq(unsigned int irq);
extern void mask_and_ack_8259A(unsigned int irq);
#endif
static unsigned int toshiba_rbtx4927_irq_ioc_startup(unsigned int irq);
static void toshiba_rbtx4927_irq_ioc_shutdown(unsigned int irq);
static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq);
static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq);
static void toshiba_rbtx4927_irq_ioc_mask_and_ack(unsigned int irq);
static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq);
#ifdef CONFIG_TOSHIBA_FPCIB0
static unsigned int toshiba_rbtx4927_irq_isa_startup(unsigned int irq);
static void toshiba_rbtx4927_irq_isa_shutdown(unsigned int irq);
static void toshiba_rbtx4927_irq_isa_enable(unsigned int irq);
static void toshiba_rbtx4927_irq_isa_disable(unsigned int irq);
static void toshiba_rbtx4927_irq_isa_mask_and_ack(unsigned int irq);
static void toshiba_rbtx4927_irq_isa_end(unsigned int irq);
#endif
static DEFINE_SPINLOCK(toshiba_rbtx4927_ioc_lock);
#define TOSHIBA_RBTX4927_IOC_NAME "RBTX4927-IOC"
static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
.typename = TOSHIBA_RBTX4927_IOC_NAME,
.startup = toshiba_rbtx4927_irq_ioc_startup,
.shutdown = toshiba_rbtx4927_irq_ioc_shutdown,
.enable = toshiba_rbtx4927_irq_ioc_enable,
.disable = toshiba_rbtx4927_irq_ioc_disable,
.ack = toshiba_rbtx4927_irq_ioc_mask_and_ack,
.ack = toshiba_rbtx4927_irq_ioc_disable,
.mask = toshiba_rbtx4927_irq_ioc_disable,
.mask_ack = toshiba_rbtx4927_irq_ioc_disable,
.unmask = toshiba_rbtx4927_irq_ioc_enable,
.end = toshiba_rbtx4927_irq_ioc_end,
.set_affinity = NULL
};
#define TOSHIBA_RBTX4927_IOC_INTR_ENAB 0xbc002000
#define TOSHIBA_RBTX4927_IOC_INTR_STAT 0xbc002006
@ -269,13 +249,11 @@ static struct irq_chip toshiba_rbtx4927_irq_ioc_type = {
#define TOSHIBA_RBTX4927_ISA_NAME "RBTX4927-ISA"
static struct irq_chip toshiba_rbtx4927_irq_isa_type = {
.typename = TOSHIBA_RBTX4927_ISA_NAME,
.startup = toshiba_rbtx4927_irq_isa_startup,
.shutdown = toshiba_rbtx4927_irq_isa_shutdown,
.enable = toshiba_rbtx4927_irq_isa_enable,
.disable = toshiba_rbtx4927_irq_isa_disable,
.ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
.mask = toshiba_rbtx4927_irq_isa_disable,
.mask_ack = toshiba_rbtx4927_irq_isa_mask_and_ack,
.unmask = toshiba_rbtx4927_irq_isa_enable,
.end = toshiba_rbtx4927_irq_isa_end,
.set_affinity = NULL
};
#endif
@ -363,58 +341,16 @@ static void __init toshiba_rbtx4927_irq_ioc_init(void)
TOSHIBA_RBTX4927_IRQ_IOC_END);
for (i = TOSHIBA_RBTX4927_IRQ_IOC_BEG;
i <= TOSHIBA_RBTX4927_IRQ_IOC_END; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 3;
irq_desc[i].chip = &toshiba_rbtx4927_irq_ioc_type;
}
i <= TOSHIBA_RBTX4927_IRQ_IOC_END; i++)
set_irq_chip_and_handler(i, &toshiba_rbtx4927_irq_ioc_type,
handle_level_irq);
setup_irq(TOSHIBA_RBTX4927_IRQ_NEST_IOC_ON_PIC,
&toshiba_rbtx4927_irq_ioc_action);
return;
}
static unsigned int toshiba_rbtx4927_irq_ioc_startup(unsigned int irq)
{
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_STARTUP,
"irq=%d\n", irq);
if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
|| irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
"bad irq=%d\n", irq);
panic("\n");
}
toshiba_rbtx4927_irq_ioc_enable(irq);
return (0);
}
static void toshiba_rbtx4927_irq_ioc_shutdown(unsigned int irq)
{
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_SHUTDOWN,
"irq=%d\n", irq);
if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
|| irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
"bad irq=%d\n", irq);
panic("\n");
}
toshiba_rbtx4927_irq_ioc_disable(irq);
return;
}
static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq)
{
unsigned long flags;
volatile unsigned char v;
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_ENABLE,
@ -427,21 +363,14 @@ static void toshiba_rbtx4927_irq_ioc_enable(unsigned int irq)
panic("\n");
}
spin_lock_irqsave(&toshiba_rbtx4927_ioc_lock, flags);
v = TX4927_RD08(TOSHIBA_RBTX4927_IOC_INTR_ENAB);
v |= (1 << (irq - TOSHIBA_RBTX4927_IRQ_IOC_BEG));
TOSHIBA_RBTX4927_WR08(TOSHIBA_RBTX4927_IOC_INTR_ENAB, v);
spin_unlock_irqrestore(&toshiba_rbtx4927_ioc_lock, flags);
return;
}
static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq)
{
unsigned long flags;
volatile unsigned char v;
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_DISABLE,
@ -454,36 +383,11 @@ static void toshiba_rbtx4927_irq_ioc_disable(unsigned int irq)
panic("\n");
}
spin_lock_irqsave(&toshiba_rbtx4927_ioc_lock, flags);
v = TX4927_RD08(TOSHIBA_RBTX4927_IOC_INTR_ENAB);
v &= ~(1 << (irq - TOSHIBA_RBTX4927_IRQ_IOC_BEG));
TOSHIBA_RBTX4927_WR08(TOSHIBA_RBTX4927_IOC_INTR_ENAB, v);
spin_unlock_irqrestore(&toshiba_rbtx4927_ioc_lock, flags);
return;
}
static void toshiba_rbtx4927_irq_ioc_mask_and_ack(unsigned int irq)
{
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_MASK,
"irq=%d\n", irq);
if (irq < TOSHIBA_RBTX4927_IRQ_IOC_BEG
|| irq > TOSHIBA_RBTX4927_IRQ_IOC_END) {
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
"bad irq=%d\n", irq);
panic("\n");
}
toshiba_rbtx4927_irq_ioc_disable(irq);
return;
}
static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq)
{
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_IOC_ENDIRQ,
@ -499,8 +403,6 @@ static void toshiba_rbtx4927_irq_ioc_end(unsigned int irq)
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
toshiba_rbtx4927_irq_ioc_enable(irq);
}
return;
}
@ -520,13 +422,8 @@ static void __init toshiba_rbtx4927_irq_isa_init(void)
TOSHIBA_RBTX4927_IRQ_ISA_END);
for (i = TOSHIBA_RBTX4927_IRQ_ISA_BEG;
i <= TOSHIBA_RBTX4927_IRQ_ISA_END; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth =
((i < TOSHIBA_RBTX4927_IRQ_ISA_MID) ? (4) : (5));
irq_desc[i].chip = &toshiba_rbtx4927_irq_isa_type;
}
i <= TOSHIBA_RBTX4927_IRQ_ISA_END; i++)
set_irq_chip(i, &toshiba_rbtx4927_irq_isa_type);
setup_irq(TOSHIBA_RBTX4927_IRQ_NEST_ISA_ON_IOC,
&toshiba_rbtx4927_irq_isa_master);
@ -536,48 +433,6 @@ static void __init toshiba_rbtx4927_irq_isa_init(void)
/* make sure we are looking at IRR (not ISR) */
outb(0x0A, 0x20);
outb(0x0A, 0xA0);
return;
}
#endif
#ifdef CONFIG_TOSHIBA_FPCIB0
static unsigned int toshiba_rbtx4927_irq_isa_startup(unsigned int irq)
{
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_ISA_STARTUP,
"irq=%d\n", irq);
if (irq < TOSHIBA_RBTX4927_IRQ_ISA_BEG
|| irq > TOSHIBA_RBTX4927_IRQ_ISA_END) {
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
"bad irq=%d\n", irq);
panic("\n");
}
toshiba_rbtx4927_irq_isa_enable(irq);
return (0);
}
#endif
#ifdef CONFIG_TOSHIBA_FPCIB0
static void toshiba_rbtx4927_irq_isa_shutdown(unsigned int irq)
{
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_ISA_SHUTDOWN,
"irq=%d\n", irq);
if (irq < TOSHIBA_RBTX4927_IRQ_ISA_BEG
|| irq > TOSHIBA_RBTX4927_IRQ_ISA_END) {
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_EROR,
"bad irq=%d\n", irq);
panic("\n");
}
toshiba_rbtx4927_irq_isa_disable(irq);
return;
}
#endif
@ -596,8 +451,6 @@ static void toshiba_rbtx4927_irq_isa_enable(unsigned int irq)
}
enable_8259A_irq(irq);
return;
}
#endif
@ -616,8 +469,6 @@ static void toshiba_rbtx4927_irq_isa_disable(unsigned int irq)
}
disable_8259A_irq(irq);
return;
}
#endif
@ -636,8 +487,6 @@ static void toshiba_rbtx4927_irq_isa_mask_and_ack(unsigned int irq)
}
mask_and_ack_8259A(irq);
return;
}
#endif
@ -658,8 +507,6 @@ static void toshiba_rbtx4927_irq_isa_end(unsigned int irq)
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
toshiba_rbtx4927_irq_isa_enable(irq);
}
return;
}
#endif
@ -668,8 +515,6 @@ void __init arch_init_irq(void)
{
extern void tx4927_irq_init(void);
local_irq_disable();
tx4927_irq_init();
toshiba_rbtx4927_irq_ioc_init();
#ifdef CONFIG_TOSHIBA_FPCIB0
@ -681,8 +526,6 @@ void __init arch_init_irq(void)
#endif
wbflush();
return;
}
void toshiba_rbtx4927_irq_dump(char *key)
@ -715,7 +558,6 @@ void toshiba_rbtx4927_irq_dump(char *key)
}
}
#endif
return;
}
void toshiba_rbtx4927_irq_dump_pics(char *s)
@ -780,6 +622,4 @@ void toshiba_rbtx4927_irq_dump_pics(char *s)
level5_s);
TOSHIBA_RBTX4927_IRQ_DPRINTK(TOSHIBA_RBTX4927_IRQ_INFO, "[%s]\n",
s);
return;
}

View File

@ -37,48 +37,36 @@
/* Forward definitions for all PICs */
/**********************************************************************************/
static unsigned int tx4938_irq_cp0_startup(unsigned int irq);
static void tx4938_irq_cp0_shutdown(unsigned int irq);
static void tx4938_irq_cp0_enable(unsigned int irq);
static void tx4938_irq_cp0_disable(unsigned int irq);
static void tx4938_irq_cp0_mask_and_ack(unsigned int irq);
static void tx4938_irq_cp0_end(unsigned int irq);
static unsigned int tx4938_irq_pic_startup(unsigned int irq);
static void tx4938_irq_pic_shutdown(unsigned int irq);
static void tx4938_irq_pic_enable(unsigned int irq);
static void tx4938_irq_pic_disable(unsigned int irq);
static void tx4938_irq_pic_mask_and_ack(unsigned int irq);
static void tx4938_irq_pic_end(unsigned int irq);
/**********************************************************************************/
/* Kernel structs for all PICs */
/**********************************************************************************/
DEFINE_SPINLOCK(tx4938_cp0_lock);
DEFINE_SPINLOCK(tx4938_pic_lock);
#define TX4938_CP0_NAME "TX4938-CP0"
static struct irq_chip tx4938_irq_cp0_type = {
.typename = TX4938_CP0_NAME,
.startup = tx4938_irq_cp0_startup,
.shutdown = tx4938_irq_cp0_shutdown,
.enable = tx4938_irq_cp0_enable,
.disable = tx4938_irq_cp0_disable,
.ack = tx4938_irq_cp0_mask_and_ack,
.ack = tx4938_irq_cp0_disable,
.mask = tx4938_irq_cp0_disable,
.mask_ack = tx4938_irq_cp0_disable,
.unmask = tx4938_irq_cp0_enable,
.end = tx4938_irq_cp0_end,
.set_affinity = NULL
};
#define TX4938_PIC_NAME "TX4938-PIC"
static struct irq_chip tx4938_irq_pic_type = {
.typename = TX4938_PIC_NAME,
.startup = tx4938_irq_pic_startup,
.shutdown = tx4938_irq_pic_shutdown,
.enable = tx4938_irq_pic_enable,
.disable = tx4938_irq_pic_disable,
.ack = tx4938_irq_pic_mask_and_ack,
.ack = tx4938_irq_pic_disable,
.mask = tx4938_irq_pic_disable,
.mask_ack = tx4938_irq_pic_disable,
.unmask = tx4938_irq_pic_enable,
.end = tx4938_irq_pic_end,
.set_affinity = NULL
};
static struct irqaction tx4938_irq_pic_action = {
@ -99,56 +87,21 @@ tx4938_irq_cp0_init(void)
{
int i;
for (i = TX4938_IRQ_CP0_BEG; i <= TX4938_IRQ_CP0_END; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 1;
irq_desc[i].chip = &tx4938_irq_cp0_type;
}
}
static unsigned int
tx4938_irq_cp0_startup(unsigned int irq)
{
tx4938_irq_cp0_enable(irq);
return 0;
}
static void
tx4938_irq_cp0_shutdown(unsigned int irq)
{
tx4938_irq_cp0_disable(irq);
for (i = TX4938_IRQ_CP0_BEG; i <= TX4938_IRQ_CP0_END; i++)
set_irq_chip_and_handler(i, &tx4938_irq_cp0_type,
handle_level_irq);
}
static void
tx4938_irq_cp0_enable(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&tx4938_cp0_lock, flags);
set_c0_status(tx4938_irq_cp0_mask(irq));
spin_unlock_irqrestore(&tx4938_cp0_lock, flags);
}
static void
tx4938_irq_cp0_disable(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&tx4938_cp0_lock, flags);
clear_c0_status(tx4938_irq_cp0_mask(irq));
spin_unlock_irqrestore(&tx4938_cp0_lock, flags);
}
static void
tx4938_irq_cp0_mask_and_ack(unsigned int irq)
{
tx4938_irq_cp0_disable(irq);
}
static void
@ -290,70 +243,30 @@ tx4938_irq_pic_modify(unsigned pic_reg, unsigned clr_bits, unsigned set_bits)
static void __init
tx4938_irq_pic_init(void)
{
unsigned long flags;
int i;
for (i = TX4938_IRQ_PIC_BEG; i <= TX4938_IRQ_PIC_END; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 2;
irq_desc[i].chip = &tx4938_irq_pic_type;
}
for (i = TX4938_IRQ_PIC_BEG; i <= TX4938_IRQ_PIC_END; i++)
set_irq_chip_and_handler(i, &tx4938_irq_pic_type,
handle_level_irq);
setup_irq(TX4938_IRQ_NEST_PIC_ON_CP0, &tx4938_irq_pic_action);
spin_lock_irqsave(&tx4938_pic_lock, flags);
TX4938_WR(0xff1ff640, 0x6); /* irq level mask -- only accept highest */
TX4938_WR(0xff1ff600, TX4938_RD(0xff1ff600) | 0x1); /* irq enable */
spin_unlock_irqrestore(&tx4938_pic_lock, flags);
}
static unsigned int
tx4938_irq_pic_startup(unsigned int irq)
{
tx4938_irq_pic_enable(irq);
return 0;
}
static void
tx4938_irq_pic_shutdown(unsigned int irq)
{
tx4938_irq_pic_disable(irq);
}
static void
tx4938_irq_pic_enable(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&tx4938_pic_lock, flags);
tx4938_irq_pic_modify(tx4938_irq_pic_addr(irq), 0,
tx4938_irq_pic_mask(irq));
spin_unlock_irqrestore(&tx4938_pic_lock, flags);
}
static void
tx4938_irq_pic_disable(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&tx4938_pic_lock, flags);
tx4938_irq_pic_modify(tx4938_irq_pic_addr(irq),
tx4938_irq_pic_mask(irq), 0);
spin_unlock_irqrestore(&tx4938_pic_lock, flags);
}
static void
tx4938_irq_pic_mask_and_ack(unsigned int irq)
{
tx4938_irq_pic_disable(irq);
}
static void

View File

@ -31,7 +31,6 @@
#include <asm/mipsregs.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/time.h>
#include <asm/tx4938/rbtx4938.h>
extern void toshiba_rbtx4938_setup(void);

View File

@ -87,25 +87,18 @@ IRQ Device
#include <linux/bootmem.h>
#include <asm/tx4938/rbtx4938.h>
static unsigned int toshiba_rbtx4938_irq_ioc_startup(unsigned int irq);
static void toshiba_rbtx4938_irq_ioc_shutdown(unsigned int irq);
static void toshiba_rbtx4938_irq_ioc_enable(unsigned int irq);
static void toshiba_rbtx4938_irq_ioc_disable(unsigned int irq);
static void toshiba_rbtx4938_irq_ioc_mask_and_ack(unsigned int irq);
static void toshiba_rbtx4938_irq_ioc_end(unsigned int irq);
DEFINE_SPINLOCK(toshiba_rbtx4938_ioc_lock);
#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
.typename = TOSHIBA_RBTX4938_IOC_NAME,
.startup = toshiba_rbtx4938_irq_ioc_startup,
.shutdown = toshiba_rbtx4938_irq_ioc_shutdown,
.enable = toshiba_rbtx4938_irq_ioc_enable,
.disable = toshiba_rbtx4938_irq_ioc_disable,
.ack = toshiba_rbtx4938_irq_ioc_mask_and_ack,
.ack = toshiba_rbtx4938_irq_ioc_disable,
.mask = toshiba_rbtx4938_irq_ioc_disable,
.mask_ack = toshiba_rbtx4938_irq_ioc_disable,
.unmask = toshiba_rbtx4938_irq_ioc_enable,
.end = toshiba_rbtx4938_irq_ioc_end,
.set_affinity = NULL
};
#define TOSHIBA_RBTX4938_IOC_INTR_ENAB 0xb7f02000
@ -142,69 +135,36 @@ toshiba_rbtx4938_irq_ioc_init(void)
int i;
for (i = TOSHIBA_RBTX4938_IRQ_IOC_BEG;
i <= TOSHIBA_RBTX4938_IRQ_IOC_END; i++) {
irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = 0;
irq_desc[i].depth = 3;
irq_desc[i].chip = &toshiba_rbtx4938_irq_ioc_type;
}
i <= TOSHIBA_RBTX4938_IRQ_IOC_END; i++)
set_irq_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type,
handle_level_irq);
setup_irq(RBTX4938_IRQ_IOCINT,
&toshiba_rbtx4938_irq_ioc_action);
}
static unsigned int
toshiba_rbtx4938_irq_ioc_startup(unsigned int irq)
{
toshiba_rbtx4938_irq_ioc_enable(irq);
return 0;
}
static void
toshiba_rbtx4938_irq_ioc_shutdown(unsigned int irq)
{
toshiba_rbtx4938_irq_ioc_disable(irq);
}
static void
toshiba_rbtx4938_irq_ioc_enable(unsigned int irq)
{
unsigned long flags;
volatile unsigned char v;
spin_lock_irqsave(&toshiba_rbtx4938_ioc_lock, flags);
v = TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
v |= (1 << (irq - TOSHIBA_RBTX4938_IRQ_IOC_BEG));
TX4938_WR08(TOSHIBA_RBTX4938_IOC_INTR_ENAB, v);
mmiowb();
TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
spin_unlock_irqrestore(&toshiba_rbtx4938_ioc_lock, flags);
}
static void
toshiba_rbtx4938_irq_ioc_disable(unsigned int irq)
{
unsigned long flags;
volatile unsigned char v;
spin_lock_irqsave(&toshiba_rbtx4938_ioc_lock, flags);
v = TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
v &= ~(1 << (irq - TOSHIBA_RBTX4938_IRQ_IOC_BEG));
TX4938_WR08(TOSHIBA_RBTX4938_IOC_INTR_ENAB, v);
mmiowb();
TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
spin_unlock_irqrestore(&toshiba_rbtx4938_ioc_lock, flags);
}
static void
toshiba_rbtx4938_irq_ioc_mask_and_ack(unsigned int irq)
{
toshiba_rbtx4938_irq_ioc_disable(irq);
}
static void

View File

@ -417,14 +417,7 @@ void vr41xx_disable_bcuint(void)
EXPORT_SYMBOL(vr41xx_disable_bcuint);
static unsigned int startup_sysint1_irq(unsigned int irq)
{
icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
return 0; /* never anything pending */
}
static void shutdown_sysint1_irq(unsigned int irq)
static void disable_sysint1_irq(unsigned int irq)
{
icu1_clear(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
}
@ -434,9 +427,6 @@ static void enable_sysint1_irq(unsigned int irq)
icu1_set(MSYSINT1REG, 1 << SYSINT1_IRQ_TO_PIN(irq));
}
#define disable_sysint1_irq shutdown_sysint1_irq
#define ack_sysint1_irq shutdown_sysint1_irq
static void end_sysint1_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
@ -445,22 +435,14 @@ static void end_sysint1_irq(unsigned int irq)
static struct irq_chip sysint1_irq_type = {
.typename = "SYSINT1",
.startup = startup_sysint1_irq,
.shutdown = shutdown_sysint1_irq,
.enable = enable_sysint1_irq,
.disable = disable_sysint1_irq,
.ack = ack_sysint1_irq,
.ack = disable_sysint1_irq,
.mask = disable_sysint1_irq,
.mask_ack = disable_sysint1_irq,
.unmask = enable_sysint1_irq,
.end = end_sysint1_irq,
};
static unsigned int startup_sysint2_irq(unsigned int irq)
{
icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
return 0; /* never anything pending */
}
static void shutdown_sysint2_irq(unsigned int irq)
static void disable_sysint2_irq(unsigned int irq)
{
icu2_clear(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
}
@ -470,9 +452,6 @@ static void enable_sysint2_irq(unsigned int irq)
icu2_set(MSYSINT2REG, 1 << SYSINT2_IRQ_TO_PIN(irq));
}
#define disable_sysint2_irq shutdown_sysint2_irq
#define ack_sysint2_irq shutdown_sysint2_irq
static void end_sysint2_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
@ -481,11 +460,10 @@ static void end_sysint2_irq(unsigned int irq)
static struct irq_chip sysint2_irq_type = {
.typename = "SYSINT2",
.startup = startup_sysint2_irq,
.shutdown = shutdown_sysint2_irq,
.enable = enable_sysint2_irq,
.disable = disable_sysint2_irq,
.ack = ack_sysint2_irq,
.ack = disable_sysint2_irq,
.mask = disable_sysint2_irq,
.mask_ack = disable_sysint2_irq,
.unmask = enable_sysint2_irq,
.end = end_sysint2_irq,
};
@ -723,10 +701,12 @@ static int __init vr41xx_icu_init(void)
icu2_write(MGIUINTHREG, 0xffff);
for (i = SYSINT1_IRQ_BASE; i <= SYSINT1_IRQ_LAST; i++)
irq_desc[i].chip = &sysint1_irq_type;
set_irq_chip_and_handler(i, &sysint1_irq_type,
handle_level_irq);
for (i = SYSINT2_IRQ_BASE; i <= SYSINT2_IRQ_LAST; i++)
irq_desc[i].chip = &sysint2_irq_type;
set_irq_chip_and_handler(i, &sysint2_irq_type,
handle_level_irq);
cascade_irq(INT0_IRQ, icu_get_irq);
cascade_irq(INT1_IRQ, icu_get_irq);

View File

@ -30,17 +30,6 @@ extern void init_8259A(int hoge);
extern int vr4133_rockhopper;
static unsigned int startup_i8259_irq(unsigned int irq)
{
enable_8259A_irq(irq - I8259_IRQ_BASE);
return 0;
}
static void shutdown_i8259_irq(unsigned int irq)
{
disable_8259A_irq(irq - I8259_IRQ_BASE);
}
static void enable_i8259_irq(unsigned int irq)
{
enable_8259A_irq(irq - I8259_IRQ_BASE);
@ -64,11 +53,10 @@ static void end_i8259_irq(unsigned int irq)
static struct irq_chip i8259_irq_type = {
.typename = "XT-PIC",
.startup = startup_i8259_irq,
.shutdown = shutdown_i8259_irq,
.enable = enable_i8259_irq,
.disable = disable_i8259_irq,
.ack = ack_i8259_irq,
.mask = disable_i8259_irq,
.mask_ack = ack_i8259_irq,
.unmask = enable_i8259_irq,
.end = end_i8259_irq,
};
@ -104,7 +92,7 @@ void __init rockhopper_init_irq(void)
}
for (i = I8259_IRQ_BASE; i <= I8259_IRQ_LAST; i++)
irq_desc[i].chip = &i8259_irq_type;
set_irq_chip(i, &i8259_irq_type);
setup_irq(I8259_SLAVE_IRQ, &i8259_slave_cascade);

View File

@ -19,12 +19,16 @@
#define _ATYPE_
#define _ATYPE32_
#define _ATYPE64_
#define _LLCONST_(x) x
#define _CONST64_(x) x
#else
#define _ATYPE_ __PTRDIFF_TYPE__
#define _ATYPE32_ int
#define _ATYPE64_ long long
#define _LLCONST_(x) x ## LL
#define _ATYPE64_ __s64
#ifdef CONFIG_64BIT
#define _CONST64_(x) x ## L
#else
#define _CONST64_(x) x ## LL
#endif
#endif
/*
@ -48,7 +52,7 @@
*/
#define CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
#define XPHYSADDR(a) ((_ACAST64_(a)) & \
_LLCONST_(0x000000ffffffffff))
_CONST64_(0x000000ffffffffff))
#ifdef CONFIG_64BIT
@ -57,14 +61,14 @@
* The compatibility segments use the full 64-bit sign extended value. Note
* the R8000 doesn't have them so don't reference these in generic MIPS code.
*/
#define XKUSEG _LLCONST_(0x0000000000000000)
#define XKSSEG _LLCONST_(0x4000000000000000)
#define XKPHYS _LLCONST_(0x8000000000000000)
#define XKSEG _LLCONST_(0xc000000000000000)
#define CKSEG0 _LLCONST_(0xffffffff80000000)
#define CKSEG1 _LLCONST_(0xffffffffa0000000)
#define CKSSEG _LLCONST_(0xffffffffc0000000)
#define CKSEG3 _LLCONST_(0xffffffffe0000000)
#define XKUSEG _CONST64_(0x0000000000000000)
#define XKSSEG _CONST64_(0x4000000000000000)
#define XKPHYS _CONST64_(0x8000000000000000)
#define XKSEG _CONST64_(0xc000000000000000)
#define CKSEG0 _CONST64_(0xffffffff80000000)
#define CKSEG1 _CONST64_(0xffffffffa0000000)
#define CKSSEG _CONST64_(0xffffffffc0000000)
#define CKSEG3 _CONST64_(0xffffffffe0000000)
#define CKSEG0ADDR(a) (CPHYSADDR(a) | CKSEG0)
#define CKSEG1ADDR(a) (CPHYSADDR(a) | CKSEG1)
@ -122,7 +126,7 @@
#define PHYS_TO_XKSEG_UNCACHED(p) PHYS_TO_XKPHYS(K_CALG_UNCACHED,(p))
#define PHYS_TO_XKSEG_CACHED(p) PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE,(p))
#define XKPHYS_TO_PHYS(p) ((p) & TO_PHYS_MASK)
#define PHYS_TO_XKPHYS(cm,a) (_LLCONST_(0x8000000000000000) | \
#define PHYS_TO_XKPHYS(cm,a) (_CONST64_(0x8000000000000000) | \
((cm)<<59) | (a))
#if defined (CONFIG_CPU_R4300) \
@ -132,20 +136,20 @@
|| defined (CONFIG_CPU_NEVADA) \
|| defined (CONFIG_CPU_TX49XX) \
|| defined (CONFIG_CPU_MIPS64)
#define TO_PHYS_MASK _LLCONST_(0x0000000fffffffff) /* 2^^36 - 1 */
#define TO_PHYS_MASK _CONST64_(0x0000000fffffffff) /* 2^^36 - 1 */
#endif
#if defined (CONFIG_CPU_R8000)
/* We keep KUSIZE consistent with R4000 for now (2^^40) instead of (2^^48) */
#define TO_PHYS_MASK _LLCONST_(0x000000ffffffffff) /* 2^^40 - 1 */
#define TO_PHYS_MASK _CONST64_(0x000000ffffffffff) /* 2^^40 - 1 */
#endif
#if defined (CONFIG_CPU_R10000)
#define TO_PHYS_MASK _LLCONST_(0x000000ffffffffff) /* 2^^40 - 1 */
#define TO_PHYS_MASK _CONST64_(0x000000ffffffffff) /* 2^^40 - 1 */
#endif
#if defined(CONFIG_CPU_SB1) || defined(CONFIG_CPU_SB1A)
#define TO_PHYS_MASK _LLCONST_(0x00000fffffffffff) /* 2^^44 - 1 */
#define TO_PHYS_MASK _CONST64_(0x00000fffffffffff) /* 2^^44 - 1 */
#endif
#ifndef CONFIG_CPU_R8000
@ -155,7 +159,7 @@
* in order to catch bugs in the source code.
*/
#define COMPAT_K1BASE32 _LLCONST_(0xffffffffa0000000)
#define COMPAT_K1BASE32 _CONST64_(0xffffffffa0000000)
#define PHYS_TO_COMPATK1(x) ((x) | COMPAT_K1BASE32) /* 32-bit compat k1 */
#endif
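
As a rough user-space illustration (not kernel code) of what the macros above evaluate to on a 64-bit build: CKSEG1ADDR() folds a physical address into the 32-bit compatibility uncached window, while PHYS_TO_XKPHYS() places it in XKPHYS with an explicit cache attribute. The simplified macro copies, the sample physical address and the cache attribute value are assumptions for the demo only.

#include <stdio.h>
#include <stdint.h>

#define DEMO_CKSEG1		0xffffffffa0000000ULL
#define DEMO_CPHYSADDR(a)	((uint64_t)(a) & 0x1fffffffULL)
#define DEMO_CKSEG1ADDR(a)	(DEMO_CPHYSADDR(a) | DEMO_CKSEG1)
#define DEMO_PHYS_TO_XKPHYS(cm, a) \
	(0x8000000000000000ULL | ((uint64_t)(cm) << 59) | (uint64_t)(a))

int main(void)
{
	uint64_t phys = 0x14000000ULL;	/* sample physical address */

	/* compatibility segment, uncached: prints 0xffffffffb4000000 */
	printf("CKSEG1ADDR   = %#llx\n",
	       (unsigned long long)DEMO_CKSEG1ADDR(phys));
	/* XKPHYS with cache attribute 2 (uncached): prints 0x9000000014000000 */
	printf("XKPHYS(2, a) = %#llx\n",
	       (unsigned long long)DEMO_PHYS_TO_XKPHYS(2, phys));
	return 0;
}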

View File

@ -9,16 +9,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 97, 99, 2000, 03, 04 by Ralf Baechle
* Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
*/
/*
* As workaround for the ATOMIC_DEC_AND_LOCK / atomic_dec_and_lock mess in
* <linux/spinlock.h> we have to include <linux/spinlock.h> outside the
* main big wrapper ...
*/
#include <linux/spinlock.h>
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

View File

@ -10,51 +10,32 @@
#define _ASM_BITOPS_H
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h> /* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL "ll "
#define __SC "sc "
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL "lld "
#define __SC "scd "
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif
#ifdef __KERNEL__
#include <linux/irqflags.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
/*
* clear_bit() doesn't provide any barrier for the compiler.
*/
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
/*
* Only disable interrupt for kernel mode stuff to keep usermode stuff
* that dares to use kernel include files alive.
*/
#define __bi_flags unsigned long flags
#define __bi_local_irq_save(x) local_irq_save(x)
#define __bi_local_irq_restore(x) local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */
/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
@ -93,13 +74,13 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
} else {
volatile unsigned long *a = addr;
unsigned long mask;
__bi_flags;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << (nr & SZLONG_MASK);
__bi_local_irq_save(flags);
local_irq_save(flags);
*a |= mask;
__bi_local_irq_restore(flags);
local_irq_restore(flags);
}
}
@ -141,13 +122,13 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
} else {
volatile unsigned long *a = addr;
unsigned long mask;
__bi_flags;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << (nr & SZLONG_MASK);
__bi_local_irq_save(flags);
local_irq_save(flags);
*a &= ~mask;
__bi_local_irq_restore(flags);
local_irq_restore(flags);
}
}
@ -191,13 +172,13 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
} else {
volatile unsigned long *a = addr;
unsigned long mask;
__bi_flags;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << (nr & SZLONG_MASK);
__bi_local_irq_save(flags);
local_irq_save(flags);
*a ^= mask;
__bi_local_irq_restore(flags);
local_irq_restore(flags);
}
}
@ -258,14 +239,14 @@ static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *a = addr;
unsigned long mask;
int retval;
__bi_flags;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << (nr & SZLONG_MASK);
__bi_local_irq_save(flags);
local_irq_save(flags);
retval = (mask & *a) != 0;
*a |= mask;
__bi_local_irq_restore(flags);
local_irq_restore(flags);
return retval;
}
@ -330,14 +311,14 @@ static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *a = addr;
unsigned long mask;
int retval;
__bi_flags;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << (nr & SZLONG_MASK);
__bi_local_irq_save(flags);
local_irq_save(flags);
retval = (mask & *a) != 0;
*a &= ~mask;
__bi_local_irq_restore(flags);
local_irq_restore(flags);
return retval;
}
@ -399,23 +380,19 @@ static inline int test_and_change_bit(unsigned long nr,
} else {
volatile unsigned long *a = addr;
unsigned long mask, retval;
__bi_flags;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << (nr & SZLONG_MASK);
__bi_local_irq_save(flags);
local_irq_save(flags);
retval = (mask & *a) != 0;
*a ^= mask;
__bi_local_irq_restore(flags);
local_irq_restore(flags);
return retval;
}
}
#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore
#include <asm-generic/bitops/non-atomic.h>
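
The substance of the change above is that the no-ll/sc fallback paths now use plain local_irq_save()/local_irq_restore(), since the header no longer pretends to be usable from user space. For reference, a hedged sketch of how a caller pairs clear_bit() with the explicit barriers mentioned near the top of the file; the flag bit, flag word and wait queue are made-up names:

#include <linux/bitops.h>
#include <linux/wait.h>

#define EXAMPLE_BUSY	0		/* hypothetical flag bit */

static unsigned long example_flags;
static DECLARE_WAIT_QUEUE_HEAD(example_wq);

static void example_release(void)
{
	/*
	 * clear_bit() is atomic but is not a memory barrier, so order the
	 * stores that precede it and the wake-up that follows it explicitly.
	 */
	smp_mb__before_clear_bit();
	clear_bit(EXAMPLE_BUSY, &example_flags);
	smp_mb__after_clear_bit();
	wake_up(&example_wq);
}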
/*

View File

@ -1,6 +1,7 @@
#ifndef __ASM_BUG_H
#define __ASM_BUG_H
#include <asm/sgidefs.h>
#ifdef CONFIG_BUG
@ -13,6 +14,17 @@ do { \
#define HAVE_ARCH_BUG
#if (_MIPS_ISA > _MIPS_ISA_MIPS1)
#define BUG_ON(condition) \
do { \
__asm__ __volatile__("tne $0, %0" : : "r" (condition)); \
} while (0)
#define HAVE_ARCH_BUG_ON
#endif /* _MIPS_ISA > _MIPS_ISA_MIPS1 */
#endif
#include <asm-generic/bug.h>
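
The new BUG_ON() above leans on the conditional trap instructions available from MIPS II onward: "tne $0, %0" traps only when the condition value is non-zero, so the no-bug path costs a single instruction with no branch, and the trap handler takes care of reporting. Usage is unchanged; a trivial, hypothetical caller:

#include <linux/kernel.h>

static void example_consume(void *buf, unsigned int len)
{
	/* each of these becomes one "tne" on MIPS II and better */
	BUG_ON(buf == NULL);
	BUG_ON(len == 0);

	/* ... operate on buf[0..len) ... */
}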

View File

@ -22,12 +22,12 @@
* Descriptor for a cache
*/
struct cache_desc {
unsigned short linesz; /* Size of line in bytes */
unsigned short ways; /* Number of ways */
unsigned short sets; /* Number of lines per set */
unsigned int waysize; /* Bytes per way */
unsigned int waybit; /* Bits to select in a cache set */
unsigned int flags; /* Flags describing cache properties */
unsigned short sets; /* Number of lines per set */
unsigned char ways; /* Number of ways */
unsigned char linesz; /* Size of line in bytes */
unsigned char waybit; /* Bits to select in a cache set */
unsigned char flags; /* Flags describing cache properties */
};
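
The point of the reordering is packing: in the old layout the 4-byte waysize field forced a 2-byte hole after the three shorts, for a 20-byte struct, while the new int/short/char layout fits in 12 bytes. A small stand-alone check with copies of both layouts (illustration only):

#include <stdio.h>

struct cache_desc_old {
	unsigned short linesz, ways, sets;
	unsigned int waysize, waybit, flags;
};

struct cache_desc_new {
	unsigned int waysize;
	unsigned short sets;
	unsigned char ways, linesz, waybit, flags;
};

int main(void)
{
	/* typically prints "old: 20, new: 12" */
	printf("old: %zu, new: %zu\n",
	       sizeof(struct cache_desc_old), sizeof(struct cache_desc_new));
	return 0;
}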
/*

View File

@ -82,11 +82,9 @@
#ifndef __ASSEMBLY__
#include <linux/spinlock.h>
#include <linux/types.h>
extern u32 cached_kn02_csr;
extern spinlock_t kn02_lock;
extern void init_kn02_irqs(int base);
#endif

View File

@ -74,7 +74,9 @@
*
*/
#ifndef GENERIC_ISA_DMA_SUPPORT_BROKEN
#define MAX_DMA_CHANNELS 8
#endif
/*
* The maximum address in KSEG0 that we can perform a DMA transfer to on this

View File

@ -451,6 +451,13 @@
#define GT_SDRAM_OPMODE_OP_MODE 3
#define GT_SDRAM_OPMODE_OP_CBR 4
#define GT_TC_CONTROL_ENTC0_SHF 0
#define GT_TC_CONTROL_ENTC0_MSK (MSK(1) << GT_TC_CONTROL_ENTC0_SHF)
#define GT_TC_CONTROL_ENTC0_BIT GT_TC_CONTROL_ENTC0_MSK
#define GT_TC_CONTROL_SELTC0_SHF 1
#define GT_TC_CONTROL_SELTC0_MSK (MSK(1) << GT_TC_CONTROL_SELTC0_SHF)
#define GT_TC_CONTROL_SELTC0_BIT GT_TC_CONTROL_SELTC0_MSK
#define GT_PCI0_BARE_SWSCS3BOOTDIS_SHF 0
#define GT_PCI0_BARE_SWSCS3BOOTDIS_MSK (MSK(1) << GT_PCI0_BARE_SWSCS3BOOTDIS_SHF)
@ -523,6 +530,13 @@
#define GT_PCI0_CMD_SWORDSWAP_MSK (MSK(1) << GT_PCI0_CMD_SWORDSWAP_SHF)
#define GT_PCI0_CMD_SWORDSWAP_BIT GT_PCI0_CMD_SWORDSWAP_MSK
#define GT_INTR_T0EXP_SHF 8
#define GT_INTR_T0EXP_MSK (MSK(1) << GT_INTR_T0EXP_SHF)
#define GT_INTR_T0EXP_BIT GT_INTR_T0EXP_MSK
#define GT_INTR_RETRYCTR0_SHF 20
#define GT_INTR_RETRYCTR0_MSK (MSK(1) << GT_INTR_RETRYCTR0_SHF)
#define GT_INTR_RETRYCTR0_BIT GT_INTR_RETRYCTR0_MSK
/*
* Misc
*/

View File

@ -113,7 +113,7 @@ static inline void set_io_port_base(unsigned long base)
* almost all conceivable cases a device driver should not be using
* this function
*/
static inline unsigned long virt_to_phys(volatile void * address)
static inline unsigned long virt_to_phys(volatile const void *address)
{
return (unsigned long)address - PAGE_OFFSET;
}

View File

@ -24,8 +24,6 @@ static inline int irq_canonicalize(int irq)
#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
#endif
extern asmlinkage unsigned int do_IRQ(unsigned int irq);
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Clear interrupt mask handling "backstop" if irq_hwmask
@ -43,8 +41,6 @@ do { \
#define __DO_IRQ_SMTC_HOOK() do { } while (0)
#endif
#ifdef CONFIG_PREEMPT
/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
@ -57,12 +53,10 @@ do { \
do { \
irq_enter(); \
__DO_IRQ_SMTC_HOOK(); \
__do_IRQ((irq)); \
generic_handle_irq(irq); \
irq_exit(); \
} while (0)
#endif
extern void arch_init_irq(void);
extern void spurious_interrupt(void);
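
With __do_IRQ() replaced by generic_handle_irq(), do_IRQ() is now just irq_enter(), the flow handler, irq_exit(). A hedged sketch of a typical platform dispatcher feeding it, assuming the CPU interrupt lines map straight to IRQ numbers 2..7 as on many boards; the mapping and the choice of lines are assumptions, not part of this header:

#include <linux/interrupt.h>
#include <linux/linkage.h>
#include <asm/irq.h>
#include <asm/mipsregs.h>

asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;

	if (pending & CAUSEF_IP7)		/* CPU count/compare timer */
		do_IRQ(7);
	else if (pending & CAUSEF_IP2)		/* cascaded board controller */
		do_IRQ(2);
	else
		spurious_interrupt();
}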

32
include/asm-mips/kexec.h Normal file
View File

@ -0,0 +1,32 @@
/*
* kexec.h for kexec
* Created by <nschichan@corp.free.fr> on Thu Oct 12 14:59:34 2006
*
* This source code is licensed under the GNU General Public License,
* Version 2. See the file COPYING for more details.
*/
#ifndef _MIPS_KEXEC
# define _MIPS_KEXEC
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
#define KEXEC_CONTROL_CODE_SIZE 4096
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_MIPS
#define MAX_NOTE_BYTES 1024
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
/* Dummy implementation for now */
}
#endif /* !_MIPS_KEXEC */

View File

@ -67,34 +67,9 @@
#define COBALT_BRD_ID_QUBE2 0x5
#define COBALT_BRD_ID_RAQ2 0x6
/*
* Galileo chipset access macros for the Cobalt. The base address for
* the GT64111 chip is 0x14000000
*
* Most of this really should go into a separate GT64111 header file.
*/
#define GT64111_IO_BASE 0x10000000UL
#define GT64111_IO_END 0x11ffffffUL
#define GT64111_MEM_BASE 0x12000000UL
#define GT64111_MEM_END 0x13ffffffUL
#define GT64111_BASE 0x14000000UL
#define GALILEO_REG(ofs) CKSEG1ADDR(GT64111_BASE + (unsigned long)(ofs))
#define GALILEO_INL(port) (*(volatile unsigned int *) GALILEO_REG(port))
#define GALILEO_OUTL(val, port) \
do { \
*(volatile unsigned int *) GALILEO_REG(port) = (val); \
} while (0)
#define GALILEO_INTR_T0EXP (1 << 8)
#define GALILEO_INTR_RETRY_CTR (1 << 20)
#define GALILEO_ENTC0 0x01
#define GALILEO_SELTC0 0x02
#define PCI_CFG_SET(devfn,where) \
GALILEO_OUTL((0x80000000 | (PCI_SLOT (devfn) << 11) | \
(PCI_FUNC (devfn) << 8) | (where)), GT_PCI0_CFGADDR_OFS)
GT_WRITE(GT_PCI0_CFGADDR_OFS, (0x80000000 | (PCI_SLOT (devfn) << 11) | \
(PCI_FUNC (devfn) << 8) | (where)))
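
PCI_CFG_SET() builds a type-0 configuration address word: bit 31 enables the cycle, the device number goes in bits 15:11, the function in bits 10:8 and the register offset in the low byte; the word is now written through GT_WRITE() instead of the removed GALILEO_OUTL(). A stand-alone illustration of the encoding (helper name and sample values are made up):

#include <stdio.h>
#include <stdint.h>

static uint32_t cfg_addr(unsigned int slot, unsigned int func, unsigned int where)
{
	return 0x80000000u | (slot << 11) | (func << 8) | where;
}

int main(void)
{
	/* device 7, function 0, register 0x04 (the PCI command register) */
	printf("%#x\n", cfg_addr(7, 0, 0x04));	/* prints 0x80003804 */
	return 0;
}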
#define COBALT_LED_PORT (*(volatile unsigned char *) CKSEG1ADDR(0x1c000000))
# define COBALT_LED_BAR_LEFT (1 << 0) /* Qube */

View File

@ -1 +1,27 @@
/* there's something here ... in the dark */
/*
* Copyright (C) 2006 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _COBALT_MACH_GT64120_H
#define _COBALT_MACH_GT64120_H
/*
* Cobalt uses GT64111. GT64111 is almost the same as GT64120.
*/
#define GT64120_BASE CKSEG1ADDR(GT_DEF_BASE)
#endif /* _COBALT_MACH_GT64120_H */

View File

@ -545,62 +545,6 @@
#define MIPS_FPIR_L (_ULCAST_(1) << 21)
#define MIPS_FPIR_F64 (_ULCAST_(1) << 22)
/*
* R10000 performance counter definitions.
*
* FIXME: The R10000 performance counter opens a nice way to implement CPU
* time accounting with a precision of one cycle. I don't have
* R10000 silicon but just a manual, so ...
*/
/*
* Events counted by counter #0
*/
#define CE0_CYCLES 0
#define CE0_INSN_ISSUED 1
#define CE0_LPSC_ISSUED 2
#define CE0_S_ISSUED 3
#define CE0_SC_ISSUED 4
#define CE0_SC_FAILED 5
#define CE0_BRANCH_DECODED 6
#define CE0_QW_WB_SECONDARY 7
#define CE0_CORRECTED_ECC_ERRORS 8
#define CE0_ICACHE_MISSES 9
#define CE0_SCACHE_I_MISSES 10
#define CE0_SCACHE_I_WAY_MISSPREDICTED 11
#define CE0_EXT_INTERVENTIONS_REQ 12
#define CE0_EXT_INVALIDATE_REQ 13
#define CE0_VIRTUAL_COHERENCY_COND 14
#define CE0_INSN_GRADUATED 15
/*
* Events counted by counter #1
*/
#define CE1_CYCLES 0
#define CE1_INSN_GRADUATED 1
#define CE1_LPSC_GRADUATED 2
#define CE1_S_GRADUATED 3
#define CE1_SC_GRADUATED 4
#define CE1_FP_INSN_GRADUATED 5
#define CE1_QW_WB_PRIMARY 6
#define CE1_TLB_REFILL 7
#define CE1_BRANCH_MISSPREDICTED 8
#define CE1_DCACHE_MISS 9
#define CE1_SCACHE_D_MISSES 10
#define CE1_SCACHE_D_WAY_MISSPREDICTED 11
#define CE1_EXT_INTERVENTION_HITS 12
#define CE1_EXT_INVALIDATE_REQ 13
#define CE1_SP_HINT_TO_CEXCL_SC_BLOCKS 14
#define CE1_SP_HINT_TO_SHARED_SC_BLOCKS 15
/*
* These flags define in which privilege mode the counters count events
*/
#define CEB_USER 8 /* Count events in user mode, EXL = ERL = 0 */
#define CEB_SUPERVISOR 4 /* Count events in supervisor mode EXL = ERL = 0 */
#define CEB_KERNEL 2 /* Count events in kernel mode EXL = ERL = 0 */
#define CEB_EXL 1 /* Count events with EXL = 1, ERL = 0 */
#ifndef __ASSEMBLY__
/*

View File

@ -34,7 +34,9 @@
#ifndef __ASSEMBLY__
#include <linux/pfn.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
extern void clear_page(void * page);
extern void copy_page(void * to, void * from);
@ -134,8 +136,14 @@ typedef struct { unsigned long pgprot; } pgprot_t;
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#if defined(CONFIG_64BIT) && !defined(CONFIG_BUILD_ELF64)
#define __pa_page_offset(x) ((unsigned long)(x) < CKSEG0 ? PAGE_OFFSET : CKSEG0)
#else
#define __pa_page_offset(x) PAGE_OFFSET
#endif
#define __pa(x) ((unsigned long)(x) - __pa_page_offset(x))
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0))
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
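
For 64-bit kernels built without CONFIG_BUILD_ELF64 the kernel image is linked in CKSEG0 while ordinary mappings sit above PAGE_OFFSET, so __pa() now subtracts whichever base the address actually falls under. A stand-alone numeric illustration; the PAGE_OFFSET value used below is the usual XKPHYS cached base and is an assumption for the demo:

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_OFFSET 0xa800000000000000ULL	/* assumed XKPHYS cached base */
#define DEMO_CKSEG0      0xffffffff80000000ULL

/* mirrors the new __pa_page_offset()/__pa() selection */
static uint64_t demo_pa(uint64_t x)
{
	uint64_t base = (x < DEMO_CKSEG0) ? DEMO_PAGE_OFFSET : DEMO_CKSEG0;
	return x - base;
}

int main(void)
{
	/* an XKPHYS mapping and a kernel-image address, both yield 0x100000 */
	printf("%#llx\n", (unsigned long long)demo_pa(0xa800000000100000ULL));
	printf("%#llx\n", (unsigned long long)demo_pa(0xffffffff80100000ULL));
	return 0;
}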
@ -160,8 +168,8 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#endif
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr)))
#define virt_addr_valid(kaddr) pfn_valid(PFN_DOWN(virt_to_phys(kaddr)))
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

View File

@ -14,6 +14,7 @@
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/cachectl.h>
#include <asm/fixmap.h>
#include <asm-generic/pgtable-nopud.h>
@ -103,6 +104,13 @@
#define VMALLOC_START MAP_BASE
#define VMALLOC_END \
(VMALLOC_START + PTRS_PER_PGD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE)
#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64) && \
VMALLOC_START != CKSSEG
/* Load modules into 32bit-compatible segment. */
#define MODULE_START CKSSEG
#define MODULE_END (FIXADDR_START-2*PAGE_SIZE)
extern pgd_t module_pg_dir[PTRS_PER_PGD];
#endif
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
@ -174,7 +182,12 @@ static inline void pud_clear(pud_t *pudp)
#define __pmd_offset(address) pmd_index(address)
/* to find an entry in a kernel page-table-directory */
#ifdef MODULE_START
#define pgd_offset_k(address) \
((address) >= MODULE_START ? module_pg_dir : pgd_offset(&init_mm, 0UL))
#else
#define pgd_offset_k(address) pgd_offset(&init_mm, 0UL)
#endif
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

View File

@ -67,7 +67,7 @@ extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
#define ZERO_PAGE(vaddr) \
(virt_to_page(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask)))
(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr) \

View File

@ -80,8 +80,6 @@ struct pt_regs {
#define instruction_pointer(regs) ((regs)->cp0_epc)
#define profile_pc(regs) instruction_pointer(regs)
extern void show_regs(struct pt_regs *);
extern asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit);
#endif

View File

@ -21,6 +21,7 @@
#include <linux/ptrace.h>
#include <linux/rtc.h>
#include <linux/spinlock.h>
#include <linux/clocksource.h>
extern spinlock_t rtc_lock;
@ -44,12 +45,10 @@ extern int (*mips_timer_state)(void);
extern void (*mips_timer_ack)(void);
/*
* High precision timer functions.
* If mips_hpt_read is NULL, an R4k-compatible timer setup is attempted.
* High precision timer clocksource.
* If .read is NULL, an R4k-compatible timer setup is attempted.
*/
extern unsigned int (*mips_hpt_read)(void);
extern void (*mips_hpt_init)(void);
extern unsigned int mips_hpt_mask;
extern struct clocksource clocksource_mips;
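
The separate mips_hpt_read/mips_hpt_init/mips_hpt_mask hooks are gone; a platform now fills in clocksource_mips before the generic time code registers it, as the SB1250 conversion earlier in this commit does. A minimal sketch against the 2.6.19-era clocksource API; the counter register address, its 32-bit width and the 25 MHz rate are hypothetical:

#include <linux/clocksource.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/time.h>

#define EXAMPLE_COUNTER_REG	0xbc001000UL	/* hypothetical free-running counter */

static cycle_t example_hpt_read(void)
{
	return __raw_readl((void __iomem *)EXAMPLE_COUNTER_REG);
}

void __init example_hpt_setup(void)
{
	mips_hpt_frequency = 25000000;			/* assumed counter rate */
	clocksource_mips.read = example_hpt_read;
	clocksource_mips.mask = CLOCKSOURCE_MASK(32);	/* assumed 32-bit counter */
}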
/*
* to_tm() converts system time back to (year, mon, day, hour, min, sec).

View File

@ -122,6 +122,8 @@ extern struct kimage *kexec_crash_image;
#define KEXEC_ARCH_IA_64 (50 << 16)
#define KEXEC_ARCH_S390 (22 << 16)
#define KEXEC_ARCH_SH (42 << 16)
#define KEXEC_ARCH_MIPS_LE (10 << 16)
#define KEXEC_ARCH_MIPS ( 8 << 16)
#define KEXEC_FLAGS (KEXEC_ON_CRASH) /* List of defined/legal kexec flags */