linux-2.6/arch/x86/kernel/irq_64.c


/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/io_apic.h>
#include <asm/idle.h>
#include <asm/smp.h>
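/*
 * Bumped by the APIC error handling code; reported as the "ERR:" line
 * of /proc/interrupts by show_interrupts() below.
 */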
atomic_t irq_err_count;
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
        printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But don't ack when the APIC is disabled. -AK
         */
        if (!disable_apic)
                ack_APIC_irq();
}
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/*
 * Probabilistic stack overflow check:
 *
 * Only check the stack in process context, because everything else
 * runs on the big interrupt stacks. Checking reliably is too expensive,
 * so we just check from interrupts.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
        u64 curbase = (u64)task_stack_page(current);
        static unsigned long warned = -60*HZ;
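        /*
         * struct thread_info sits at the bottom of the task stack, so
         * if sp has come within 128 bytes of it we are about to run
         * off the end; warn at most once per minute.
         */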
        if (regs->sp >= curbase && regs->sp <= curbase + THREAD_SIZE &&
            regs->sp < curbase + sizeof(struct thread_info) + 128 &&
            time_after(jiffies, warned + 60*HZ)) {
                printk("do_IRQ: %s near stack overflow (cur:%Lx,sp:%lx)\n",
                       current->comm, curbase, regs->sp);
                show_stack(NULL,NULL);
                warned = jiffies;
        }
}
#endif
/*
 * Generic, controller-independent functions:
 */
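/*
 * Backs /proc/interrupts: one row per device IRQ, followed by summary
 * rows (NMI, LOC, the RES/CAL/TLB IPI counts on SMP, then TRM, THR,
 * SPU and ERR).
 */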
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction * action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, " ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d",j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                unsigned any_count = 0;

                spin_lock_irqsave(&irq_desc[i].lock, flags);
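                /*
                 * any_count is non-zero if some CPU has ever taken this
                 * IRQ; used below to skip IRQs that have no handler and
                 * have never fired.
                 */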
#ifndef CONFIG_SMP
                any_count = kstat_irqs(i);
#else
                for_each_online_cpu(j)
                        any_count |= kstat_cpu(j).irqs[i];
#endif
                action = irq_desc[i].action;
                if (!action && !any_count)
                        goto skip;
                seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %8s", irq_desc[i].chip->name);
                seq_printf(p, "-%-8s", irq_desc[i].name);

                if (action) {
                        seq_printf(p, " %s", action->name);
                        while ((action = action->next) != NULL)
                                seq_printf(p, ", %s", action->name);
                }

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
seq_printf(p, " Non-maskable interrupts\n");
seq_printf(p, "LOC: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
seq_printf(p, " Local timer interrupts\n");
#ifdef CONFIG_SMP
seq_printf(p, "RES: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_resched_count);
seq_printf(p, " Rescheduling interrupts\n");
seq_printf(p, "CAL: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_call_count);
seq_printf(p, " function call interrupts\n");
seq_printf(p, "TLB: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_tlb_count);
seq_printf(p, " TLB shootdowns\n");
#endif
seq_printf(p, "TRM: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_thermal_count);
seq_printf(p, " Thermal event interrupts\n");
seq_printf(p, "THR: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_threshold_count);
seq_printf(p, " Threshold APIC interrupts\n");
seq_printf(p, "SPU: ");
for_each_online_cpu(j)
seq_printf(p, "%10u ", cpu_pda(j)->irq_spurious_count);
seq_printf(p, " Spurious interrupts\n");
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
}
return 0;
}
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        /* high bit used in ret_from_ code */
        unsigned vector = ~regs->orig_ax;
        unsigned irq;

        exit_idle();
        irq_enter();
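        /*
         * The interrupt entry stubs push the complemented vector number
         * into orig_ax (hence the ~ above); the per-CPU vector_irq[]
         * table maps that hardware vector to a Linux irq number.
         */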
        irq = __get_cpu_var(vector_irq)[vector];

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        stack_overflow_check(regs);
#endif

        if (likely(irq < NR_IRQS))
                generic_handle_irq(irq);
        else {
                if (!disable_apic)
                        ack_APIC_irq();

                if (printk_ratelimit())
                        printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
                                __func__, smp_processor_id(), vector);
        }

        irq_exit();
        set_irq_regs(old_regs);
        return 1;
}
#ifdef CONFIG_HOTPLUG_CPU
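/*
 * Called during CPU offline: 'map' is the set of CPUs that will remain
 * online; IRQs whose affinity no longer matches it are re-routed onto
 * the surviving CPUs.
 */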
void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for (irq = 0; irq < NR_IRQS; irq++) {
                cpumask_t mask;
                int break_affinity = 0;
                int set_affinity = 1;
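                /* irq 2 is the cascade irq; leave it alone */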
                if (irq == 2)
                        continue;
                /* interrupts are disabled at this point */
                spin_lock(&irq_desc[irq].lock);

                if (!irq_has_action(irq) ||
                    cpus_equal(irq_desc[irq].affinity, map)) {
                        spin_unlock(&irq_desc[irq].lock);
                        continue;
                }

                cpus_and(mask, irq_desc[irq].affinity, map);
                if (cpus_empty(mask)) {
                        break_affinity = 1;
                        mask = map;
                }
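                /*
                 * Mask the source while its affinity is rewritten so the
                 * device cannot observe a half-updated routing entry,
                 * then unmask it again.
                 */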
                if (irq_desc[irq].chip->mask)
                        irq_desc[irq].chip->mask(irq);
                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, mask);
                else if (!(warned++))
                        set_affinity = 0;

                if (irq_desc[irq].chip->unmask)
                        irq_desc[irq].chip->unmask(irq);

                spin_unlock(&irq_desc[irq].lock);

                if (break_affinity && set_affinity)
                        printk("Broke affinity for irq %i\n", irq);
                else if (!set_affinity)
                        printk("Cannot set affinity for irq %i\n", irq);
        }
        /* That doesn't seem sufficient. Give it 1ms. */
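        /*
         * Re-enabling interrupts for a moment lets anything already in
         * flight to this CPU be delivered before it actually goes away.
         */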
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif
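/*
 * call_softirq() is an assembly helper that switches to the per-CPU
 * interrupt stack before running __do_softirq().
 */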
extern void call_softirq(void);
asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);
        pending = local_softirq_pending();
        /* Switch to interrupt stack */
        if (pending) {
                call_softirq();
                WARN_ON_ONCE(softirq_count());
        }
        local_irq_restore(flags);
}