[IA64] Implement smp_call_function_mask for ia64

This interface provides more flexible functionality for smp
infrastructure ... e.g. KVM frequently needs to operate on
a subset of cpus.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Authored by Xiantao Zhang on 2008-04-03 11:39:43 -07:00; committed by Tony Luck
parent 96651896b8
commit 31a6b11fed
2 changed files with 85 additions and 0 deletions
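
Since the commit message cites callers such as KVM that only need to hit a subset of cpus, here is a minimal, hypothetical caller sketch (not part of this commit; count_on_cpu() and run_on_other_cpus() are made-up names) showing how the new export is meant to be driven:

/*
 * Hypothetical caller sketch -- not part of this commit.  It shows how a
 * subsystem could run a short function on a subset of the online CPUs
 * through the new smp_call_function_mask() export.
 */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <asm/atomic.h>

static void count_on_cpu(void *info)
{
        /* Runs in interrupt context on each CPU in the mask: keep it short. */
        atomic_inc((atomic_t *)info);
}

static int run_on_other_cpus(cpumask_t targets)
{
        atomic_t done = ATOMIC_INIT(0);
        int ret;

        /*
         * Per the kerneldoc below: the mask must not contain the current
         * cpu, and interrupts must be enabled when calling in.
         */
        preempt_disable();
        cpu_clear(smp_processor_id(), targets);
        ret = smp_call_function_mask(targets, count_on_cpu, &done, 1);
        preempt_enable();

        return ret;
}

With wait set to 1, the call only returns after count_on_cpu() has finished on every CPU that remained in the mask, so 'done' can safely live on the caller's stack.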

arch/ia64/kernel/smp.c

@@ -209,6 +209,19 @@ send_IPI_allbutself (int op)
        }
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_mask(cpumask_t mask, int op)
{
        unsigned int cpu;

        for_each_cpu_mask(cpu, mask) {
                send_IPI_single(cpu, op);
        }
}

/*
 * Called with preemption disabled.
 */
@@ -401,6 +414,75 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
}
EXPORT_SYMBOL(smp_call_function_single);

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * <mask>   The set of cpus to run on.  Must not include the current cpu.
 * <func>   The function to run. This must be fast and non-blocking.
 * <info>   An arbitrary pointer to pass to the function.
 * <wait>   If true, wait (atomically) until function
 *          has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask,
                           void (*func)(void *), void *info,
                           int wait)
{
        struct call_data_struct data;
        cpumask_t allbutself;
        int cpus;

        spin_lock(&call_lock);
        allbutself = cpu_online_map;
        cpu_clear(smp_processor_id(), allbutself);

        cpus_and(mask, mask, allbutself);
        cpus = cpus_weight(mask);

        if (!cpus) {
                spin_unlock(&call_lock);
                return 0;
        }

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */

        /* Send a message to other CPUs */
        if (cpus_equal(mask, allbutself))
                send_IPI_allbutself(IPI_CALL_FUNC);
        else
                send_IPI_mask(mask, IPI_CALL_FUNC);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        cpu_relax();

        call_data = NULL;

        spin_unlock(&call_lock);
        return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
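
For context on the started/finished counters used above: they are consumed by the IPI_CALL_FUNC handling that already exists in this file. A simplified, paraphrased sketch of the receiving CPU's side of the handshake (illustrative only, not part of this diff) looks roughly like:

/*
 * Illustrative sketch of the receiver side of the call_data handshake,
 * paraphrased from the existing IPI_CALL_FUNC handling in handle_IPI();
 * names and structure are simplified and not part of this commit.
 */
static void sketch_handle_call_func(void)
{
        struct call_data_struct *data = call_data;
        void (*func)(void *) = data->func;
        void *info = data->info;
        int wait = data->wait;

        /*
         * Copy func/info/wait out of *data before bumping 'started':
         * once the sender has seen every target report in, it may unwind,
         * and the call_data_struct lives on the sender's stack.
         */
        mb();
        atomic_inc(&data->started);

        (*func)(info);

        if (wait) {
                mb();
                atomic_inc(&data->finished);
        }
}

This is why smp_call_function_mask() busy-waits on data.started even when wait is zero: it must not return (and release call_lock or its stack frame) before every target has copied the call details.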

include/asm-ia64/smp.h

@@ -38,6 +38,9 @@ ia64_get_lid (void)
        return lid.f.id << 8 | lid.f.eid;
}

extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
                                  void *info, int wait);

#define hard_smp_processor_id()        ia64_get_lid()

#ifdef CONFIG_SMP