diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 65b52320d14..83a4ea6e3d6 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -57,7 +57,7 @@ static void smp_ext_bitcall(int, ec_bit_sig);
 static void smp_ext_bitcall_others(ec_bit_sig);
 
 /*
-5B * Structure and data for smp_call_function(). This is designed to minimise
+ * Structure and data for smp_call_function(). This is designed to minimise
  * static memory requirements. It also looks cleaner.
  */
 static DEFINE_SPINLOCK(call_lock);
@@ -104,7 +104,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
  * remote CPUs are nearly ready to execute <> or are or have executed.
  *
  * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
+ * hardware interrupt handler.
  */
 {
 	struct call_data_struct data;
@@ -113,8 +113,8 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	if (cpus <= 0)
 		return 0;
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
+	/* Can deadlock when interrupts are disabled or if in wrong context */
+	WARN_ON(irqs_disabled() || in_irq());
 
 	data.func = func;
 	data.info = info;
@@ -123,7 +123,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	if (wait)
 		atomic_set(&data.finished, 0);
 
-	spin_lock(&call_lock);
+	spin_lock_bh(&call_lock);
 	call_data = &data;
 	/* Send a message to all other CPUs and wait for them to respond */
 	smp_ext_bitcall_others(ec_call_function);
@@ -135,7 +135,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
 	if (wait)
 		while (atomic_read(&data.finished) != cpus)
 			cpu_relax();
-	spin_unlock(&call_lock);
+	spin_unlock_bh(&call_lock);
 
 	return 0;
 }
@@ -159,6 +159,9 @@ int smp_call_function_on(void (*func) (void *info), void *info,
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
+	/* Can deadlock when interrupts are disabled or if in wrong context */
+	WARN_ON(irqs_disabled() || in_irq());
+
 	/* disable preemption for local function call */
 	curr_cpu = get_cpu();
 
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index f17275917fe..997f4687453 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -11,6 +11,7 @@
 #include
 #include
+#include
 
 #include
 #include
@@ -138,7 +139,7 @@ VM_virtual_device_info (__u16 devno, struct senseid *ps)
 		ps->cu_model = 0x60;
 		return;
 	}
-	for (i = 0; i < sizeof(vm_devices) / sizeof(vm_devices[0]); i++)
+	for (i = 0; i < ARRAY_SIZE(vm_devices); i++)
 		if (diag_data.vrdcvcla == vm_devices[i].vrdcvcla &&
 		    diag_data.vrdcvtyp == vm_devices[i].vrdcvtyp) {
 			ps->cu_type = vm_devices[i].cu_type;
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index d7b25b8f71d..7c7775aae38 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -23,8 +23,7 @@
 #include "chsc.h"
 #include "device.h"
 
-int
-ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
+int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
 {
 	/*
 	 * The flag usage is mutal exclusive ...
@@ -39,6 +38,33 @@ ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
 	return 0;
 }
 
+int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
+{
+	/*
+	 * The flag usage is mutal exclusive ...
+	 */
+	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
+	     (flags & CCWDEV_REPORT_ALL)) ||
+	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
+	     cdev->private->options.repall) ||
+	    ((flags & CCWDEV_REPORT_ALL) &&
+	     cdev->private->options.fast))
+		return -EINVAL;
+	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
+	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
+	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
+	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
+	return 0;
+}
+
+void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
+{
+	cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
+	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
+	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
+	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
+}
+
 int
 ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
 {
@@ -601,7 +627,9 @@ _ccw_device_get_device_number(struct ccw_device *cdev)
 
 
 MODULE_LICENSE("GPL");
+EXPORT_SYMBOL(ccw_device_set_options_mask);
 EXPORT_SYMBOL(ccw_device_set_options);
+EXPORT_SYMBOL(ccw_device_clear_options);
 EXPORT_SYMBOL(ccw_device_clear);
 EXPORT_SYMBOL(ccw_device_halt);
 EXPORT_SYMBOL(ccw_device_resume);
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index d726cd5777d..5b1e3ff26c0 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -3194,7 +3194,7 @@ qdio_establish(struct qdio_initialize *init_data)
 
 	spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
 
-	ccw_device_set_options(cdev, 0);
+	ccw_device_set_options_mask(cdev, 0);
 	result=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
 					QDIO_DOING_ESTABLISH,0, 0,
 					QDIO_ESTABLISH_TIMEOUT);
diff --git a/include/asm-s390/ccwdev.h b/include/asm-s390/ccwdev.h
index 58c70acffc7..cfc81533b9b 100644
--- a/include/asm-s390/ccwdev.h
+++ b/include/asm-s390/ccwdev.h
@@ -110,7 +110,9 @@ extern void ccw_driver_unregister (struct ccw_driver *driver);
 
 struct ccw1;
 
+extern int ccw_device_set_options_mask(struct ccw_device *, unsigned long);
 extern int ccw_device_set_options(struct ccw_device *, unsigned long);
+extern void ccw_device_clear_options(struct ccw_device *, unsigned long);
 
 /* Allow for i/o completion notification after primary interrupt status. */
 #define CCWDEV_EARLY_NOTIFICATION 0x0001
diff --git a/include/asm-s390/io.h b/include/asm-s390/io.h
index dca6a6cc103..b7ff6afc3ca 100644
--- a/include/asm-s390/io.h
+++ b/include/asm-s390/io.h
@@ -17,8 +17,6 @@
 
 #define IO_SPACE_LIMIT 0xffffffff
 
-#define __io_virt(x) ((void *)(PAGE_OFFSET | (unsigned long)(x)))
-
 /*
  * Change virtual addresses to physical addresses and vv.
  * These are pretty trivial
@@ -37,11 +35,9 @@ static inline unsigned long virt_to_phys(volatile void * address)
 
 static inline void * phys_to_virt(unsigned long address)
 {
-	return __io_virt(address);
+	return (void *) address;
 }
 
-#define mmiowb() do { } while (0)
-
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
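
Not part of the patch itself: the sketch below illustrates how the reworked option interface is intended to be used. ccw_device_set_options_mask() overwrites all options at once (as qdio does above with a mask of 0), while the new ccw_device_set_options()/ccw_device_clear_options() pair lets a driver OR in and later drop individual CCWDEV_* flags without disturbing options set elsewhere. Only the ccw_device_*_options() calls and the CCWDEV_* flags come from the patch; the driver setup function below is hypothetical.

#include <asm/ccwdev.h>

/* Hypothetical driver setup helper, for illustration only. */
static int sample_ccw_setup_options(struct ccw_device *cdev)
{
	int ret;

	/* Request path grouping; this flag is not part of any conflict
	 * check in ccw_device_set_options(), so it simply ORs in. */
	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	if (ret)
		return ret;

	/* Ask for early I/O completion notification; this returns -EINVAL
	 * if CCWDEV_REPORT_ALL is already active for the device. */
	ret = ccw_device_set_options(cdev, CCWDEV_EARLY_NOTIFICATION);
	if (ret) {
		/* Drop only the flag this driver added; flags set by
		 * other code paths stay untouched. */
		ccw_device_clear_options(cdev, CCWDEV_DO_PATHGROUP);
		return ret;
	}
	return 0;
}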