Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (36 commits)
  [S390] Remove code duplication from monreader / dcssblk.
  [S390] kernel: show last breaking-event-address on oops
  [S390] lowcore: Change type of lowcores softirq_pending to __u32.
  [S390] zcrypt: Comments and kernel-doc cleanup
  [S390] uaccess: Always access the correct address space.
  [S390] Fix a lot of sparse warnings.
  [S390] Convert s390 to GENERIC_CLOCKEVENTS.
  [S390] genirq/clockevents: move irq affinity prototypes/inlines to interrupt.h
  [S390] Convert monitor calls to function calls.
  [S390] qdio (new feature): enhancing info-retrieval from QDIO-adapters
  [S390] replace remaining __FUNCTION__ occurrences
  [S390] remove redundant display of free swap space in show_mem()
  [S390] qdio: remove outdated developerworks link.
  [S390] Add debug_register_mode() function to debug feature API
  [S390] crypto: use more descriptive function names for init/exit routines.
  [S390] switch sched_clock to store-clock-extended.
  [S390] zcrypt: add support for large random numbers
  [S390] hw_random: allow rng_dev_read() to return hardware errors.
  [S390] Vertical cpu management.
  [S390] cpu topology support for s390.
  ...
Linus Torvalds, 2008-04-18 08:19:15 -07:00
commit 4cba84b5d6
95 changed files with 2180 additions and 1116 deletions


@ -115,6 +115,27 @@ Return Value: Handle for generated debug area
Description: Allocates memory for a debug log
Must not be called within an interrupt handler
----------------------------------------------------------------------------
debug_info_t *debug_register_mode(char *name, int pages, int nr_areas,
int buf_size, mode_t mode, uid_t uid,
gid_t gid);
Parameter: name: Name of debug log (e.g. used for debugfs entry)
pages: Number of pages, which will be allocated per area
nr_areas: Number of debug areas
buf_size: Size of data area in each debug entry
mode: File mode for debugfs files. E.g. S_IRWXUGO
uid: User ID for debugfs files. Currently only 0 is
supported.
gid: Group ID for debugfs files. Currently only 0 is
supported.
Return Value: Handle for generated debug area
NULL if register failed
Description: Allocates memory for a debug log
Must not be called within an interrupt handler
---------------------------------------------------------------------------
void debug_unregister (debug_info_t * id);
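For illustration, a minimal usage sketch of the interface documented above. The log name "my_dbf", the sizes, and the S_IRUGO | S_IWUSR mode are made-up example values, and per the text above only uid/gid 0 are currently supported:

#include <linux/module.h>
#include <linux/stat.h>
#include <asm/debug.h>

static debug_info_t *my_dbf;	/* hypothetical driver-private handle */

static int __init my_init(void)
{
	/* 4 pages per area, 1 area, 8 byte buffer size; debugfs files
	 * readable by everyone, writable only by root (uid 0, gid 0). */
	my_dbf = debug_register_mode("my_dbf", 4, 1, 8,
				     S_IRUGO | S_IWUSR, 0, 0);
	if (!my_dbf)		/* NULL means registration failed */
		return -ENOMEM;
	return 0;
}

static void __exit my_exit(void)
{
	debug_unregister(my_dbf);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");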


@ -3,6 +3,10 @@
# see Documentation/kbuild/kconfig-language.txt.
#
config SCHED_MC
def_bool y
depends on SMP
config MMU
def_bool y
@ -39,6 +43,9 @@ config GENERIC_HWEIGHT
config GENERIC_TIME
def_bool y
config GENERIC_CLOCKEVENTS
def_bool y
config GENERIC_BUG
bool
depends on BUG
@ -69,6 +76,8 @@ menu "Base setup"
comment "Processor type and features"
source "kernel/time/Kconfig"
config 64BIT
bool "64 bit kernel"
help
@ -301,10 +310,7 @@ config QDIO
tristate "QDIO support"
---help---
This driver provides the Queued Direct I/O base support for
IBM mainframes.
For details please refer to the documentation provided by IBM at
<http://www10.software.ibm.com/developerworks/opensource/linux390>
IBM System z.
To compile this driver as a module, choose M here: the
module will be called qdio.
@ -486,25 +492,6 @@ config APPLDATA_NET_SUM
source kernel/Kconfig.hz
config NO_IDLE_HZ
bool "No HZ timer ticks in idle"
help
Switches the regular HZ timer off when the system is going idle.
This helps z/VM to detect that the Linux system is idle. VM can
then "swap-out" this guest which reduces memory usage. It also
reduces the overhead of idle systems.
The HZ timer can be switched on/off via /proc/sys/kernel/hz_timer.
hz_timer=0 means HZ timer is disabled. hz_timer=1 means HZ
timer is active.
config NO_IDLE_HZ_INIT
bool "HZ timer in idle off by default"
depends on NO_IDLE_HZ
help
The HZ timer is switched off in idle by default. That means the
HZ timer is already disabled at boot time.
config S390_HYPFS_FS
bool "s390 hypervisor file system support"
select SYS_HYPERVISOR


@ -499,7 +499,7 @@ static struct crypto_alg cbc_aes_alg = {
}
};
static int __init aes_init(void)
static int __init aes_s390_init(void)
{
int ret;
@ -542,15 +542,15 @@ aes_err:
goto out;
}
static void __exit aes_fini(void)
static void __exit aes_s390_fini(void)
{
crypto_unregister_alg(&cbc_aes_alg);
crypto_unregister_alg(&ecb_aes_alg);
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
module_init(aes_s390_init);
module_exit(aes_s390_fini);
MODULE_ALIAS("aes");


@ -550,7 +550,7 @@ static struct crypto_alg cbc_des3_192_alg = {
}
};
static int init(void)
static int des_s390_init(void)
{
int ret = 0;
@ -612,7 +612,7 @@ des_err:
goto out;
}
static void __exit fini(void)
static void __exit des_s390_fini(void)
{
crypto_unregister_alg(&cbc_des3_192_alg);
crypto_unregister_alg(&ecb_des3_192_alg);
@ -625,8 +625,8 @@ static void __exit fini(void)
crypto_unregister_alg(&des_alg);
}
module_init(init);
module_exit(fini);
module_init(des_s390_init);
module_exit(des_s390_fini);
MODULE_ALIAS("des");
MODULE_ALIAS("des3_ede");


@ -137,7 +137,7 @@ static struct crypto_alg alg = {
.dia_final = sha1_final } }
};
static int __init init(void)
static int __init sha1_s390_init(void)
{
if (!crypt_s390_func_available(KIMD_SHA_1))
return -EOPNOTSUPP;
@ -145,13 +145,13 @@ static int __init init(void)
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit sha1_s390_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(sha1_s390_init);
module_exit(sha1_s390_fini);
MODULE_ALIAS("sha1");


@ -133,7 +133,7 @@ static struct crypto_alg alg = {
.dia_final = sha256_final } }
};
static int init(void)
static int sha256_s390_init(void)
{
if (!crypt_s390_func_available(KIMD_SHA_256))
return -EOPNOTSUPP;
@ -141,13 +141,13 @@ static int init(void)
return crypto_register_alg(&alg);
}
static void __exit fini(void)
static void __exit sha256_s390_fini(void)
{
crypto_unregister_alg(&alg);
}
module_init(init);
module_exit(fini);
module_init(sha256_s390_init);
module_exit(sha256_s390_fini);
MODULE_ALIAS("sha256");


@ -3,6 +3,7 @@
# Linux kernel version: 2.6.25-rc4
# Wed Mar 5 11:22:59 2008
#
CONFIG_SCHED_MC=y
CONFIG_MMU=y
CONFIG_ZONE_DMA=y
CONFIG_LOCKDEP_SUPPORT=y


@ -19,7 +19,7 @@ obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
extra-y += head.o init_task.o vmlinux.lds
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += smp.o topology.o
obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o


@ -162,4 +162,77 @@ struct ucontext32 {
compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct __sysctl_args32;
struct stat64_emu31;
struct mmap_arg_struct_emu31;
struct fadvise64_64_args;
struct old_sigaction32;
long sys32_chown16(const char __user * filename, u16 user, u16 group);
long sys32_lchown16(const char __user * filename, u16 user, u16 group);
long sys32_fchown16(unsigned int fd, u16 user, u16 group);
long sys32_setregid16(u16 rgid, u16 egid);
long sys32_setgid16(u16 gid);
long sys32_setreuid16(u16 ruid, u16 euid);
long sys32_setuid16(u16 uid);
long sys32_setresuid16(u16 ruid, u16 euid, u16 suid);
long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid);
long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid);
long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid);
long sys32_setfsuid16(u16 uid);
long sys32_setfsgid16(u16 gid);
long sys32_getgroups16(int gidsetsize, u16 __user *grouplist);
long sys32_setgroups16(int gidsetsize, u16 __user *grouplist);
long sys32_getuid16(void);
long sys32_geteuid16(void);
long sys32_getgid16(void);
long sys32_getegid16(void);
long sys32_ipc(u32 call, int first, int second, int third, u32 ptr);
long sys32_truncate64(const char __user * path, unsigned long high,
unsigned long low);
long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low);
long sys32_sched_rr_get_interval(compat_pid_t pid,
struct compat_timespec __user *interval);
long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
compat_sigset_t __user *oset, size_t sigsetsize);
long sys32_rt_sigpending(compat_sigset_t __user *set, size_t sigsetsize);
long sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo);
long sys32_execve(void);
long sys32_init_module(void __user *umod, unsigned long len,
const char __user *uargs);
long sys32_delete_module(const char __user *name_user, unsigned int flags);
long sys32_gettimeofday(struct compat_timeval __user *tv,
struct timezone __user *tz);
long sys32_settimeofday(struct compat_timeval __user *tv,
struct timezone __user *tz);
long sys32_pause(void);
long sys32_pread64(unsigned int fd, char __user *ubuf, size_t count,
u32 poshi, u32 poslo);
long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
size_t count, u32 poshi, u32 poslo);
compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count);
long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
size_t count);
long sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *offset,
s32 count);
long sys32_sysctl(struct __sysctl_args32 __user *args);
long sys32_stat64(char __user * filename, struct stat64_emu31 __user * statbuf);
long sys32_lstat64(char __user * filename,
struct stat64_emu31 __user * statbuf);
long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf);
long sys32_fstatat64(unsigned int dfd, char __user *filename,
struct stat64_emu31 __user* statbuf, int flag);
unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg);
long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg);
long sys32_read(unsigned int fd, char __user * buf, size_t count);
long sys32_write(unsigned int fd, char __user * buf, size_t count);
long sys32_clone(void);
long sys32_fadvise64(int fd, loff_t offset, size_t len, int advise);
long sys32_fadvise64_64(struct fadvise64_64_args __user *args);
long sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
struct old_sigaction32 __user *oact);
long sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
struct sigaction32 __user *oact, size_t sigsetsize);
long sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss);
#endif /* _ASM_S390X_S390_H */


@ -29,6 +29,7 @@
#include <asm/lowcore.h>
#include "compat_linux.h"
#include "compat_ptrace.h"
#include "entry.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@ -428,6 +429,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
/* Default to using normal stack */
sp = (unsigned long) A(regs->gprs[15]);
/* Overflow on alternate signal stack gives SIGSEGV. */
if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
return (void __user *) -1UL;
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (! sas_ss_flags(sp))
@ -461,6 +466,9 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32)))
goto give_sigsegv;
if (frame == (void __user *) -1UL)
goto give_sigsegv;
if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32))
goto give_sigsegv;
@ -514,6 +522,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32)))
goto give_sigsegv;
if (frame == (void __user *) -1UL)
goto give_sigsegv;
if (copy_siginfo_to_user32(&frame->info, info))
goto give_sigsegv;


@ -73,7 +73,7 @@ static ssize_t debug_input(struct file *file, const char __user *user_buf,
static int debug_open(struct inode *inode, struct file *file);
static int debug_close(struct inode *inode, struct file *file);
static debug_info_t* debug_info_create(char *name, int pages_per_area,
int nr_areas, int buf_size);
int nr_areas, int buf_size, mode_t mode);
static void debug_info_get(debug_info_t *);
static void debug_info_put(debug_info_t *);
static int debug_prolog_level_fn(debug_info_t * id,
@ -157,7 +157,7 @@ struct debug_view debug_sprintf_view = {
};
/* used by dump analysis tools to determine version of debug feature */
unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION;
static unsigned int __used debug_feature_version = __DEBUG_FEATURE_VERSION;
/* static globals */
@ -327,7 +327,8 @@ debug_info_free(debug_info_t* db_info){
*/
static debug_info_t*
debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size)
debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size,
mode_t mode)
{
debug_info_t* rc;
@ -336,6 +337,8 @@ debug_info_create(char *name, int pages_per_area, int nr_areas, int buf_size)
if(!rc)
goto out;
rc->mode = mode & ~S_IFMT;
/* create root directory */
rc->debugfs_root_entry = debugfs_create_dir(rc->name,
debug_debugfs_root_entry);
@ -676,23 +679,30 @@ debug_close(struct inode *inode, struct file *file)
}
/*
* debug_register:
* - creates and initializes debug area for the caller
* - returns handle for debug area
* debug_register_mode:
* - Creates and initializes debug area for the caller
* The mode parameter allows specifying access rights for the s390dbf files
* - Returns handle for debug area
*/
debug_info_t*
debug_register (char *name, int pages_per_area, int nr_areas, int buf_size)
debug_info_t *debug_register_mode(char *name, int pages_per_area, int nr_areas,
int buf_size, mode_t mode, uid_t uid,
gid_t gid)
{
debug_info_t *rc = NULL;
/* Since debugfs currently does not support uid/gid other than root, */
/* we do not allow gid/uid != 0 until we get support for that. */
if ((uid != 0) || (gid != 0))
printk(KERN_WARNING "debug: Warning - Currently only uid/gid "
"= 0 are supported. Using root as owner now!");
if (!initialized)
BUG();
mutex_lock(&debug_mutex);
/* create new debug_info */
rc = debug_info_create(name, pages_per_area, nr_areas, buf_size);
rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
if(!rc)
goto out;
debug_register_view(rc, &debug_level_view);
@ -705,6 +715,20 @@ out:
mutex_unlock(&debug_mutex);
return rc;
}
EXPORT_SYMBOL(debug_register_mode);
/*
* debug_register:
* - creates and initializes debug area for the caller
* - returns handle for debug area
*/
debug_info_t *debug_register(char *name, int pages_per_area, int nr_areas,
int buf_size)
{
return debug_register_mode(name, pages_per_area, nr_areas, buf_size,
S_IRUSR | S_IWUSR, 0, 0);
}
/*
* debug_unregister:
@ -1073,15 +1097,16 @@ debug_register_view(debug_info_t * id, struct debug_view *view)
int rc = 0;
int i;
unsigned long flags;
mode_t mode = S_IFREG;
mode_t mode;
struct dentry *pde;
if (!id)
goto out;
if (view->prolog_proc || view->format_proc || view->header_proc)
mode |= S_IRUSR;
if (view->input_proc)
mode |= S_IWUSR;
mode = (id->mode | S_IFREG) & ~S_IXUGO;
if (!(view->prolog_proc || view->format_proc || view->header_proc))
mode &= ~(S_IRUSR | S_IRGRP | S_IROTH);
if (!view->input_proc)
mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH);
pde = debugfs_create_file(view->name, mode, id->debugfs_root_entry,
id , &debug_file_ops);
if (!pde){


@ -21,6 +21,7 @@
#include <asm/setup.h>
#include <asm/cpcmd.h>
#include <asm/sclp.h>
#include "entry.h"
/*
* Create a Kernel NSS if the SAVESYS= parameter is defined

arch/s390/kernel/entry.h (new file, 60 lines)

@ -0,0 +1,60 @@
#ifndef _ENTRY_H
#define _ENTRY_H
#include <linux/types.h>
#include <linux/signal.h>
#include <asm/ptrace.h>
typedef void pgm_check_handler_t(struct pt_regs *, long);
extern pgm_check_handler_t *pgm_check_table[128];
pgm_check_handler_t do_protection_exception;
pgm_check_handler_t do_dat_exception;
extern int sysctl_userprocess_debug;
void do_single_step(struct pt_regs *regs);
void syscall_trace(struct pt_regs *regs, int entryexit);
void kernel_stack_overflow(struct pt_regs * regs);
void do_signal(struct pt_regs *regs);
int handle_signal32(unsigned long sig, struct k_sigaction *ka,
siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
void do_extint(struct pt_regs *regs, unsigned short code);
int __cpuinit start_secondary(void *cpuvoid);
void __init startup_init(void);
void die(const char * str, struct pt_regs * regs, long err);
struct new_utsname;
struct mmap_arg_struct;
struct fadvise64_64_args;
struct old_sigaction;
struct sel_arg_struct;
long sys_pipe(unsigned long __user *fildes);
long sys_mmap2(struct mmap_arg_struct __user *arg);
long old_mmap(struct mmap_arg_struct __user *arg);
long sys_ipc(uint call, int first, unsigned long second,
unsigned long third, void __user *ptr);
long s390x_newuname(struct new_utsname __user *name);
long s390x_personality(unsigned long personality);
long s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
size_t len, int advice);
long s390_fadvise64_64(struct fadvise64_64_args __user *args);
long s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, u32 len_low);
long sys_fork(void);
long sys_clone(void);
long sys_vfork(void);
void execve_tail(void);
long sys_execve(void);
int sys_sigsuspend(int history0, int history1, old_sigset_t mask);
long sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact);
long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss);
long sys_sigreturn(void);
long sys_rt_sigreturn(void);
long sys32_sigreturn(void);
long sys32_rt_sigreturn(void);
long old_select(struct sel_arg_struct __user *arg);
long sys_ptrace(long request, long pid, long addr, long data);
#endif /* _ENTRY_H */


@ -475,6 +475,7 @@ pgm_check_handler:
pgm_no_vtime:
#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
TRACE_IRQS_OFF
lgf %r3,__LC_PGM_ILC # load program interruption code
lghi %r8,0x7f
@ -847,6 +848,7 @@ stack_overflow:
je 0f
la %r1,__LC_SAVE_AREA+32
0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack
mvc SP_ARGS(8,%r15),__LC_LAST_BREAK
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
la %r2,SP_PTREGS(%r15) # load pt_regs
jg kernel_stack_overflow


@ -655,7 +655,7 @@ static struct kobj_attribute reipl_type_attr =
static struct kset *reipl_kset;
void reipl_run(struct shutdown_trigger *trigger)
static void reipl_run(struct shutdown_trigger *trigger)
{
struct ccw_dev_id devid;
static char buf[100];


@ -360,7 +360,7 @@ no_kprobe:
* - When the probed function returns, this probe
* causes the handlers to fire
*/
void kretprobe_trampoline_holder(void)
static void __used kretprobe_trampoline_holder(void)
{
asm volatile(".global kretprobe_trampoline\n"
"kretprobe_trampoline: bcr 0,0\n");


@ -36,6 +36,8 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/utsname.h>
#include <linux/tick.h>
#include <linux/elfcore.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@ -44,6 +46,7 @@
#include <asm/irq.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include "entry.h"
asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@ -76,6 +79,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
* Need to know about CPUs going idle?
*/
static ATOMIC_NOTIFIER_HEAD(idle_chain);
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
int register_idle_notifier(struct notifier_block *nb)
{
@ -89,9 +93,33 @@ int unregister_idle_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_idle_notifier);
void do_monitor_call(struct pt_regs *regs, long interruption_code)
static int s390_idle_enter(void)
{
struct s390_idle_data *idle;
int nr_calls = 0;
void *hcpu;
int rc;
hcpu = (void *)(long)smp_processor_id();
rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
&nr_calls);
if (rc == NOTIFY_BAD) {
nr_calls--;
__atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
hcpu, nr_calls, NULL);
return rc;
}
idle = &__get_cpu_var(s390_idle);
spin_lock(&idle->lock);
idle->idle_count++;
idle->in_idle = 1;
idle->idle_enter = get_clock();
spin_unlock(&idle->lock);
return NOTIFY_OK;
}
void s390_idle_leave(void)
{
#ifdef CONFIG_SMP
struct s390_idle_data *idle;
idle = &__get_cpu_var(s390_idle);
@ -99,10 +127,6 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code)
idle->idle_time += get_clock() - idle->idle_enter;
idle->in_idle = 0;
spin_unlock(&idle->lock);
#endif
/* disable monitor call class 0 */
__ctl_clear_bit(8, 15);
atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
(void *)(long) smp_processor_id());
}
@ -113,61 +137,30 @@ extern void s390_handle_mcck(void);
*/
static void default_idle(void)
{
int cpu, rc;
int nr_calls = 0;
void *hcpu;
#ifdef CONFIG_SMP
struct s390_idle_data *idle;
#endif
/* CPU is going idle. */
cpu = smp_processor_id();
hcpu = (void *)(long)cpu;
local_irq_disable();
if (need_resched()) {
local_irq_enable();
return;
}
rc = __atomic_notifier_call_chain(&idle_chain, S390_CPU_IDLE, hcpu, -1,
&nr_calls);
if (rc == NOTIFY_BAD) {
nr_calls--;
__atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
hcpu, nr_calls, NULL);
if (s390_idle_enter() == NOTIFY_BAD) {
local_irq_enable();
return;
}
/* enable monitor call class 0 */
__ctl_set_bit(8, 15);
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(cpu)) {
if (cpu_is_offline(smp_processor_id())) {
preempt_enable_no_resched();
cpu_die();
}
#endif
local_mcck_disable();
if (test_thread_flag(TIF_MCCK_PENDING)) {
local_mcck_enable();
/* disable monitor call class 0 */
__ctl_clear_bit(8, 15);
atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE,
hcpu);
s390_idle_leave();
local_irq_enable();
s390_handle_mcck();
return;
}
#ifdef CONFIG_SMP
idle = &__get_cpu_var(s390_idle);
spin_lock(&idle->lock);
idle->idle_count++;
idle->in_idle = 1;
idle->idle_enter = get_clock();
spin_unlock(&idle->lock);
#endif
trace_hardirqs_on();
/* Wait for external, I/O or machine check interrupt. */
__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
@ -177,9 +170,10 @@ static void default_idle(void)
void cpu_idle(void)
{
for (;;) {
tick_nohz_stop_sched_tick();
while (!need_resched())
default_idle();
tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
schedule();
preempt_disable();
@ -201,6 +195,7 @@ void show_regs(struct pt_regs *regs)
/* Show stack backtrace if pt_regs is from kernel mode */
if (!(regs->psw.mask & PSW_MASK_PSTATE))
show_trace(NULL, (unsigned long *) regs->gprs[15]);
show_last_breaking_event(regs);
}
extern void kernel_thread_starter(void);


@ -41,6 +41,7 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "entry.h"
#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"


@ -13,11 +13,12 @@
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <asm/cpu.h>
#include <asm/lowcore.h>
#include <asm/s390_ext.h>
#include <asm/irq_regs.h>
#include <asm/irq.h>
#include "entry.h"
/*
* ext_int_hash[index] is the start of the list for all external interrupts
@ -119,13 +120,10 @@ void do_extint(struct pt_regs *regs, unsigned short code)
old_regs = set_irq_regs(regs);
irq_enter();
asm volatile ("mc 0,0");
if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
/**
* Make sure that the i/o interrupt did not "overtake"
* the last HZ timer interrupt.
*/
account_ticks(S390_lowcore.int_clock);
s390_idle_check();
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/* Serve timer interrupts first. */
clock_comparator_work();
kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
index = ext_hash(code);
for (p = ext_int_hash[index]; p; p = p->next) {


@ -39,6 +39,7 @@
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <asm/ipl.h>
#include <asm/uaccess.h>
@ -427,7 +428,7 @@ setup_lowcore(void)
lc->io_new_psw.mask = psw_kernel_bits;
lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
lc->ipl_device = S390_lowcore.ipl_device;
lc->jiffy_timer = -1LL;
lc->clock_comparator = -1ULL;
lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
lc->async_stack = (unsigned long)
__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
@ -687,7 +688,7 @@ static __init unsigned int stfl(void)
return S390_lowcore.stfl_fac_list;
}
static __init int stfle(unsigned long long *list, int doublewords)
static int __init __stfle(unsigned long long *list, int doublewords)
{
typedef struct { unsigned long long _[doublewords]; } addrtype;
register unsigned long __nr asm("0") = doublewords - 1;
@ -697,6 +698,13 @@ static __init int stfle(unsigned long long *list, int doublewords)
return __nr + 1;
}
int __init stfle(unsigned long long *list, int doublewords)
{
if (!(stfl() & (1UL << 24)))
return -EOPNOTSUPP;
return __stfle(list, doublewords);
}
/*
* Setup hardware capabilities.
*/
@ -741,7 +749,7 @@ static void __init setup_hwcaps(void)
* HWCAP_S390_DFP bit 6.
*/
if ((elf_hwcap & (1UL << 2)) &&
stfle(&facility_list_extended, 1) > 0) {
__stfle(&facility_list_extended, 1) > 0) {
if (facility_list_extended & (1ULL << (64 - 43)))
elf_hwcap |= 1UL << 6;
}
@ -823,6 +831,7 @@ setup_arch(char **cmdline_p)
cpu_init();
__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
s390_init_cpu_topology();
/*
* Setup capabilities (ELF_HWCAP & ELF_PLATFORM).


@ -27,6 +27,7 @@
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/lowcore.h>
#include "entry.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@ -235,6 +236,10 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
/* Default to using normal stack */
sp = regs->gprs[15];
/* Overflow on alternate signal stack gives SIGSEGV. */
if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
return (void __user *) -1UL;
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (! sas_ss_flags(sp))
@ -270,6 +275,9 @@ static int setup_frame(int sig, struct k_sigaction *ka,
if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe)))
goto give_sigsegv;
if (frame == (void __user *) -1UL)
goto give_sigsegv;
if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE))
goto give_sigsegv;
@ -327,6 +335,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe)))
goto give_sigsegv;
if (frame == (void __user *) -1UL)
goto give_sigsegv;
if (copy_siginfo_to_user(&frame->info, info))
goto give_sigsegv;
@ -474,11 +485,6 @@ void do_signal(struct pt_regs *regs)
int ret;
#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_31BIT)) {
extern int handle_signal32(unsigned long sig,
struct k_sigaction *ka,
siginfo_t *info,
sigset_t *oldset,
struct pt_regs *regs);
ret = handle_signal32(signr, &ka, &info, oldset, regs);
}
else


@ -44,6 +44,7 @@
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cpu.h>
#include "entry.h"
/*
* An array with a pointer the lowcore of every CPU.
@ -67,13 +68,12 @@ enum s390_cpu_state {
CPU_STATE_CONFIGURED,
};
#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_MUTEX(smp_cpu_state_mutex);
#endif
DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;
static DEFINE_PER_CPU(struct cpu, cpu_devices);
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
static void smp_ext_bitcall(int, ec_bit_sig);
@ -298,7 +298,7 @@ static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
/*
* this function sends a 'purge tlb' signal to another CPU.
*/
void smp_ptlb_callback(void *info)
static void smp_ptlb_callback(void *info)
{
__tlb_flush_local();
}
@ -456,6 +456,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail)
if (cpu_known(cpu_id))
continue;
__cpu_logical_map[logical_cpu] = cpu_id;
smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
if (!cpu_stopped(logical_cpu))
continue;
cpu_set(logical_cpu, cpu_present_map);
@ -489,6 +490,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail)
if (cpu_known(cpu_id))
continue;
__cpu_logical_map[logical_cpu] = cpu_id;
smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
cpu_set(logical_cpu, cpu_present_map);
if (cpu >= info->configured)
smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
@ -846,6 +848,7 @@ void __init smp_prepare_boot_cpu(void)
S390_lowcore.percpu_offset = __per_cpu_offset[0];
current_set[0] = current;
smp_cpu_state[0] = CPU_STATE_CONFIGURED;
smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
spin_lock_init(&(&__get_cpu_var(s390_idle))->lock);
}
@ -897,15 +900,19 @@ static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
case 0:
if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
if (!rc)
if (!rc) {
smp_cpu_state[cpu] = CPU_STATE_STANDBY;
smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
}
}
break;
case 1:
if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
if (!rc)
if (!rc) {
smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
}
}
break;
default:
@ -919,6 +926,34 @@ out:
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
static ssize_t cpu_polarization_show(struct sys_device *dev, char *buf)
{
int cpu = dev->id;
ssize_t count;
mutex_lock(&smp_cpu_state_mutex);
switch (smp_cpu_polarization[cpu]) {
case POLARIZATION_HRZ:
count = sprintf(buf, "horizontal\n");
break;
case POLARIZATION_VL:
count = sprintf(buf, "vertical:low\n");
break;
case POLARIZATION_VM:
count = sprintf(buf, "vertical:medium\n");
break;
case POLARIZATION_VH:
count = sprintf(buf, "vertical:high\n");
break;
default:
count = sprintf(buf, "unknown\n");
break;
}
mutex_unlock(&smp_cpu_state_mutex);
return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);
static ssize_t show_cpu_address(struct sys_device *dev, char *buf)
{
return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
@ -931,6 +966,7 @@ static struct attribute *cpu_common_attrs[] = {
&attr_configure.attr,
#endif
&attr_address.attr,
&attr_polarization.attr,
NULL,
};
@ -1075,11 +1111,48 @@ static ssize_t __ref rescan_store(struct sys_device *dev,
out:
put_online_cpus();
mutex_unlock(&smp_cpu_state_mutex);
if (!cpus_empty(newcpus))
topology_schedule_update();
return rc ? rc : count;
}
static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
static ssize_t dispatching_show(struct sys_device *dev, char *buf)
{
ssize_t count;
mutex_lock(&smp_cpu_state_mutex);
count = sprintf(buf, "%d\n", cpu_management);
mutex_unlock(&smp_cpu_state_mutex);
return count;
}
static ssize_t dispatching_store(struct sys_device *dev, const char *buf,
size_t count)
{
int val, rc;
char delim;
if (sscanf(buf, "%d %c", &val, &delim) != 1)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
rc = 0;
mutex_lock(&smp_cpu_state_mutex);
get_online_cpus();
if (cpu_management == val)
goto out;
rc = topology_set_cpu_management(val);
if (!rc)
cpu_management = val;
out:
put_online_cpus();
mutex_unlock(&smp_cpu_state_mutex);
return rc ? rc : count;
}
static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store);
static int __init topology_init(void)
{
int cpu;
@ -1093,6 +1166,10 @@ static int __init topology_init(void)
if (rc)
return rc;
#endif
rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
&attr_dispatching.attr);
if (rc)
return rc;
for_each_present_cpu(cpu) {
rc = smp_add_present_cpu(cpu);
if (rc)


@ -29,8 +29,8 @@
#include <linux/personality.h>
#include <linux/unistd.h>
#include <linux/ipc.h>
#include <asm/uaccess.h>
#include "entry.h"
/*
* sys_pipe() is the normal C calling standard for creating


@ -30,7 +30,7 @@
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
@ -39,6 +39,7 @@
#include <asm/irq_regs.h>
#include <asm/timer.h>
#include <asm/etr.h>
#include <asm/cio.h>
/* change this if you have some constant time drift */
#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
@ -57,16 +58,16 @@
static ext_int_info_t ext_int_info_cc;
static ext_int_info_t ext_int_etr_cc;
static u64 init_timer_cc;
static u64 jiffies_timer_cc;
static u64 xtime_cc;
static DEFINE_PER_CPU(struct clock_event_device, comparators);
/*
* Scheduler clock - returns current time in nanosec units.
*/
unsigned long long sched_clock(void)
{
return ((get_clock() - jiffies_timer_cc) * 125) >> 9;
return ((get_clock_xt() - jiffies_timer_cc) * 125) >> 9;
}
/*
@ -95,162 +96,40 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
#define s390_do_profile() do { ; } while(0)
#endif /* CONFIG_PROFILING */
/*
* Advance the per cpu tick counter up to the time given with the
* "time" argument. The per cpu update consists of accounting
* the virtual cpu time, calling update_process_times and calling
* the profiling hook. If xtime is before time it is advanced as well.
*/
void account_ticks(u64 time)
void clock_comparator_work(void)
{
__u32 ticks;
__u64 tmp;
/* Calculate how many ticks have passed. */
if (time < S390_lowcore.jiffy_timer)
return;
tmp = time - S390_lowcore.jiffy_timer;
if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */
ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
S390_lowcore.jiffy_timer +=
CLK_TICKS_PER_JIFFY * (__u64) ticks;
} else if (tmp >= CLK_TICKS_PER_JIFFY) {
ticks = 2;
S390_lowcore.jiffy_timer += 2*CLK_TICKS_PER_JIFFY;
} else {
ticks = 1;
S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
}
#ifdef CONFIG_SMP
/*
* Do not rely on the boot cpu to do the calls to do_timer.
* Spread it over all cpus instead.
*/
write_seqlock(&xtime_lock);
if (S390_lowcore.jiffy_timer > xtime_cc) {
__u32 xticks;
tmp = S390_lowcore.jiffy_timer - xtime_cc;
if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
xticks = __div(tmp, CLK_TICKS_PER_JIFFY);
xtime_cc += (__u64) xticks * CLK_TICKS_PER_JIFFY;
} else {
xticks = 1;
xtime_cc += CLK_TICKS_PER_JIFFY;
}
do_timer(xticks);
}
write_sequnlock(&xtime_lock);
#else
do_timer(ticks);
#endif
while (ticks--)
update_process_times(user_mode(get_irq_regs()));
struct clock_event_device *cd;
S390_lowcore.clock_comparator = -1ULL;
set_clock_comparator(S390_lowcore.clock_comparator);
cd = &__get_cpu_var(comparators);
cd->event_handler(cd);
s390_do_profile();
}
#ifdef CONFIG_NO_IDLE_HZ
#ifdef CONFIG_NO_IDLE_HZ_INIT
int sysctl_hz_timer = 0;
#else
int sysctl_hz_timer = 1;
#endif
/*
* Stop the HZ tick on the current CPU.
* Only cpu_idle may call this function.
* Fixup the clock comparator.
*/
static void stop_hz_timer(void)
static void fixup_clock_comparator(unsigned long long delta)
{
unsigned long flags;
unsigned long seq, next;
__u64 timer, todval;
int cpu = smp_processor_id();
if (sysctl_hz_timer != 0)
/* If nobody is waiting there's nothing to fix. */
if (S390_lowcore.clock_comparator == -1ULL)
return;
cpu_set(cpu, nohz_cpu_mask);
/*
* Leave the clock comparator set up for the next timer
* tick if either rcu or a softirq is pending.
*/
if (rcu_needs_cpu(cpu) || local_softirq_pending()) {
cpu_clear(cpu, nohz_cpu_mask);
return;
}
/*
* This cpu is going really idle. Set up the clock comparator
* for the next event.
*/
next = next_timer_interrupt();
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64;
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
todval = -1ULL;
/* Be careful about overflows. */
if (timer < (-1ULL / CLK_TICKS_PER_JIFFY)) {
timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
if (timer >= jiffies_timer_cc)
todval = timer;
}
set_clock_comparator(todval);
S390_lowcore.clock_comparator += delta;
set_clock_comparator(S390_lowcore.clock_comparator);
}
/*
* Start the HZ tick on the current CPU.
* Only cpu_idle may call this function.
*/
static void start_hz_timer(void)
static int s390_next_event(unsigned long delta,
struct clock_event_device *evt)
{
if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
return;
account_ticks(get_clock());
set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
cpu_clear(smp_processor_id(), nohz_cpu_mask);
S390_lowcore.clock_comparator = get_clock() + delta;
set_clock_comparator(S390_lowcore.clock_comparator);
return 0;
}
static int nohz_idle_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
static void s390_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
switch (action) {
case S390_CPU_IDLE:
stop_hz_timer();
break;
case S390_CPU_NOT_IDLE:
start_hz_timer();
break;
}
return NOTIFY_OK;
}
static struct notifier_block nohz_idle_nb = {
.notifier_call = nohz_idle_notify,
};
static void __init nohz_init(void)
{
if (register_idle_notifier(&nohz_idle_nb))
panic("Couldn't register idle notifier");
}
#endif
/*
* Set up per cpu jiffy timer and set the clock comparator.
*/
static void setup_jiffy_timer(void)
{
/* Set up clock comparator to next jiffy. */
S390_lowcore.jiffy_timer =
jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
}
/*
@ -259,7 +138,26 @@ static void setup_jiffy_timer(void)
*/
void init_cpu_timer(void)
{
setup_jiffy_timer();
struct clock_event_device *cd;
int cpu;
S390_lowcore.clock_comparator = -1ULL;
set_clock_comparator(S390_lowcore.clock_comparator);
cpu = smp_processor_id();
cd = &per_cpu(comparators, cpu);
cd->name = "comparator";
cd->features = CLOCK_EVT_FEAT_ONESHOT;
cd->mult = 16777;
cd->shift = 12;
cd->min_delta_ns = 1;
cd->max_delta_ns = LONG_MAX;
cd->rating = 400;
cd->cpumask = cpumask_of_cpu(cpu);
cd->set_next_event = s390_next_event;
cd->set_mode = s390_set_mode;
clockevents_register_device(cd);
/* Enable clock comparator timer interrupt. */
__ctl_set_bit(0,11);
@ -270,8 +168,6 @@ void init_cpu_timer(void)
static void clock_comparator_interrupt(__u16 code)
{
/* set clock comparator for next tick */
set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
}
static void etr_reset(void);
@ -316,8 +212,9 @@ static struct clocksource clocksource_tod = {
*/
void __init time_init(void)
{
u64 init_timer_cc;
init_timer_cc = reset_tod_clock();
xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
/* set xtime */
@ -342,10 +239,6 @@ void __init time_init(void)
/* Enable TOD clock interrupts on the boot cpu. */
init_cpu_timer();
#ifdef CONFIG_NO_IDLE_HZ
nohz_init();
#endif
#ifdef CONFIG_VIRT_TIMER
vtime_init();
#endif
@ -699,53 +592,49 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
}
/*
* The time is "clock". xtime is what we think the time is.
* The time is "clock". old is what we think the time is.
* Adjust the value by a multiple of jiffies and add the delta to ntp.
* "delay" is an approximation how long the synchronization took. If
* the time correction is positive, then "delay" is subtracted from
* the time difference and only the remaining part is passed to ntp.
*/
static void etr_adjust_time(unsigned long long clock, unsigned long long delay)
static unsigned long long etr_adjust_time(unsigned long long old,
unsigned long long clock,
unsigned long long delay)
{
unsigned long long delta, ticks;
struct timex adjust;
/*
* We don't have to take the xtime lock because the cpu
* executing etr_adjust_time is running disabled in
* tasklet context and all other cpus are looping in
* etr_sync_cpu_start.
*/
if (clock > xtime_cc) {
if (clock > old) {
/* It is later than we thought. */
delta = ticks = clock - xtime_cc;
delta = ticks = clock - old;
delta = ticks = (delta < delay) ? 0 : delta - delay;
delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
init_timer_cc = init_timer_cc + delta;
jiffies_timer_cc = jiffies_timer_cc + delta;
xtime_cc = xtime_cc + delta;
adjust.offset = ticks * (1000000 / HZ);
} else {
/* It is earlier than we thought. */
delta = ticks = xtime_cc - clock;
delta = ticks = old - clock;
delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
init_timer_cc = init_timer_cc - delta;
jiffies_timer_cc = jiffies_timer_cc - delta;
xtime_cc = xtime_cc - delta;
delta = -delta;
adjust.offset = -ticks * (1000000 / HZ);
}
jiffies_timer_cc += delta;
if (adjust.offset != 0) {
printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
adjust.offset);
adjust.modes = ADJ_OFFSET_SINGLESHOT;
do_adjtimex(&adjust);
}
return delta;
}
static struct {
int in_sync;
unsigned long long fixup_cc;
} etr_sync;
static void etr_sync_cpu_start(void *dummy)
{
int *in_sync = dummy;
etr_enable_sync_clock();
/*
* This looks like a busy wait loop but it isn't. etr_sync_cpus
@ -753,7 +642,7 @@ static void etr_sync_cpu_start(void *dummy)
* __udelay will stop the cpu on an enabled wait psw until the
* TOD is running again.
*/
while (*in_sync == 0) {
while (etr_sync.in_sync == 0) {
__udelay(1);
/*
* A different cpu changes *in_sync. Therefore use
@ -761,14 +650,14 @@ static void etr_sync_cpu_start(void *dummy)
*/
barrier();
}
if (*in_sync != 1)
if (etr_sync.in_sync != 1)
/* Didn't work. Clear per-cpu in sync bit again. */
etr_disable_sync_clock(NULL);
/*
* This round of TOD syncing is done. Set the clock comparator
* to the next tick and let the processor continue.
*/
setup_jiffy_timer();
fixup_clock_comparator(etr_sync.fixup_cc);
}
static void etr_sync_cpu_end(void *dummy)
@ -783,8 +672,8 @@ static void etr_sync_cpu_end(void *dummy)
static int etr_sync_clock(struct etr_aib *aib, int port)
{
struct etr_aib *sync_port;
unsigned long long clock, delay;
int in_sync, follows;
unsigned long long clock, old_clock, delay, delta;
int follows;
int rc;
/* Check if the current aib is adjacent to the sync port aib. */
@ -799,9 +688,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
* successfully synced the clock. smp_call_function will
* return after all other cpus are in etr_sync_cpu_start.
*/
in_sync = 0;
memset(&etr_sync, 0, sizeof(etr_sync));
preempt_disable();
smp_call_function(etr_sync_cpu_start,&in_sync,0,0);
smp_call_function(etr_sync_cpu_start, NULL, 0, 0);
local_irq_disable();
etr_enable_sync_clock();
@ -809,6 +698,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
__ctl_set_bit(14, 21);
__ctl_set_bit(0, 29);
clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
old_clock = get_clock();
if (set_clock(clock) == 0) {
__udelay(1); /* Wait for the clock to start. */
__ctl_clear_bit(0, 29);
@ -817,16 +707,17 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
/* Adjust Linux timing variables. */
delay = (unsigned long long)
(aib->edf2.etv - sync_port->edf2.etv) << 32;
etr_adjust_time(clock, delay);
setup_jiffy_timer();
delta = etr_adjust_time(old_clock, clock, delay);
etr_sync.fixup_cc = delta;
fixup_clock_comparator(delta);
/* Verify that the clock is properly set. */
if (!etr_aib_follows(sync_port, aib, port)) {
/* Didn't work. */
etr_disable_sync_clock(NULL);
in_sync = -EAGAIN;
etr_sync.in_sync = -EAGAIN;
rc = -EAGAIN;
} else {
in_sync = 1;
etr_sync.in_sync = 1;
rc = 0;
}
} else {
@ -834,7 +725,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
__ctl_clear_bit(0, 29);
__ctl_clear_bit(14, 21);
etr_disable_sync_clock(NULL);
in_sync = -EAGAIN;
etr_sync.in_sync = -EAGAIN;
rc = -EAGAIN;
}
local_irq_enable();

arch/s390/kernel/topology.c (new file, 314 lines)

@ -0,0 +1,314 @@
/*
* Copyright IBM Corp. 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/sysinfo.h>
#define CPU_BITS 64
#define NR_MAG 6
#define PTF_HORIZONTAL (0UL)
#define PTF_VERTICAL (1UL)
#define PTF_CHECK (2UL)
struct tl_cpu {
unsigned char reserved0[4];
unsigned char :6;
unsigned char pp:2;
unsigned char reserved1;
unsigned short origin;
unsigned long mask[CPU_BITS / BITS_PER_LONG];
};
struct tl_container {
unsigned char reserved[8];
};
union tl_entry {
unsigned char nl;
struct tl_cpu cpu;
struct tl_container container;
};
struct tl_info {
unsigned char reserved0[2];
unsigned short length;
unsigned char mag[NR_MAG];
unsigned char reserved1;
unsigned char mnest;
unsigned char reserved2[4];
union tl_entry tle[0];
};
struct core_info {
struct core_info *next;
cpumask_t mask;
};
static void topology_work_fn(struct work_struct *work);
static struct tl_info *tl_info;
static struct core_info core_info;
static int machine_has_topology;
static int machine_has_topology_irq;
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
cpumask_t cpu_coregroup_map(unsigned int cpu)
{
struct core_info *core = &core_info;
cpumask_t mask;
cpus_clear(mask);
if (!machine_has_topology)
return cpu_present_map;
mutex_lock(&smp_cpu_state_mutex);
while (core) {
if (cpu_isset(cpu, core->mask)) {
mask = core->mask;
break;
}
core = core->next;
}
mutex_unlock(&smp_cpu_state_mutex);
if (cpus_empty(mask))
mask = cpumask_of_cpu(cpu);
return mask;
}
static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
unsigned int cpu;
for (cpu = find_first_bit(&tl_cpu->mask[0], CPU_BITS);
cpu < CPU_BITS;
cpu = find_next_bit(&tl_cpu->mask[0], CPU_BITS, cpu + 1))
{
unsigned int rcpu, lcpu;
rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
for_each_present_cpu(lcpu) {
if (__cpu_logical_map[lcpu] == rcpu) {
cpu_set(lcpu, core->mask);
smp_cpu_polarization[lcpu] = tl_cpu->pp;
}
}
}
}
static void clear_cores(void)
{
struct core_info *core = &core_info;
while (core) {
cpus_clear(core->mask);
core = core->next;
}
}
static union tl_entry *next_tle(union tl_entry *tle)
{
if (tle->nl)
return (union tl_entry *)((struct tl_container *)tle + 1);
else
return (union tl_entry *)((struct tl_cpu *)tle + 1);
}
static void tl_to_cores(struct tl_info *info)
{
union tl_entry *tle, *end;
struct core_info *core = &core_info;
mutex_lock(&smp_cpu_state_mutex);
clear_cores();
tle = info->tle;
end = (union tl_entry *)((unsigned long)info + info->length);
while (tle < end) {
switch (tle->nl) {
case 5:
case 4:
case 3:
case 2:
break;
case 1:
core = core->next;
break;
case 0:
add_cpus_to_core(&tle->cpu, core);
break;
default:
clear_cores();
machine_has_topology = 0;
return;
}
tle = next_tle(tle);
}
mutex_unlock(&smp_cpu_state_mutex);
}
static void topology_update_polarization_simple(void)
{
int cpu;
mutex_lock(&smp_cpu_state_mutex);
for_each_present_cpu(cpu)
smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
mutex_unlock(&smp_cpu_state_mutex);
}
static int ptf(unsigned long fc)
{
int rc;
asm volatile(
" .insn rre,0xb9a20000,%1,%1\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (rc)
: "d" (fc) : "cc");
return rc;
}
int topology_set_cpu_management(int fc)
{
int cpu;
int rc;
if (!machine_has_topology)
return -EOPNOTSUPP;
if (fc)
rc = ptf(PTF_VERTICAL);
else
rc = ptf(PTF_HORIZONTAL);
if (rc)
return -EBUSY;
for_each_present_cpu(cpu)
smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
return rc;
}
void arch_update_cpu_topology(void)
{
struct tl_info *info = tl_info;
struct sys_device *sysdev;
int cpu;
if (!machine_has_topology) {
topology_update_polarization_simple();
return;
}
stsi(info, 15, 1, 2);
tl_to_cores(info);
for_each_online_cpu(cpu) {
sysdev = get_cpu_sysdev(cpu);
kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
}
}
static void topology_work_fn(struct work_struct *work)
{
arch_reinit_sched_domains();
}
void topology_schedule_update(void)
{
schedule_work(&topology_work);
}
static void topology_timer_fn(unsigned long ignored)
{
if (ptf(PTF_CHECK))
topology_schedule_update();
set_topology_timer();
}
static void set_topology_timer(void)
{
topology_timer.function = topology_timer_fn;
topology_timer.data = 0;
topology_timer.expires = jiffies + 60 * HZ;
add_timer(&topology_timer);
}
static void topology_interrupt(__u16 code)
{
schedule_work(&topology_work);
}
static int __init init_topology_update(void)
{
int rc;
if (!machine_has_topology) {
topology_update_polarization_simple();
return 0;
}
init_timer_deferrable(&topology_timer);
if (machine_has_topology_irq) {
rc = register_external_interrupt(0x2005, topology_interrupt);
if (rc)
return rc;
ctl_set_bit(0, 8);
}
else
set_topology_timer();
return 0;
}
__initcall(init_topology_update);
void __init s390_init_cpu_topology(void)
{
unsigned long long facility_bits;
struct tl_info *info;
struct core_info *core;
int nr_cores;
int i;
if (stfle(&facility_bits, 1) <= 0)
return;
if (!(facility_bits & (1ULL << 52)) || !(facility_bits & (1ULL << 61)))
return;
machine_has_topology = 1;
if (facility_bits & (1ULL << 51))
machine_has_topology_irq = 1;
tl_info = alloc_bootmem_pages(PAGE_SIZE);
if (!tl_info)
goto error;
info = tl_info;
stsi(info, 15, 1, 2);
nr_cores = info->mag[NR_MAG - 2];
for (i = 0; i < info->mnest - 2; i++)
nr_cores *= info->mag[NR_MAG - 3 - i];
printk(KERN_INFO "CPU topology:");
for (i = 0; i < NR_MAG; i++)
printk(" %d", info->mag[i]);
printk(" / %d\n", info->mnest);
core = &core_info;
for (i = 0; i < nr_cores; i++) {
core->next = alloc_bootmem(sizeof(struct core_info));
core = core->next;
if (!core)
goto error;
}
return;
error:
machine_has_topology = 0;
machine_has_topology_irq = 0;
}


@ -42,11 +42,8 @@
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include "entry.h"
/* Called from entry.S only */
extern void handle_per_exception(struct pt_regs *regs);
typedef void pgm_check_handler_t(struct pt_regs *, long);
pgm_check_handler_t *pgm_check_table[128];
#ifdef CONFIG_SYSCTL
@ -59,7 +56,6 @@ int sysctl_userprocess_debug = 0;
extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_monitor_call;
extern pgm_check_handler_t do_asce_exception;
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@ -138,7 +134,6 @@ void show_trace(struct task_struct *task, unsigned long *stack)
else
__show_trace(sp, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
printk("\n");
if (!task)
task = current;
debug_show_held_locks(task);
@ -166,6 +161,15 @@ void show_stack(struct task_struct *task, unsigned long *sp)
show_trace(task, sp);
}
#ifdef CONFIG_64BIT
void show_last_breaking_event(struct pt_regs *regs)
{
printk("Last Breaking-Event-Address:\n");
printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
}
#endif
/*
* The architecture-independent dump_stack generator
*/
@ -739,6 +743,5 @@ void __init trap_init(void)
pgm_check_table[0x15] = &operand_exception;
pgm_check_table[0x1C] = &space_switch_exception;
pgm_check_table[0x1D] = &hfp_sqrt_exception;
pgm_check_table[0x40] = &do_monitor_call;
pfault_irq_init();
}


@ -34,7 +34,7 @@ void __delay(unsigned long loops)
*/
void __udelay(unsigned long usecs)
{
u64 end, time, jiffy_timer = 0;
u64 end, time, old_cc = 0;
unsigned long flags, cr0, mask, dummy;
int irq_context;
@ -43,8 +43,8 @@ void __udelay(unsigned long usecs)
local_bh_disable();
local_irq_save(flags);
if (raw_irqs_disabled_flags(flags)) {
jiffy_timer = S390_lowcore.jiffy_timer;
S390_lowcore.jiffy_timer = -1ULL - (4096 << 12);
old_cc = S390_lowcore.clock_comparator;
S390_lowcore.clock_comparator = -1ULL;
__ctl_store(cr0, 0, 0);
dummy = (cr0 & 0xffff00e0) | 0x00000800;
__ctl_load(dummy , 0, 0);
@ -55,8 +55,8 @@ void __udelay(unsigned long usecs)
end = get_clock() + ((u64) usecs << 12);
do {
time = end < S390_lowcore.jiffy_timer ?
end : S390_lowcore.jiffy_timer;
time = end < S390_lowcore.clock_comparator ?
end : S390_lowcore.clock_comparator;
set_clock_comparator(time);
trace_hardirqs_on();
__load_psw_mask(mask);
@ -65,10 +65,10 @@ void __udelay(unsigned long usecs)
if (raw_irqs_disabled_flags(flags)) {
__ctl_load(cr0, 0, 0);
S390_lowcore.jiffy_timer = jiffy_timer;
S390_lowcore.clock_comparator = old_cc;
}
if (!irq_context)
_local_bh_enable();
set_clock_comparator(S390_lowcore.jiffy_timer);
set_clock_comparator(S390_lowcore.clock_comparator);
local_irq_restore(flags);
}


@ -302,6 +302,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
pte_t *pte_from, *pte_to;
int write_user;
if (segment_eq(get_fs(), KERNEL_DS)) {
memcpy((void __force *) to, (void __force *) from, n);
return 0;
}
done = 0;
retry:
spin_lock(&mm->page_table_lock);
@ -361,18 +365,10 @@ fault:
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
"m" (*uaddr) : "cc" );
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
int oldval = 0, newval, ret;
spin_lock(&current->mm->page_table_lock);
uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
if (!uaddr) {
spin_unlock(&current->mm->page_table_lock);
return -EFAULT;
}
get_page(virt_to_page(uaddr));
spin_unlock(&current->mm->page_table_lock);
switch (op) {
case FUTEX_OP_SET:
__futex_atomic_op("lr %2,%5\n",
@ -397,17 +393,17 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
default:
ret = -ENOSYS;
}
put_page(virt_to_page(uaddr));
*old = oldval;
if (ret == 0)
*old = oldval;
return ret;
}
int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
{
int ret;
if (!current->mm)
return -EFAULT;
if (segment_eq(get_fs(), KERNEL_DS))
return __futex_atomic_op_pt(op, uaddr, oparg, old);
spin_lock(&current->mm->page_table_lock);
uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
if (!uaddr) {
@ -416,13 +412,40 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
}
get_page(virt_to_page(uaddr));
spin_unlock(&current->mm->page_table_lock);
asm volatile(" cs %1,%4,0(%5)\n"
"0: lr %0,%1\n"
"1:\n"
EX_TABLE(0b,1b)
ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
put_page(virt_to_page(uaddr));
return ret;
}
static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
int ret;
asm volatile("0: cs %1,%4,0(%5)\n"
"1: lr %0,%1\n"
"2:\n"
EX_TABLE(0b,2b) EX_TABLE(1b,2b)
: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
: "cc", "memory" );
return ret;
}
int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
{
int ret;
if (segment_eq(get_fs(), KERNEL_DS))
return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
spin_lock(&current->mm->page_table_lock);
uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
if (!uaddr) {
spin_unlock(&current->mm->page_table_lock);
return -EFAULT;
}
get_page(virt_to_page(uaddr));
spin_unlock(&current->mm->page_table_lock);
ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval);
put_page(virt_to_page(uaddr));
return ret;
}


@ -289,22 +289,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
switch (rc) {
case 0:
break;
case -ENOSPC:
PRINT_WARN("segment_load: not loading segment %s - overlaps "
"storage/segment\n", name);
if (rc)
goto out_free;
case -ERANGE:
PRINT_WARN("segment_load: not loading segment %s - exceeds "
"kernel mapping range\n", name);
goto out_free;
default:
PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n",
name, rc);
goto out_free;
}
seg->res = kzalloc(sizeof(struct resource), GFP_KERNEL);
if (seg->res == NULL) {
@ -582,8 +568,59 @@ out:
mutex_unlock(&dcss_lock);
}
/*
* print appropriate error message for segment_load()/segment_type()
* return code
*/
void segment_warning(int rc, char *seg_name)
{
switch (rc) {
case -ENOENT:
PRINT_WARN("cannot load/query segment %s, "
"does not exist\n", seg_name);
break;
case -ENOSYS:
PRINT_WARN("cannot load/query segment %s, "
"not running on VM\n", seg_name);
break;
case -EIO:
PRINT_WARN("cannot load/query segment %s, "
"hardware error\n", seg_name);
break;
case -ENOTSUPP:
PRINT_WARN("cannot load/query segment %s, "
"is a multi-part segment\n", seg_name);
break;
case -ENOSPC:
PRINT_WARN("cannot load/query segment %s, "
"overlaps with storage\n", seg_name);
break;
case -EBUSY:
PRINT_WARN("cannot load/query segment %s, "
"overlaps with already loaded dcss\n", seg_name);
break;
case -EPERM:
PRINT_WARN("cannot load/query segment %s, "
"already loaded in incompatible mode\n", seg_name);
break;
case -ENOMEM:
PRINT_WARN("cannot load/query segment %s, "
"out of memory\n", seg_name);
break;
case -ERANGE:
PRINT_WARN("cannot load/query segment %s, "
"exceeds kernel mapping range\n", seg_name);
break;
default:
PRINT_WARN("cannot load/query segment %s, "
"return value %i\n", seg_name, rc);
break;
}
}
EXPORT_SYMBOL(segment_load);
EXPORT_SYMBOL(segment_unload);
EXPORT_SYMBOL(segment_save);
EXPORT_SYMBOL(segment_type);
EXPORT_SYMBOL(segment_modify_shared);
EXPORT_SYMBOL(segment_warning);


@ -28,11 +28,11 @@
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/s390_ext.h>
#include <asm/mmu_context.h>
#include "../kernel/entry.h"
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
@ -50,8 +50,6 @@
extern int sysctl_userprocess_debug;
#endif
extern void die(const char *,struct pt_regs *,long);
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, long err)
{
@ -245,11 +243,6 @@ static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
}
#ifdef CONFIG_S390_EXEC_PROTECT
extern long sys_sigreturn(struct pt_regs *regs);
extern long sys_rt_sigreturn(struct pt_regs *regs);
extern long sys32_sigreturn(struct pt_regs *regs);
extern long sys32_rt_sigreturn(struct pt_regs *regs);
static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
unsigned long address, unsigned long error_code)
{
@ -270,15 +263,15 @@ static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
#ifdef CONFIG_COMPAT
compat = test_tsk_thread_flag(current, TIF_31BIT);
if (compat && instruction == 0x0a77)
sys32_sigreturn(regs);
sys32_sigreturn();
else if (compat && instruction == 0x0aad)
sys32_rt_sigreturn(regs);
sys32_rt_sigreturn();
else
#endif
if (instruction == 0x0a77)
sys_sigreturn(regs);
sys_sigreturn();
else if (instruction == 0x0aad)
sys_rt_sigreturn(regs);
sys_rt_sigreturn();
else {
current->thread.prot_addr = address;
current->thread.trap_no = error_code;
@ -424,7 +417,7 @@ no_context:
}
void __kprobes do_protection_exception(struct pt_regs *regs,
unsigned long error_code)
long error_code)
{
/* Protection exception is suppressing, decrement psw address. */
regs->psw.addr -= (error_code >> 16);
@ -440,7 +433,7 @@ void __kprobes do_protection_exception(struct pt_regs *regs,
do_exception(regs, 4, 1);
}
void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code)
void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
{
do_exception(regs, error_code & 0xff, 0);
}

View File

@ -50,7 +50,6 @@ void show_mem(void)
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
i = max_mapnr;
while (i-- > 0) {
if (!pfn_valid(i))

View File

@ -116,6 +116,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
err = -EAGAIN;
if (!bytes_read && (filp->f_flags & O_NONBLOCK))
goto out;
if (bytes_read < 0) {
err = bytes_read;
goto out;
}
err = -EFAULT;
while (bytes_read && size) {
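
The added check makes rng_dev_read() forward a negative byte count from the backend as an error instead of treating it as "no data yet". A hedged sketch of the same ordering — the non-blocking case first, then error propagation — where backend_read() is a made-up stand-in for the driver's data-read callback:

#include <errno.h>
#include <stdio.h>

/* Hypothetical backend: >0 bytes read, 0 for "nothing yet",
 * <0 on hardware error. */
static int backend_read(int fail)
{
        return fail ? -EIO : 8;
}

static int dev_read_demo(int nonblock, int fail)
{
        int bytes_read = backend_read(fail);

        if (!bytes_read && nonblock)
                return -EAGAIN;         /* no data, caller must not sleep */
        if (bytes_read < 0)
                return bytes_read;      /* new: pass hardware errors up */
        return bytes_read;              /* copy to user space from here on */
}

int main(void)
{
        printf("%d %d\n", dev_read_demo(1, 0), dev_read_demo(0, 1));
        return 0;
}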

View File

@ -64,6 +64,7 @@ config ZCRYPT
tristate "Support for PCI-attached cryptographic adapters"
depends on S390
select ZCRYPT_MONOLITHIC if ZCRYPT="y"
select HW_RANDOM
help
Select this option if you want to use a PCI-attached cryptographic
adapter like:

View File

@ -20,6 +20,7 @@ config DCSSBLK
config DASD
tristate "Support for DASD devices"
depends on CCW && BLOCK
select IOSCHED_DEADLINE
help
Enable this option if you want to access DASDs directly utilizing
S/390s channel subsystem commands. This is necessary for running

View File

@ -980,12 +980,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
break;
case -ETIMEDOUT:
printk(KERN_WARNING"%s(%s): request timed out\n",
__FUNCTION__, cdev->dev.bus_id);
__func__, cdev->dev.bus_id);
//FIXME - dasd uses own timeout interface...
break;
default:
printk(KERN_WARNING"%s(%s): unknown error %ld\n",
__FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
__func__, cdev->dev.bus_id, PTR_ERR(irb));
}
return;
}
@ -1956,6 +1956,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
block->request_queue->queuedata = block;
elevator_exit(block->request_queue->elevator);
block->request_queue->elevator = NULL;
rc = elevator_init(block->request_queue, "deadline");
if (rc) {
blk_cleanup_queue(block->request_queue);
@ -2298,9 +2299,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* in the other openers.
*/
if (device->block) {
struct dasd_block *block = device->block;
max_count = block->bdev ? 0 : -1;
open_count = (int) atomic_read(&block->open_count);
max_count = device->block->bdev ? 0 : -1;
open_count = atomic_read(&device->block->open_count);
if (open_count > max_count) {
if (open_count > 0)
printk(KERN_WARNING "Can't offline dasd "

View File

@ -1995,6 +1995,36 @@ dasd_3990_erp_compound(struct dasd_ccw_req * erp, char *sense)
} /* end dasd_3990_erp_compound */
/*
* DASD_3990_ERP_HANDLE_SIM
*
* DESCRIPTION
* inspects the SIM SENSE data and starts an appropriate action
*
* PARAMETER
* sense sense data of the actual error
*
* RETURN VALUES
* none
*/
void
dasd_3990_erp_handle_sim(struct dasd_device *device, char *sense)
{
/* print message according to log or message to operator mode */
if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10)) {
/* print SIM SRC from RefCode */
DEV_MESSAGE(KERN_ERR, device, "SIM - SRC: "
"%02x%02x%02x%02x", sense[22],
sense[23], sense[11], sense[12]);
} else if (sense[24] & DASD_SIM_LOG) {
/* print SIM SRC Refcode */
DEV_MESSAGE(KERN_WARNING, device, "SIM - SRC: "
"%02x%02x%02x%02x", sense[22],
sense[23], sense[11], sense[12]);
}
}
/*
* DASD_3990_ERP_INSPECT_32
*
@ -2018,6 +2048,10 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
erp->function = dasd_3990_erp_inspect_32;
/* check for SIM sense data */
if ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)
dasd_3990_erp_handle_sim(device, sense);
if (sense[25] & DASD_SENSE_BIT_0) {
/* compound program action codes (byte25 bit 0 == '1') */
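
The new handler derives the service reference code (SRC) from sense bytes 22, 23, 11 and 12 and picks the message severity from byte 24. A standalone decode of the same bytes — the flag values are taken from the dasd_int.h hunk further down, while the sense buffer itself is fabricated test data:

#include <stdio.h>

#define DASD_SIM_SENSE          0x0F
#define DASD_SIM_MSG_TO_OP      0x03
#define DASD_SIM_LOG            0x0C

static void handle_sim_demo(const unsigned char *sense)
{
        if ((sense[24] & DASD_SIM_MSG_TO_OP) || (sense[1] & 0x10))
                printf("ERR  SIM - SRC: %02x%02x%02x%02x\n",
                       sense[22], sense[23], sense[11], sense[12]);
        else if (sense[24] & DASD_SIM_LOG)
                printf("WARN SIM - SRC: %02x%02x%02x%02x\n",
                       sense[22], sense[23], sense[11], sense[12]);
}

int main(void)
{
        unsigned char sense[32] = { 0 };

        sense[6] = DASD_SIM_SENSE;      /* SIM sense data present */
        sense[24] = DASD_SIM_MSG_TO_OP; /* message-to-operator severity */
        sense[22] = 0x12; sense[23] = 0x34;
        sense[11] = 0x56; sense[12] = 0x78;
        if ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)
                handle_sim_demo(sense);
        return 0;
}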

View File

@ -745,6 +745,19 @@ static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
spin_unlock_irqrestore(&lcu->lock, flags);
}
static void __stop_device_on_lcu(struct dasd_device *device,
struct dasd_device *pos)
{
/* If pos == device then device is already locked! */
if (pos == device) {
pos->stopped |= DASD_STOPPED_SU;
return;
}
spin_lock(get_ccwdev_lock(pos->cdev));
pos->stopped |= DASD_STOPPED_SU;
spin_unlock(get_ccwdev_lock(pos->cdev));
}
/*
* This function is called in interrupt context, so the
* cdev lock for device is already locked!
@ -755,35 +768,15 @@ static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
struct alias_pav_group *pavgroup;
struct dasd_device *pos;
list_for_each_entry(pos, &lcu->active_devices, alias_list) {
if (pos != device)
spin_lock(get_ccwdev_lock(pos->cdev));
pos->stopped |= DASD_STOPPED_SU;
if (pos != device)
spin_unlock(get_ccwdev_lock(pos->cdev));
}
list_for_each_entry(pos, &lcu->inactive_devices, alias_list) {
if (pos != device)
spin_lock(get_ccwdev_lock(pos->cdev));
pos->stopped |= DASD_STOPPED_SU;
if (pos != device)
spin_unlock(get_ccwdev_lock(pos->cdev));
}
list_for_each_entry(pos, &lcu->active_devices, alias_list)
__stop_device_on_lcu(device, pos);
list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
__stop_device_on_lcu(device, pos);
list_for_each_entry(pavgroup, &lcu->grouplist, group) {
list_for_each_entry(pos, &pavgroup->baselist, alias_list) {
if (pos != device)
spin_lock(get_ccwdev_lock(pos->cdev));
pos->stopped |= DASD_STOPPED_SU;
if (pos != device)
spin_unlock(get_ccwdev_lock(pos->cdev));
}
list_for_each_entry(pos, &pavgroup->aliaslist, alias_list) {
if (pos != device)
spin_lock(get_ccwdev_lock(pos->cdev));
pos->stopped |= DASD_STOPPED_SU;
if (pos != device)
spin_unlock(get_ccwdev_lock(pos->cdev));
}
list_for_each_entry(pos, &pavgroup->baselist, alias_list)
__stop_device_on_lcu(device, pos);
list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
__stop_device_on_lcu(device, pos);
}
}
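
The rewrite folds the four repeated "lock pos unless it is the device whose lock the interrupt handler already holds" blocks into one helper. A pthread-based model of that pattern; the types and names are illustrative only:

#include <pthread.h>
#include <stdio.h>

struct dev {
        pthread_mutex_t lock;
        int stopped;
};

/* If pos == self, the caller already holds pos->lock, so taking it
 * again would deadlock; only lock devices other than the current one. */
static void stop_device_demo(struct dev *self, struct dev *pos)
{
        if (pos == self) {
                pos->stopped = 1;
                return;
        }
        pthread_mutex_lock(&pos->lock);
        pos->stopped = 1;
        pthread_mutex_unlock(&pos->lock);
}

int main(void)
{
        struct dev a = { PTHREAD_MUTEX_INITIALIZER, 0 };
        struct dev b = { PTHREAD_MUTEX_INITIALIZER, 0 };

        pthread_mutex_lock(&a.lock);    /* caller context: a already locked */
        stop_device_demo(&a, &a);
        stop_device_demo(&a, &b);
        pthread_mutex_unlock(&a.lock);
        printf("a.stopped=%d b.stopped=%d\n", a.stopped, b.stopped);
        return 0;
}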

View File

@ -1415,6 +1415,13 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
return;
}
/* service information message SIM */
if ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE) {
dasd_3990_erp_handle_sim(device, irb->ecw);
return;
}
/* just report other unsolicited interrupts */
DEV_MESSAGE(KERN_DEBUG, device, "%s",
"unsolicited interrupt received");

View File

@ -125,7 +125,8 @@ dasd_fba_check_characteristics(struct dasd_device *device)
private = (struct dasd_fba_private *) device->private;
if (private == NULL) {
private = kzalloc(sizeof(struct dasd_fba_private), GFP_KERNEL);
private = kzalloc(sizeof(struct dasd_fba_private),
GFP_KERNEL | GFP_DMA);
if (private == NULL) {
DEV_MESSAGE(KERN_WARNING, device, "%s",
"memory allocation failed for private "

View File

@ -72,6 +72,11 @@ struct dasd_block;
#define DASD_SENSE_BIT_2 0x20
#define DASD_SENSE_BIT_3 0x10
/* BIT DEFINITIONS FOR SIM SENSE */
#define DASD_SIM_SENSE 0x0F
#define DASD_SIM_MSG_TO_OP 0x03
#define DASD_SIM_LOG 0x0C
/*
* SECTION: MACROs for klogd and s390 debug feature (dbf)
*/
@ -621,6 +626,7 @@ void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
/* externals in dasd_3990_erp.c */
struct dasd_ccw_req *dasd_3990_erp_action(struct dasd_ccw_req *);
void dasd_3990_erp_handle_sim(struct dasd_device *, char *);
/* externals in dasd_eer.c */
#ifdef CONFIG_DASD_EER

View File

@ -142,57 +142,6 @@ dcssblk_get_device_by_name(char *name)
return NULL;
}
/*
* print appropriate error message for segment_load()/segment_type()
* return code
*/
static void
dcssblk_segment_warn(int rc, char* seg_name)
{
switch (rc) {
case -ENOENT:
PRINT_WARN("cannot load/query segment %s, does not exist\n",
seg_name);
break;
case -ENOSYS:
PRINT_WARN("cannot load/query segment %s, not running on VM\n",
seg_name);
break;
case -EIO:
PRINT_WARN("cannot load/query segment %s, hardware error\n",
seg_name);
break;
case -ENOTSUPP:
PRINT_WARN("cannot load/query segment %s, is a multi-part "
"segment\n", seg_name);
break;
case -ENOSPC:
PRINT_WARN("cannot load/query segment %s, overlaps with "
"storage\n", seg_name);
break;
case -EBUSY:
PRINT_WARN("cannot load/query segment %s, overlaps with "
"already loaded dcss\n", seg_name);
break;
case -EPERM:
PRINT_WARN("cannot load/query segment %s, already loaded in "
"incompatible mode\n", seg_name);
break;
case -ENOMEM:
PRINT_WARN("cannot load/query segment %s, out of memory\n",
seg_name);
break;
case -ERANGE:
PRINT_WARN("cannot load/query segment %s, exceeds kernel "
"mapping range\n", seg_name);
break;
default:
PRINT_WARN("cannot load/query segment %s, return value %i\n",
seg_name, rc);
break;
}
}
static void dcssblk_unregister_callback(struct device *dev)
{
device_unregister(dev);
@ -423,7 +372,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
rc = segment_load(local_buf, SEGMENT_SHARED,
&dev_info->start, &dev_info->end);
if (rc < 0) {
dcssblk_segment_warn(rc, dev_info->segment_name);
segment_warning(rc, dev_info->segment_name);
goto dealloc_gendisk;
}
seg_byte_size = (dev_info->end - dev_info->start + 1);

View File

@ -111,56 +111,6 @@ static void dcss_mkname(char *ascii_name, char *ebcdic_name)
ASCEBC(ebcdic_name, 8);
}
/*
* print appropriate error message for segment_load()/segment_type()
* return code
*/
static void mon_segment_warn(int rc, char* seg_name)
{
switch (rc) {
case -ENOENT:
P_WARNING("cannot load/query segment %s, does not exist\n",
seg_name);
break;
case -ENOSYS:
P_WARNING("cannot load/query segment %s, not running on VM\n",
seg_name);
break;
case -EIO:
P_WARNING("cannot load/query segment %s, hardware error\n",
seg_name);
break;
case -ENOTSUPP:
P_WARNING("cannot load/query segment %s, is a multi-part "
"segment\n", seg_name);
break;
case -ENOSPC:
P_WARNING("cannot load/query segment %s, overlaps with "
"storage\n", seg_name);
break;
case -EBUSY:
P_WARNING("cannot load/query segment %s, overlaps with "
"already loaded dcss\n", seg_name);
break;
case -EPERM:
P_WARNING("cannot load/query segment %s, already loaded in "
"incompatible mode\n", seg_name);
break;
case -ENOMEM:
P_WARNING("cannot load/query segment %s, out of memory\n",
seg_name);
break;
case -ERANGE:
P_WARNING("cannot load/query segment %s, exceeds kernel "
"mapping range\n", seg_name);
break;
default:
P_WARNING("cannot load/query segment %s, return value %i\n",
seg_name, rc);
break;
}
}
static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
{
return *(u32 *) &monmsg->msg.rmmsg;
@ -585,7 +535,7 @@ static int __init mon_init(void)
rc = segment_type(mon_dcss_name);
if (rc < 0) {
mon_segment_warn(rc, mon_dcss_name);
segment_warning(rc, mon_dcss_name);
goto out_iucv;
}
if (rc != SEG_TYPE_SC) {
@ -598,7 +548,7 @@ static int __init mon_init(void)
rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
&mon_dcss_start, &mon_dcss_end);
if (rc < 0) {
mon_segment_warn(rc, mon_dcss_name);
segment_warning(rc, mon_dcss_name);
rc = -EINVAL;
goto out_iucv;
}

View File

@ -332,7 +332,7 @@ sclp_tty_write_string(const unsigned char *str, int count)
if (sclp_ttybuf == NULL) {
while (list_empty(&sclp_tty_pages)) {
spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (in_atomic())
if (in_interrupt())
sclp_sync_wait();
else
wait_event(sclp_tty_waitq,

View File

@ -383,7 +383,7 @@ sclp_vt220_timeout(unsigned long data)
*/
static int
__sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
int convertlf)
int convertlf, int may_schedule)
{
unsigned long flags;
void *page;
@ -398,9 +398,8 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
/* Create a sclp output buffer if none exists yet */
if (sclp_vt220_current_request == NULL) {
while (list_empty(&sclp_vt220_empty)) {
spin_unlock_irqrestore(&sclp_vt220_lock,
flags);
if (in_atomic())
spin_unlock_irqrestore(&sclp_vt220_lock, flags);
if (in_interrupt() || !may_schedule)
sclp_sync_wait();
else
wait_event(sclp_vt220_waitq,
@ -450,7 +449,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
static int
sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
return __sclp_vt220_write(buf, count, 1, 0);
return __sclp_vt220_write(buf, count, 1, 0, 1);
}
#define SCLP_VT220_SESSION_ENDED 0x01
@ -529,7 +528,7 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp)
static void
sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
{
__sclp_vt220_write(&ch, 1, 0, 0);
__sclp_vt220_write(&ch, 1, 0, 0, 1);
}
/*
@ -746,7 +745,7 @@ __initcall(sclp_vt220_tty_init);
static void
sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
{
__sclp_vt220_write((const unsigned char *) buf, count, 1, 1);
__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
}
static struct tty_driver *
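
The extra may_schedule argument lets the console write path, which may run in atomic context, busy-wait for a free output buffer while the tty path keeps sleeping. A compact model of that decision; in_interrupt() is mimicked by a plain flag:

#include <stdio.h>

static int in_interrupt_demo;   /* stands in for in_interrupt() */

/* Console writers pass may_schedule == 0 and must never sleep. */
static const char *wait_for_buffer(int may_schedule)
{
        if (in_interrupt_demo || !may_schedule)
                return "busy-wait (sclp_sync_wait)";
        return "sleep (wait_event)";
}

int main(void)
{
        printf("tty write:     %s\n", wait_for_buffer(1));
        printf("console write: %s\n", wait_for_buffer(0));
        return 0;
}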

View File

@ -394,7 +394,7 @@ tape_34xx_unit_check(struct tape_device *device, struct tape_request *request,
return tape_34xx_erp_failed(request, -ENOSPC);
default:
PRINT_ERR("Invalid op in %s:%i\n",
__FUNCTION__, __LINE__);
__func__, __LINE__);
return tape_34xx_erp_failed(request, 0);
}
}

View File

@ -83,9 +83,9 @@ tapechar_setup_device(struct tape_device * device)
void
tapechar_cleanup_device(struct tape_device *device)
{
unregister_tape_dev(device->rt);
unregister_tape_dev(&device->cdev->dev, device->rt);
device->rt = NULL;
unregister_tape_dev(device->nt);
unregister_tape_dev(&device->cdev->dev, device->nt);
device->nt = NULL;
}

View File

@ -99,11 +99,10 @@ fail_with_tcd:
}
EXPORT_SYMBOL(register_tape_dev);
void unregister_tape_dev(struct tape_class_device *tcd)
void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
{
if (tcd != NULL && !IS_ERR(tcd)) {
sysfs_remove_link(&tcd->class_device->kobj,
tcd->mode_name);
sysfs_remove_link(&device->kobj, tcd->mode_name);
device_destroy(tape_class, tcd->char_device->dev);
cdev_del(tcd->char_device);
kfree(tcd);

View File

@ -56,6 +56,6 @@ struct tape_class_device *register_tape_dev(
char * device_name,
char * node_name
);
void unregister_tape_dev(struct tape_class_device *tcd);
void unregister_tape_dev(struct device *device, struct tape_class_device *tcd);
#endif /* __TAPE_CLASS_H__ */

View File

@ -100,7 +100,8 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
urd->reclen = cdev->id.driver_info;
ccw_device_get_id(cdev, &urd->dev_id);
mutex_init(&urd->io_mutex);
mutex_init(&urd->open_mutex);
init_waitqueue_head(&urd->wait);
spin_lock_init(&urd->open_lock);
atomic_set(&urd->ref_count, 1);
urd->cdev = cdev;
get_device(&cdev->dev);
@ -678,17 +679,21 @@ static int ur_open(struct inode *inode, struct file *file)
if (!urd)
return -ENXIO;
if (file->f_flags & O_NONBLOCK) {
if (!mutex_trylock(&urd->open_mutex)) {
spin_lock(&urd->open_lock);
while (urd->open_flag) {
spin_unlock(&urd->open_lock);
if (file->f_flags & O_NONBLOCK) {
rc = -EBUSY;
goto fail_put;
}
} else {
if (mutex_lock_interruptible(&urd->open_mutex)) {
if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
rc = -ERESTARTSYS;
goto fail_put;
}
spin_lock(&urd->open_lock);
}
urd->open_flag++;
spin_unlock(&urd->open_lock);
TRACE("ur_open\n");
@ -720,7 +725,9 @@ static int ur_open(struct inode *inode, struct file *file)
fail_urfile_free:
urfile_free(urf);
fail_unlock:
mutex_unlock(&urd->open_mutex);
spin_lock(&urd->open_lock);
urd->open_flag--;
spin_unlock(&urd->open_lock);
fail_put:
urdev_put(urd);
return rc;
@ -731,7 +738,10 @@ static int ur_release(struct inode *inode, struct file *file)
struct urfile *urf = file->private_data;
TRACE("ur_release\n");
mutex_unlock(&urf->urd->open_mutex);
spin_lock(&urf->urd->open_lock);
urf->urd->open_flag--;
spin_unlock(&urf->urd->open_lock);
wake_up_interruptible(&urf->urd->wait);
urdev_put(urf->urd);
urfile_free(urf);
return 0;
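
The open mutex becomes an open_flag counter guarded by a spinlock plus a wait queue, so a blocking open is interruptible and an O_NONBLOCK open fails fast with -EBUSY. A userspace model with a pthread mutex and condition variable standing in for the spinlock and wait queue:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t open_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t open_wait = PTHREAD_COND_INITIALIZER;
static int open_flag;

/* Model of ur_open(): non-blocking callers get -EBUSY, blocking
 * callers wait until the previous opener released the device. */
static int ur_open_demo(int nonblock)
{
        pthread_mutex_lock(&open_lock);
        while (open_flag) {
                if (nonblock) {
                        pthread_mutex_unlock(&open_lock);
                        return -EBUSY;
                }
                pthread_cond_wait(&open_wait, &open_lock);
        }
        open_flag++;
        pthread_mutex_unlock(&open_lock);
        return 0;
}

static void ur_release_demo(void)
{
        pthread_mutex_lock(&open_lock);
        open_flag--;
        pthread_mutex_unlock(&open_lock);
        pthread_cond_signal(&open_wait);        /* wake_up_interruptible() */
}

int main(void)
{
        printf("first open:  %d\n", ur_open_demo(0));
        printf("second open: %d\n", ur_open_demo(1));   /* -EBUSY */
        ur_release_demo();
        printf("third open:  %d\n", ur_open_demo(1));
        return 0;
}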

View File

@ -62,7 +62,6 @@ struct file_control_block {
struct urdev {
struct ccw_device *cdev; /* Backpointer to ccw device */
struct mutex io_mutex; /* Serialises device IO */
struct mutex open_mutex; /* Serialises access to device */
struct completion *io_done; /* do_ur_io waits; irq completes */
struct device *device;
struct cdev *char_device;
@ -71,6 +70,9 @@ struct urdev {
int class; /* VM device class */
int io_request_rc; /* return code from I/O request */
atomic_t ref_count; /* reference counter */
wait_queue_head_t wait; /* wait queue to serialize open */
int open_flag; /* "urdev is open" flag */
spinlock_t open_lock; /* serialize critical sections */
};
/*

View File

@ -96,7 +96,7 @@ static int vmwdt_keepalive(void)
if (ret) {
printk(KERN_WARNING "%s: problem setting interval %d, "
"cmd %s\n", __FUNCTION__, vmwdt_interval,
"cmd %s\n", __func__, vmwdt_interval,
vmwdt_cmd);
}
return ret;
@ -107,7 +107,7 @@ static int vmwdt_disable(void)
int ret = __diag288(wdt_cancel, 0, "", 0);
if (ret) {
printk(KERN_WARNING "%s: problem disabling watchdog\n",
__FUNCTION__);
__func__);
}
return ret;
}

View File

@ -224,7 +224,7 @@ static int __init init_cpu_info(enum arch_id arch)
sa = kmalloc(sizeof(*sa), GFP_KERNEL);
if (!sa) {
ERROR_MSG("kmalloc failed: %s: %i\n",__FUNCTION__, __LINE__);
ERROR_MSG("kmalloc failed: %s: %i\n",__func__, __LINE__);
return -ENOMEM;
}
if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {

View File

@ -217,6 +217,8 @@ void chsc_chp_offline(struct chp_id chpid)
if (chp_get_status(chpid) <= 0)
return;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &chpid);
}
@ -303,7 +305,8 @@ static void s390_process_res_acc (struct res_acc_data *res_data)
sprintf(dbf_txt, "fla%x", res_data->fla);
CIO_TRACE_EVENT( 2, dbf_txt);
}
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
* I/O resources may have become accessible.
* Scan through all subchannels that may be concerned and
@ -561,9 +564,12 @@ void chsc_chp_online(struct chp_id chpid)
sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
CIO_TRACE_EVENT(2, dbf_txt);
if (chp_get_status(chpid) != 0)
if (chp_get_status(chpid) != 0) {
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
for_each_subchannel_staged(__chp_add, __chp_add_new_sch,
&chpid);
}
}
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
@ -650,6 +656,8 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
*/
int chsc_chp_vary(struct chp_id chpid, int on)
{
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
* Redo PathVerification on the devices the chpid connects to
*/
@ -758,7 +766,6 @@ chsc_secm(struct channel_subsystem *css, int enable)
if (!secm_area)
return -ENOMEM;
mutex_lock(&css->mutex);
if (enable && !css->cm_enabled) {
css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
@ -766,7 +773,6 @@ chsc_secm(struct channel_subsystem *css, int enable)
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
free_page((unsigned long)secm_area);
mutex_unlock(&css->mutex);
return -ENOMEM;
}
}
@ -787,7 +793,6 @@ chsc_secm(struct channel_subsystem *css, int enable)
free_page((unsigned long)css->cub_addr1);
free_page((unsigned long)css->cub_addr2);
}
mutex_unlock(&css->mutex);
free_page((unsigned long)secm_area);
return ret;
}

View File

@ -24,6 +24,7 @@
#include <asm/ipl.h>
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/cpu.h>
#include "cio.h"
#include "css.h"
#include "chsc.h"
@ -649,13 +650,10 @@ do_IRQ (struct pt_regs *regs)
old_regs = set_irq_regs(regs);
irq_enter();
asm volatile ("mc 0,0");
if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
/**
* Make sure that the i/o interrupt did not "overtake"
* the last HZ timer interrupt.
*/
account_ticks(S390_lowcore.int_clock);
s390_idle_check();
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/* Serve timer interrupts first. */
clock_comparator_work();
/*
* Get interrupt information from lowcore
*/
@ -672,10 +670,14 @@ do_IRQ (struct pt_regs *regs)
continue;
}
sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
if (sch)
spin_lock(sch->lock);
if (!sch) {
/* Clear pending interrupt condition. */
tsch(tpi_info->schid, irb);
continue;
}
spin_lock(sch->lock);
/* Store interrupt response block to lowcore. */
if (tsch (tpi_info->schid, irb) == 0 && sch) {
if (tsch(tpi_info->schid, irb) == 0) {
/* Keep subchannel information word up to date. */
memcpy (&sch->schib.scsw, &irb->scsw,
sizeof (irb->scsw));
@ -683,8 +685,7 @@ do_IRQ (struct pt_regs *regs)
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
}
if (sch)
spin_unlock(sch->lock);
spin_unlock(sch->lock);
/*
* Are more interrupts pending?
* If so, the tpi instruction will update the lowcore
@ -710,8 +711,9 @@ void *cio_get_console_priv(void)
/*
* busy wait for the next interrupt on the console
*/
void
wait_cons_dev (void)
void wait_cons_dev(void)
__releases(console_subchannel.lock)
__acquires(console_subchannel.lock)
{
unsigned long cr6 __attribute__ ((aligned (8)));
unsigned long save_cr6 __attribute__ ((aligned (8)));

View File

@ -100,6 +100,7 @@ extern int cio_modify (struct subchannel *);
int cio_create_sch_lock(struct subchannel *);
void do_adapter_IO(void);
void do_IRQ(struct pt_regs *);
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE

View File

@ -533,6 +533,12 @@ void css_schedule_eval_all(void)
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
void css_wait_for_slow_path(void)
{
flush_workqueue(ccw_device_notify_work);
flush_workqueue(slow_path_wq);
}
/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
@ -683,10 +689,14 @@ css_cm_enable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct channel_subsystem *css = to_css(dev);
int ret;
if (!css)
return 0;
return sprintf(buf, "%x\n", css->cm_enabled);
mutex_lock(&css->mutex);
ret = sprintf(buf, "%x\n", css->cm_enabled);
mutex_unlock(&css->mutex);
return ret;
}
static ssize_t
@ -696,6 +706,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
struct channel_subsystem *css = to_css(dev);
int ret;
mutex_lock(&css->mutex);
switch (buf[0]) {
case '0':
ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
@ -706,6 +717,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
default:
ret = -EINVAL;
}
mutex_unlock(&css->mutex);
return ret < 0 ? ret : count;
}
@ -752,9 +764,11 @@ static int css_reboot_event(struct notifier_block *this,
struct channel_subsystem *css;
css = channel_subsystems[i];
mutex_lock(&css->mutex);
if (css->cm_enabled)
if (chsc_secm(css, 0))
ret = NOTIFY_BAD;
mutex_unlock(&css->mutex);
}
return ret;

View File

@ -144,6 +144,7 @@ struct schib;
int css_sch_is_valid(struct schib *);
extern struct workqueue_struct *slow_path_wq;
void css_wait_for_slow_path(void);
extern struct attribute_group *subch_attr_groups[];
#endif

View File

@ -577,7 +577,6 @@ static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
extern struct device_attribute dev_attr_cmb_enable;
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static struct attribute * subch_attrs[] = {

View File

@ -127,4 +127,5 @@ extern struct bus_type ccw_bus_type;
void retry_set_schib(struct ccw_device *cdev);
void cmf_retry_copy_block(struct ccw_device *);
int cmf_reenable(struct ccw_device *);
extern struct device_attribute dev_attr_cmb_enable;
#endif

View File

@ -193,8 +193,15 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
return -EACCES;
}
ret = cio_start_key (sch, cpa, lpm, key);
if (ret == 0)
switch (ret) {
case 0:
cdev->private->intparm = intparm;
break;
case -EACCES:
case -ENODEV:
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
break;
}
return ret;
}
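
With this hunk, start failures that point at path problems (-EACCES, -ENODEV) push the device state machine into path verification instead of being returned silently. A small model of the dispatch; the error values come from errno.h, everything else is illustrative:

#include <errno.h>
#include <stdio.h>

enum dev_event { DEV_EVENT_VERIFY };

static void dev_fsm_event(enum dev_event e)
{
        printf("fsm event %d: reverify channel paths\n", e);
}

/* rc is the injected result of the cio_start_key() stand-in. */
static int start_key_demo(int rc)
{
        switch (rc) {
        case 0:
                printf("started, intparm stored\n");
                break;
        case -EACCES:
        case -ENODEV:
                dev_fsm_event(DEV_EVENT_VERIFY);
                break;
        }
        return rc;
}

int main(void)
{
        start_key_demo(0);
        start_key_demo(-ENODEV);
        return 0;
}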

View File

@ -62,7 +62,7 @@ ccw_device_path_notoper(struct ccw_device *cdev)
stsch (sch->schid, &sch->schib);
CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are "
"not operational \n", __FUNCTION__,
"not operational \n", __func__,
sch->schid.ssid, sch->schid.sch_no,
sch->schib.pmcw.pnom);
@ -312,6 +312,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
{
struct subchannel *sch;
struct ccw1 *sense_ccw;
int rc;
sch = to_subchannel(cdev->dev.parent);
@ -337,7 +338,10 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
/* Reset internal retry indication. */
cdev->private->flags.intretry = 0;
return cio_start(sch, sense_ccw, 0xff);
rc = cio_start(sch, sense_ccw, 0xff);
if (rc == -ENODEV || rc == -EACCES)
dev_fsm_event(cdev, DEV_EVENT_VERIFY);
return rc;
}
/*

View File

@ -1399,7 +1399,7 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
* q->dev_st_chg_ind is the indicator, be it shared or not.
* only clear it, if indicator is non-shared
*/
if (!spare_ind_was_set)
if (q->dev_st_chg_ind != &spare_indicator)
tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
if (q->hydra_gives_outbound_pcis) {
@ -2217,9 +2217,78 @@ qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
return cc;
}
static int
qdio_get_ssqd_information(struct subchannel_id *schid,
struct qdio_chsc_ssqd **ssqd_area)
{
int result;
QDIO_DBF_TEXT0(0, setup, "getssqd");
*ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
if (!*ssqd_area) {
QDIO_PRINT_WARN("Could not get memory for chsc on sch x%x.\n",
schid->sch_no);
return -ENOMEM;
}
(*ssqd_area)->request = (struct chsc_header) {
.length = 0x0010,
.code = 0x0024,
};
(*ssqd_area)->first_sch = schid->sch_no;
(*ssqd_area)->last_sch = schid->sch_no;
(*ssqd_area)->ssid = schid->ssid;
result = chsc(*ssqd_area);
if (result) {
QDIO_PRINT_WARN("CHSC returned cc %i on sch 0.%x.%x.\n",
result, schid->ssid, schid->sch_no);
goto out;
}
if ((*ssqd_area)->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
QDIO_PRINT_WARN("CHSC response is 0x%x on sch 0.%x.%x.\n",
(*ssqd_area)->response.code,
schid->ssid, schid->sch_no);
goto out;
}
if (!((*ssqd_area)->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
!((*ssqd_area)->flags & CHSC_FLAG_VALIDITY) ||
((*ssqd_area)->sch != schid->sch_no)) {
QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
"using all SIGAs.\n",
schid->ssid, schid->sch_no);
goto out;
}
return 0;
out:
return -EINVAL;
}
int
qdio_get_ssqd_pct(struct ccw_device *cdev)
{
struct qdio_chsc_ssqd *ssqd_area;
struct subchannel_id schid;
char dbf_text[15];
int rc;
int pct = 0;
QDIO_DBF_TEXT0(0, setup, "getpct");
schid = ccw_device_get_subchannel_id(cdev);
rc = qdio_get_ssqd_information(&schid, &ssqd_area);
if (!rc)
pct = (int)ssqd_area->pct;
if (rc != -ENOMEM)
mempool_free(ssqd_area, qdio_mempool_scssc);
sprintf(dbf_text, "pct: %d", pct);
QDIO_DBF_TEXT2(0, setup, dbf_text);
return pct;
}
EXPORT_SYMBOL(qdio_get_ssqd_pct);
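
The extracted helper returns the CHSC scratch area through an out parameter: on -ENOMEM nothing was allocated, while on any other failure the caller still owns the buffer, which is why qdio_get_ssqd_pct() frees it unless rc == -ENOMEM. A malloc-based sketch of that ownership contract:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ssqd_demo { int pct; };

/* Fills *area; on -ENOMEM nothing was allocated, on any other error
 * the buffer exists but its contents are not usable. */
static int get_ssqd_demo(struct ssqd_demo **area, int fail_query)
{
        *area = malloc(sizeof(**area));
        if (!*area)
                return -ENOMEM;
        if (fail_query)
                return -EINVAL;
        (*area)->pct = 42;
        return 0;
}

int main(void)
{
        struct ssqd_demo *area;
        int pct = 0;
        int rc = get_ssqd_demo(&area, 0);

        if (!rc)
                pct = area->pct;
        if (rc != -ENOMEM)      /* mirrors qdio_get_ssqd_pct() */
                free(area);
        printf("rc=%d pct=%d\n", rc, pct);
        return 0;
}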
static void
qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
unsigned long token)
qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned long token)
{
struct qdio_q *q;
int i;
@ -2227,7 +2296,7 @@ qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
char dbf_text[15];
/*check if QEBSM is disabled */
if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) {
if (!(irq_ptr->is_qebsm) || !(irq_ptr->qdioac & 0x01)) {
irq_ptr->is_qebsm = 0;
irq_ptr->sch_token = 0;
irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
@ -2256,102 +2325,27 @@ qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
}
static void
qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
qdio_get_ssqd_siga(struct qdio_irq *irq_ptr)
{
int result;
unsigned char qdioac;
struct {
struct chsc_header request;
u16 reserved1:10;
u16 ssid:2;
u16 fmt:4;
u16 first_sch;
u16 reserved2;
u16 last_sch;
u32 reserved3;
struct chsc_header response;
u32 reserved4;
u8 flags;
u8 reserved5;
u16 sch;
u8 qfmt;
u8 parm;
u8 qdioac1;
u8 sch_class;
u8 reserved7;
u8 icnt;
u8 reserved8;
u8 ocnt;
u8 reserved9;
u8 mbccnt;
u16 qdioac2;
u64 sch_token;
} *ssqd_area;
int rc;
struct qdio_chsc_ssqd *ssqd_area;
QDIO_DBF_TEXT0(0,setup,"getssqd");
qdioac = 0;
ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
if (!ssqd_area) {
QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
"SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
irq_ptr->qdioac = 0;
rc = qdio_get_ssqd_information(&irq_ptr->schid, &ssqd_area);
if (rc) {
QDIO_PRINT_WARN("using all SIGAs for sch x%x.n",
irq_ptr->schid.sch_no);
irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
irq_ptr->is_qebsm = 0;
irq_ptr->sch_token = 0;
irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
return;
}
} else
irq_ptr->qdioac = ssqd_area->qdioac1;
ssqd_area->request = (struct chsc_header) {
.length = 0x0010,
.code = 0x0024,
};
ssqd_area->first_sch = irq_ptr->schid.sch_no;
ssqd_area->last_sch = irq_ptr->schid.sch_no;
ssqd_area->ssid = irq_ptr->schid.ssid;
result = chsc(ssqd_area);
if (result) {
QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
"SIGAs for sch 0.%x.%x.\n", result,
irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
irq_ptr->is_qebsm = 0;
goto out;
}
if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
QDIO_PRINT_WARN("response upon checking SIGA needs " \
"is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
ssqd_area->response.code,
irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
irq_ptr->is_qebsm = 0;
goto out;
}
if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
!(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
(ssqd_area->sch != irq_ptr->schid.sch_no)) {
QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
"using all SIGAs.\n",
irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
irq_ptr->is_qebsm = 0;
goto out;
}
qdioac = ssqd_area->qdioac1;
out:
qdio_check_subchannel_qebsm(irq_ptr, qdioac,
ssqd_area->sch_token);
mempool_free(ssqd_area, qdio_mempool_scssc);
irq_ptr->qdioac = qdioac;
qdio_check_subchannel_qebsm(irq_ptr, ssqd_area->sch_token);
if (rc != -ENOMEM)
mempool_free(ssqd_area, qdio_mempool_scssc);
}
static unsigned int
@ -3227,7 +3221,7 @@ qdio_establish(struct qdio_initialize *init_data)
return -EIO;
}
qdio_get_ssqd_information(irq_ptr);
qdio_get_ssqd_siga(irq_ptr);
/* if this gets set once, we're running under VM and can omit SVSes */
if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
omit_svs=1;

View File

@ -406,6 +406,34 @@ do_clear_global_summary(void)
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
struct qdio_chsc_ssqd {
struct chsc_header request;
u16 reserved1:10;
u16 ssid:2;
u16 fmt:4;
u16 first_sch;
u16 reserved2;
u16 last_sch;
u32 reserved3;
struct chsc_header response;
u32 reserved4;
u8 flags;
u8 reserved5;
u16 sch;
u8 qfmt;
u8 parm;
u8 qdioac1;
u8 sch_class;
u8 pct;
u8 icnt;
u8 reserved7;
u8 ocnt;
u8 reserved8;
u8 mbccnt;
u16 qdioac2;
u64 sch_token;
};
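
The qdioac1 byte of this structure carries the SIGA-necessity flags that qdio_establish() tests after the query. A tiny standalone flag check in the same style; the bit values below are assumptions for the demo, not copied from the kernel header:

#include <stdint.h>
#include <stdio.h>

/* Demo values only -- the real CHSC_FLAG_* constants live in qdio.h. */
#define DEMO_SIGA_INPUT_NECESSARY       0x40
#define DEMO_SIGA_OUTPUT_NECESSARY      0x20
#define DEMO_SIGA_SYNC_NECESSARY        0x10

int main(void)
{
        /* worst case set when the CHSC query fails: all SIGAs needed */
        uint8_t qdioac = DEMO_SIGA_INPUT_NECESSARY |
                         DEMO_SIGA_OUTPUT_NECESSARY |
                         DEMO_SIGA_SYNC_NECESSARY;

        if (qdioac & DEMO_SIGA_SYNC_NECESSARY)
                printf("SIGA-sync required, SVSes cannot be omitted\n");
        return 0;
}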
struct qdio_perf_stats {
#ifdef CONFIG_64BIT
atomic64_t tl_runs;

View File

@ -45,7 +45,7 @@ static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
/**
/*
* Module description.
*/
MODULE_AUTHOR("IBM Corporation");
@ -53,7 +53,7 @@ MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
"Copyright 2006 IBM Corporation");
MODULE_LICENSE("GPL");
/**
/*
* Module parameter
*/
int ap_domain_index = -1; /* Adjunct Processor Domain Index */
@ -69,7 +69,7 @@ static struct device *ap_root_device = NULL;
static DEFINE_SPINLOCK(ap_device_lock);
static LIST_HEAD(ap_device_list);
/**
/*
* Workqueue & timer for bus rescan.
*/
static struct workqueue_struct *ap_work_queue;
@ -77,7 +77,7 @@ static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);
/**
/*
* Tasklet & timer for AP request polling.
*/
static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
@ -88,9 +88,9 @@ static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
/**
* Test if ap instructions are available.
* ap_instructions_available() - Test if AP instructions are available.
*
* Returns 0 if the ap instructions are installed.
* Returns 0 if the AP instructions are installed.
*/
static inline int ap_instructions_available(void)
{
@ -108,12 +108,12 @@ static inline int ap_instructions_available(void)
}
/**
* Test adjunct processor queue.
* @qid: the ap queue number
* @queue_depth: pointer to queue depth value
* @device_type: pointer to device type value
* ap_test_queue(): Test adjunct processor queue.
* @qid: The AP queue number
* @queue_depth: Pointer to queue depth value
* @device_type: Pointer to device type value
*
* Returns ap queue status structure.
* Returns AP queue status structure.
*/
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
@ -130,10 +130,10 @@ ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
}
/**
* Reset adjunct processor queue.
* @qid: the ap queue number
* ap_reset_queue(): Reset adjunct processor queue.
* @qid: The AP queue number
*
* Returns ap queue status structure.
* Returns AP queue status structure.
*/
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
@ -148,16 +148,14 @@ static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
}
/**
* Send message to adjunct processor queue.
* @qid: the ap queue number
* @psmid: the program supplied message identifier
* @msg: the message text
* @length: the message length
*
* Returns ap queue status structure.
* __ap_send(): Send message to adjunct processor queue.
* @qid: The AP queue number
* @psmid: The program supplied message identifier
* @msg: The message text
* @length: The message length
*
* Returns AP queue status structure.
* Condition code 1 on NQAP can't happen because the L bit is 1.
*
* Condition code 2 on NQAP also means the send is incomplete,
* because a segment boundary was reached. The NQAP is repeated.
*/
@ -198,23 +196,20 @@ int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
}
EXPORT_SYMBOL(ap_send);
/*
* Receive message from adjunct processor queue.
* @qid: the ap queue number
* @psmid: pointer to program supplied message identifier
* @msg: the message text
* @length: the message length
*
* Returns ap queue status structure.
/**
* __ap_recv(): Receive message from adjunct processor queue.
* @qid: The AP queue number
* @psmid: Pointer to program supplied message identifier
* @msg: The message text
* @length: The message length
*
* Returns AP queue status structure.
* Condition code 1 on DQAP means the receive has taken place
* but only partially. The response is incomplete, hence the
* DQAP is repeated.
*
* Condition code 2 on DQAP also means the receive is incomplete,
* this time because a segment boundary was reached. Again, the
* DQAP is repeated.
*
* Note that gpr2 is used by the DQAP instruction to keep track of
* any 'residual' length, in case the instruction gets interrupted.
* Hence it gets zeroed before the instruction.
@ -263,11 +258,12 @@ int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
EXPORT_SYMBOL(ap_recv);
/**
* Check if an AP queue is available. The test is repeated for
* AP_MAX_RESET times.
* @qid: the ap queue number
* @queue_depth: pointer to queue depth value
* @device_type: pointer to device type value
* ap_query_queue(): Check if an AP queue is available.
* @qid: The AP queue number
* @queue_depth: Pointer to queue depth value
* @device_type: Pointer to device type value
*
* The test is repeated for AP_MAX_RESET times.
*/
static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
@ -308,8 +304,10 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
}
/**
* ap_init_queue(): Reset an AP queue.
* @qid: The AP queue number
*
* Reset an AP queue and wait for it to become available again.
* @qid: the ap queue number
*/
static int ap_init_queue(ap_qid_t qid)
{
@ -346,7 +344,10 @@ static int ap_init_queue(ap_qid_t qid)
}
/**
* Arm request timeout if a AP device was idle and a new request is submitted.
* ap_increase_queue_count(): Arm request timeout.
* @ap_dev: Pointer to an AP device.
*
* Arm request timeout if an AP device was idle and a new request is submitted.
*/
static void ap_increase_queue_count(struct ap_device *ap_dev)
{
@ -360,7 +361,10 @@ static void ap_increase_queue_count(struct ap_device *ap_dev)
}
/**
* AP device is still alive, re-schedule request timeout if there are still
* ap_decrease_queue_count(): Decrease queue count.
* @ap_dev: Pointer to an AP device.
*
* If AP device is still alive, re-schedule request timeout if there are still
* pending requests.
*/
static void ap_decrease_queue_count(struct ap_device *ap_dev)
@ -371,7 +375,7 @@ static void ap_decrease_queue_count(struct ap_device *ap_dev)
if (ap_dev->queue_count > 0)
mod_timer(&ap_dev->timeout, jiffies + timeout);
else
/**
/*
* The timeout timer should be disabled now - since
* del_timer_sync() is very expensive, we just tell via the
* reset flag to ignore the pending timeout timer.
@ -379,7 +383,7 @@ static void ap_decrease_queue_count(struct ap_device *ap_dev)
ap_dev->reset = AP_RESET_IGNORE;
}
/**
/*
* AP device related attributes.
*/
static ssize_t ap_hwtype_show(struct device *dev,
@ -433,6 +437,10 @@ static struct attribute_group ap_dev_attr_group = {
};
/**
* ap_bus_match()
* @dev: Pointer to device
* @drv: Pointer to device_driver
*
* AP bus driver registration/unregistration.
*/
static int ap_bus_match(struct device *dev, struct device_driver *drv)
@ -441,7 +449,7 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
struct ap_driver *ap_drv = to_ap_drv(drv);
struct ap_device_id *id;
/**
/*
* Compare device type of the device with the list of
* supported types of the device_driver.
*/
@ -455,8 +463,12 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
}
/**
* uevent function for AP devices. It sets up a single environment
* variable DEV_TYPE which contains the hardware device type.
* ap_uevent(): Uevent function for AP devices.
* @dev: Pointer to device
* @env: Pointer to kobj_uevent_env
*
* It sets up a single environment variable DEV_TYPE which contains the
* hardware device type.
*/
static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
{
@ -500,8 +512,10 @@ static int ap_device_probe(struct device *dev)
}
/**
* __ap_flush_queue(): Flush requests.
* @ap_dev: Pointer to the AP device
*
* Flush all requests from the request/pending queue of an AP device.
* @ap_dev: pointer to the AP device.
*/
static void __ap_flush_queue(struct ap_device *ap_dev)
{
@ -565,7 +579,7 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
}
EXPORT_SYMBOL(ap_driver_unregister);
/**
/*
* AP bus attributes.
*/
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
@ -630,14 +644,16 @@ static struct bus_attribute *const ap_bus_attrs[] = {
};
/**
* Pick one of the 16 ap domains.
* ap_select_domain(): Select an AP domain.
*
* Pick one of the 16 AP domains.
*/
static int ap_select_domain(void)
{
int queue_depth, device_type, count, max_count, best_domain;
int rc, i, j;
/**
/*
* We want to use a single domain. Either the one specified with
* the "domain=" parameter or the domain with the maximum number
* of devices.
@ -669,8 +685,10 @@ static int ap_select_domain(void)
}
/**
* Find the device type if query queue returned a device type of 0.
* ap_probe_device_type(): Find the device type of an AP.
* @ap_dev: pointer to the AP device.
*
* Find the device type if query queue returned a device type of 0.
*/
static int ap_probe_device_type(struct ap_device *ap_dev)
{
@ -764,7 +782,11 @@ out:
}
/**
* Scan the ap bus for new devices.
* __ap_scan_bus(): Scan the AP bus.
* @dev: Pointer to device
* @data: Pointer to data
*
* Scan the AP bus for new devices.
*/
static int __ap_scan_bus(struct device *dev, void *data)
{
@ -867,6 +889,8 @@ ap_config_timeout(unsigned long ptr)
}
/**
* ap_schedule_poll_timer(): Schedule poll timer.
*
* Set up the timer to run the poll tasklet
*/
static inline void ap_schedule_poll_timer(void)
@ -877,10 +901,11 @@ static inline void ap_schedule_poll_timer(void)
}
/**
* Receive pending reply messages from an AP device.
* ap_poll_read(): Receive pending reply messages from an AP device.
* @ap_dev: pointer to the AP device
* @flags: pointer to control flags, bit 2^0 is set if another poll is
* required, bit 2^1 is set if the poll timer needs to get armed
*
* Returns 0 if the device is still present, -ENODEV if not.
*/
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
@ -925,10 +950,11 @@ static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
}
/**
* Send messages from the request queue to an AP device.
* ap_poll_write(): Send messages from the request queue to an AP device.
* @ap_dev: pointer to the AP device
* @flags: pointer to control flags, bit 2^0 is set if another poll is
* required, bit 2^1 is set if the poll timer needs to get armed
*
* Returns 0 if the device is still present, -ENODEV if not.
*/
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
@ -968,11 +994,13 @@ static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
}
/**
* Poll AP device for pending replies and send new messages. If either
* ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
* ap_poll_queue(): Poll AP device for pending replies and send new messages.
* @ap_dev: pointer to the bus device
* @flags: pointer to control flags, bit 2^0 is set if another poll is
* required, bit 2^1 is set if the poll timer needs to get armed
*
* Poll AP device for pending replies and send new messages. If either
* ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
* Returns 0.
*/
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
@ -986,9 +1014,11 @@ static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
}
/**
* Queue a message to a device.
* __ap_queue_message(): Queue a message to a device.
* @ap_dev: pointer to the AP device
* @ap_msg: the message to be queued
*
* Queue a message to a device. Returns 0 if successful.
*/
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
@ -1055,12 +1085,14 @@ void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
EXPORT_SYMBOL(ap_queue_message);
/**
* ap_cancel_message(): Cancel a crypto request.
* @ap_dev: The AP device that has the message queued
* @ap_msg: The message that is to be removed
*
* Cancel a crypto request. This is done by removing the request
* from the devive pendingq or requestq queue. Note that the
* from the device pending or request queue. Note that the
* request stays on the AP queue. When it finishes the message
* reply will be discarded because the psmid can't be found.
* @ap_dev: AP device that has the message queued
* @ap_msg: the message that is to be removed
*/
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
@ -1082,7 +1114,10 @@ void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
EXPORT_SYMBOL(ap_cancel_message);
/**
* AP receive polling for finished AP requests
* ap_poll_timeout(): AP receive polling for finished AP requests.
* @unused: Unused variable.
*
* Schedules the AP tasklet.
*/
static void ap_poll_timeout(unsigned long unused)
{
@ -1090,6 +1125,9 @@ static void ap_poll_timeout(unsigned long unused)
}
/**
* ap_reset(): Reset a not responding AP device.
* @ap_dev: Pointer to the AP device
*
* Reset a not responding AP device and move all requests from the
* pending queue to the request queue.
*/
@ -1108,11 +1146,6 @@ static void ap_reset(struct ap_device *ap_dev)
ap_dev->unregistered = 1;
}
/**
* Poll all AP devices on the bus in a round robin fashion. Continue
* polling until bit 2^0 of the control flags is not set. If bit 2^1
* of the control flags has been set arm the poll timer.
*/
static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
{
spin_lock(&ap_dev->lock);
@ -1126,6 +1159,14 @@ static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
return 0;
}
/**
* ap_poll_all(): Poll all AP devices.
* @dummy: Unused variable
*
* Poll all AP devices on the bus in a round robin fashion. Continue
* polling until bit 2^0 of the control flags is not set. If bit 2^1
* of the control flags has been set arm the poll timer.
*/
static void ap_poll_all(unsigned long dummy)
{
unsigned long flags;
@ -1144,6 +1185,9 @@ static void ap_poll_all(unsigned long dummy)
}
/**
* ap_poll_thread(): Thread that polls for finished requests.
* @data: Unused pointer
*
* AP bus poll thread. The purpose of this thread is to poll for
* finished requests in a loop if there is a "free" cpu - that is
* a cpu that doesn't have anything better to do. The polling stops
@ -1213,7 +1257,10 @@ static void ap_poll_thread_stop(void)
}
/**
* Handling of request timeouts
* ap_request_timeout(): Handling of request timeouts
* @data: Holds the AP device.
*
* Handles request timeouts.
*/
static void ap_request_timeout(unsigned long data)
{
@ -1246,7 +1293,9 @@ static struct reset_call ap_reset_call = {
};
/**
* The module initialization code.
* ap_module_init(): The module initialization code.
*
* Initializes the module.
*/
int __init ap_module_init(void)
{
@ -1288,7 +1337,7 @@ int __init ap_module_init(void)
if (ap_select_domain() == 0)
ap_scan_bus(NULL);
/* Setup the ap bus rescan timer. */
/* Setup the AP bus rescan timer. */
init_timer(&ap_config_timer);
ap_config_timer.function = ap_config_timeout;
ap_config_timer.data = 0;
@ -1325,7 +1374,9 @@ static int __ap_match_all(struct device *dev, void *data)
}
/**
* The module termination code
* ap_modules_exit(): The module termination code
*
* Terminates the module.
*/
void ap_module_exit(void)
{

View File

@ -50,6 +50,15 @@ typedef unsigned int ap_qid_t;
#define AP_QID_QUEUE(_qid) ((_qid) & 15)
/**
* struct ap_queue_status - Holds the AP queue status.
* @queue_empty: Shows if queue is empty
* @replies_waiting: Waiting replies
* @queue_full: Is 1 if the queue is full
* @pad: A 4 bit pad
* @int_enabled: Shows if interrupts are enabled for the AP
* @response_code: Holds the 8 bit response code
* @pad2: A 16 bit pad
*
* The ap queue status word is returned by all three AP functions
* (PQAP, NQAP and DQAP). There's a set of flags in the first
* byte, followed by a 1 byte response code.
@ -75,7 +84,7 @@ struct ap_queue_status {
#define AP_RESPONSE_NO_FIRST_PART 0x13
#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
/**
/*
* Known device types
*/
#define AP_DEVICE_TYPE_PCICC 3
@ -84,7 +93,7 @@ struct ap_queue_status {
#define AP_DEVICE_TYPE_CEX2A 6
#define AP_DEVICE_TYPE_CEX2C 7
/**
/*
* AP reset flag states
*/
#define AP_RESET_IGNORE 0 /* request timeout will be ignored */
@ -152,7 +161,7 @@ struct ap_message {
.dev_type=(dt), \
.match_flags=AP_DEVICE_ID_MATCH_DEVICE_TYPE,
/**
/*
* Note: don't use ap_send/ap_recv after using ap_queue_message
* for the first time. Otherwise the ap message queue will get
* confused.

View File

@ -36,10 +36,11 @@
#include <linux/compat.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/hw_random.h>
#include "zcrypt_api.h"
/**
/*
* Module description.
*/
MODULE_AUTHOR("IBM Corporation");
@ -52,7 +53,10 @@ static LIST_HEAD(zcrypt_device_list);
static int zcrypt_device_count = 0;
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
/**
static int zcrypt_rng_device_add(void);
static void zcrypt_rng_device_remove(void);
/*
* Device attributes common for all crypto devices.
*/
static ssize_t zcrypt_type_show(struct device *dev,
@ -99,6 +103,9 @@ static struct attribute_group zcrypt_device_attr_group = {
};
/**
* __zcrypt_increase_preference(): Increase preference of a crypto device.
* @zdev: Pointer the crypto device
*
* Move the device towards the head of the device list.
* Need to be called while holding the zcrypt device list lock.
* Note: cards with speed_rating of 0 are kept at the end of the list.
@ -125,6 +132,9 @@ static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
}
/**
* __zcrypt_decrease_preference(): Decrease preference of a crypto device.
* @zdev: Pointer to a crypto device.
*
* Move the device towards the tail of the device list.
* Need to be called while holding the zcrypt device list lock.
* Note: cards with speed_rating of 0 are kept at the end of the list.
@ -198,7 +208,10 @@ void zcrypt_device_free(struct zcrypt_device *zdev)
EXPORT_SYMBOL(zcrypt_device_free);
/**
* Register a crypto device.
* zcrypt_device_register() - Register a crypto device.
* @zdev: Pointer to a crypto device
*
* Register a crypto device. Returns 0 if successful.
*/
int zcrypt_device_register(struct zcrypt_device *zdev)
{
@ -216,16 +229,37 @@ int zcrypt_device_register(struct zcrypt_device *zdev)
__zcrypt_increase_preference(zdev);
zcrypt_device_count++;
spin_unlock_bh(&zcrypt_device_lock);
if (zdev->ops->rng) {
rc = zcrypt_rng_device_add();
if (rc)
goto out_unregister;
}
return 0;
out_unregister:
spin_lock_bh(&zcrypt_device_lock);
zcrypt_device_count--;
list_del_init(&zdev->list);
spin_unlock_bh(&zcrypt_device_lock);
sysfs_remove_group(&zdev->ap_dev->device.kobj,
&zcrypt_device_attr_group);
put_device(&zdev->ap_dev->device);
zcrypt_device_put(zdev);
out:
return rc;
}
EXPORT_SYMBOL(zcrypt_device_register);
/**
* zcrypt_device_unregister(): Unregister a crypto device.
* @zdev: Pointer to crypto device
*
* Unregister a crypto device.
*/
void zcrypt_device_unregister(struct zcrypt_device *zdev)
{
if (zdev->ops->rng)
zcrypt_rng_device_remove();
spin_lock_bh(&zcrypt_device_lock);
zcrypt_device_count--;
list_del_init(&zdev->list);
@ -238,7 +272,9 @@ void zcrypt_device_unregister(struct zcrypt_device *zdev)
EXPORT_SYMBOL(zcrypt_device_unregister);
/**
* zcrypt_read is not be supported beyond zcrypt 1.3.1
* zcrypt_read(): Not supported beyond zcrypt 1.3.1.
*
* This function is not supported beyond zcrypt 1.3.1.
*/
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos)
@ -247,6 +283,8 @@ static ssize_t zcrypt_read(struct file *filp, char __user *buf,
}
/**
* zcrypt_write(): Not allowed.
*
* Write is not allowed.
*/
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
@ -256,7 +294,9 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
}
/**
* Device open/close functions to count number of users.
* zcrypt_open(): Count number of users.
*
* Device open function to count number of users.
*/
static int zcrypt_open(struct inode *inode, struct file *filp)
{
@ -264,13 +304,18 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
return 0;
}
/**
* zcrypt_release(): Count number of users.
*
* Device close function to count number of users.
*/
static int zcrypt_release(struct inode *inode, struct file *filp)
{
atomic_dec(&zcrypt_open_count);
return 0;
}
/**
/*
* zcrypt ioctls.
*/
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
@ -280,7 +325,7 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
if (mex->outputdatalength < mex->inputdatalength)
return -EINVAL;
/**
/*
* As long as outputdatalength is big enough, we can set the
* outputdatalength equal to the inputdatalength, since that is the
* number of bytes we will copy in any case
@ -326,7 +371,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
if (crt->outputdatalength < crt->inputdatalength ||
(crt->inputdatalength & 1))
return -EINVAL;
/**
/*
* As long as outputdatalength is big enough, we can set the
* outputdatalength equal to the inputdatalength, since that is the
* number of bytes we will copy in any case
@ -343,7 +388,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
zdev->max_mod_size < crt->inputdatalength)
continue;
if (zdev->short_crt && crt->inputdatalength > 240) {
/**
/*
* Check inputdata for leading zeros for cards
* that can't handle np_prime, bp_key, or
* u_mult_inv > 128 bytes.
@ -359,7 +404,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
copy_from_user(&z3, crt->u_mult_inv, len))
return -EFAULT;
copied = 1;
/**
/*
* We have to restart device lookup -
* the device list may have changed by now.
*/
@ -427,6 +472,37 @@ static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
return -ENODEV;
}
static long zcrypt_rng(char *buffer)
{
struct zcrypt_device *zdev;
int rc;
spin_lock_bh(&zcrypt_device_lock);
list_for_each_entry(zdev, &zcrypt_device_list, list) {
if (!zdev->online || !zdev->ops->rng)
continue;
zcrypt_device_get(zdev);
get_device(&zdev->ap_dev->device);
zdev->request_count++;
__zcrypt_decrease_preference(zdev);
if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
spin_unlock_bh(&zcrypt_device_lock);
rc = zdev->ops->rng(zdev, buffer);
spin_lock_bh(&zcrypt_device_lock);
module_put(zdev->ap_dev->drv->driver.owner);
} else
rc = -EAGAIN;
zdev->request_count--;
__zcrypt_increase_preference(zdev);
put_device(&zdev->ap_dev->device);
zcrypt_device_put(zdev);
spin_unlock_bh(&zcrypt_device_lock);
return rc;
}
spin_unlock_bh(&zcrypt_device_lock);
return -ENODEV;
}
static void zcrypt_status_mask(char status[AP_DEVICES])
{
struct zcrypt_device *zdev;
@ -514,6 +590,8 @@ static int zcrypt_count_type(int type)
}
/**
* zcrypt_ica_status(): Old, deprecated combi status call.
*
* Old, deprecated combi status call.
*/
static long zcrypt_ica_status(struct file *filp, unsigned long arg)
@ -615,7 +693,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
(int __user *) arg);
case Z90STAT_DOMAIN_INDEX:
return put_user(ap_domain_index, (int __user *) arg);
/**
/*
* Deprecated ioctls. Don't add another device count ioctl,
* you can count them yourself in the user space with the
* output of the Z90STAT_STATUS_MASK ioctl.
@ -653,7 +731,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
#ifdef CONFIG_COMPAT
/**
/*
* ioctl32 conversion routines
*/
struct compat_ica_rsa_modexpo {
@ -804,7 +882,7 @@ static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
}
#endif
/**
/*
* Misc device file operations.
*/
static const struct file_operations zcrypt_fops = {
@ -819,7 +897,7 @@ static const struct file_operations zcrypt_fops = {
.release = zcrypt_release
};
/**
/*
* Misc device.
*/
static struct miscdevice zcrypt_misc_device = {
@ -828,7 +906,7 @@ static struct miscdevice zcrypt_misc_device = {
.fops = &zcrypt_fops,
};
/**
/*
* Deprecated /proc entry support.
*/
static struct proc_dir_entry *zcrypt_entry;
@ -1022,7 +1100,7 @@ static int zcrypt_status_write(struct file *file, const char __user *buffer,
}
for (j = 0; j < 64 && *ptr; ptr++) {
/**
/*
* '0' for no device, '1' for PCICA, '2' for PCICC,
* '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
* '5' for CEX2C and '6' for CEX2A'
@ -1041,7 +1119,76 @@ out:
return count;
}
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
int rc;
/*
* We don't need locking here because the RNG API guarantees serialized
* read method calls.
*/
if (zcrypt_rng_buffer_index == 0) {
rc = zcrypt_rng((char *) zcrypt_rng_buffer);
if (rc < 0)
return -EIO;
zcrypt_rng_buffer_index = rc / sizeof *data;
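/* e.g. a full 4096-byte refill buffers 1024 32-bit values */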
}
*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
return sizeof *data;
}
static struct hwrng zcrypt_rng_dev = {
.name = "zcrypt",
.data_read = zcrypt_rng_data_read,
};
static int zcrypt_rng_device_add(void)
{
int rc = 0;
mutex_lock(&zcrypt_rng_mutex);
if (zcrypt_rng_device_count == 0) {
zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
if (!zcrypt_rng_buffer) {
rc = -ENOMEM;
goto out;
}
zcrypt_rng_buffer_index = 0;
rc = hwrng_register(&zcrypt_rng_dev);
if (rc)
goto out_free;
zcrypt_rng_device_count = 1;
} else
zcrypt_rng_device_count++;
mutex_unlock(&zcrypt_rng_mutex);
return 0;
out_free:
free_page((unsigned long) zcrypt_rng_buffer);
out:
mutex_unlock(&zcrypt_rng_mutex);
return rc;
}
static void zcrypt_rng_device_remove(void)
{
mutex_lock(&zcrypt_rng_mutex);
zcrypt_rng_device_count--;
if (zcrypt_rng_device_count == 0) {
hwrng_unregister(&zcrypt_rng_dev);
free_page((unsigned long) zcrypt_rng_buffer);
}
mutex_unlock(&zcrypt_rng_mutex);
}
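Once hwrng_register() succeeds, the hw_random core hands the data out
through its character device. A minimal user-space sketch of how the
numbers surface (the /dev/hwrng node is provided by the hw_random core
and is an assumption here, not part of this patch):
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
int main(void)
{
	uint32_t v;
	int fd = open("/dev/hwrng", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/hwrng");
		return 1;
	}
	/* rng_dev_read() may report hardware errors, so check the count. */
	if (read(fd, &v, sizeof(v)) != sizeof(v)) {
		perror("read");
		close(fd);
		return 1;
	}
	printf("hwrng sample: %08x\n", v);
	close(fd);
	return 0;
}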
/**
* zcrypt_api_init(): Module initialization.
*
* The module initialization code.
*/
int __init zcrypt_api_init(void)
@ -1076,6 +1223,8 @@ out:
}
/**
* zcrypt_api_exit(): Module termination.
*
* The module termination code.
*/
void zcrypt_api_exit(void)

View File

@ -43,17 +43,17 @@
#define DEV_NAME "zcrypt"
#define PRINTK(fmt, args...) \
printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
#define PRINTKN(fmt, args...) \
printk(KERN_DEBUG DEV_NAME ": " fmt, ## args)
#define PRINTKW(fmt, args...) \
printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
printk(KERN_WARNING DEV_NAME ": %s -> " fmt, __func__ , ## args)
#define PRINTKC(fmt, args...) \
printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
printk(KERN_CRIT DEV_NAME ": %s -> " fmt, __func__ , ## args)
#ifdef ZCRYPT_DEBUG
#define PDEBUG(fmt, args...) \
printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __FUNCTION__ , ## args)
printk(KERN_DEBUG DEV_NAME ": %s -> " fmt, __func__ , ## args)
#else
#define PDEBUG(fmt, args...) do {} while (0)
#endif
@ -100,6 +100,13 @@ struct ica_z90_status {
#define ZCRYPT_CEX2C 5
#define ZCRYPT_CEX2A 6
/**
* Large random numbers are pulled in 4096-byte chunks from the crypto cards
* and stored in a page. Be careful when increasing this buffer due to size
* limitations for AP requests.
*/
#define ZCRYPT_RNG_BUFFER_SIZE 4096
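A compile-time guard makes that warning concrete; a sketch (not part of
this patch) that could sit in any init path:
BUILD_BUG_ON(ZCRYPT_RNG_BUFFER_SIZE > PAGE_SIZE);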
struct zcrypt_device;
struct zcrypt_ops {
@ -107,6 +114,7 @@ struct zcrypt_ops {
long (*rsa_modexpo_crt)(struct zcrypt_device *,
struct ica_rsa_modexpo_crt *);
long (*send_cprb)(struct zcrypt_device *, struct ica_xcRB *);
long (*rng)(struct zcrypt_device *, char *);
};
struct zcrypt_device {

View File

@ -174,7 +174,7 @@ static inline int zcrypt_type6_mex_key_de(struct ica_rsa_modexpo *mex,
key->pvtMeHdr = static_pvt_me_hdr;
key->pvtMeSec = static_pvt_me_sec;
key->pubMeSec = static_pub_me_sec;
/**
/*
* In a private key, the modulus doesn't appear in the public
* section. So, an arbitrary public exponent of 0x010001 will be
* used.
@ -338,7 +338,7 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt,
pub = (struct cca_public_sec *)(key->key_parts + key_len);
*pub = static_cca_pub_sec;
pub->modulus_bit_len = 8 * crt->inputdatalength;
/**
/*
* In a private key, the modulus doesn't appear in the public
* section. So, an arbitrary public exponent of 0x010001 will be
* used.

View File

@ -108,7 +108,7 @@ static inline int convert_error(struct zcrypt_device *zdev,
return -EINVAL;
case REP82_ERROR_MESSAGE_TYPE:
// REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
/**
/*
* To send a message of the wrong type is a bug in the
* device driver. Warn about it, disable the device
* and then repeat the request.

View File

@ -42,7 +42,7 @@
#define PCICC_MAX_MOD_SIZE_OLD 128 /* 1024 bits */
#define PCICC_MAX_MOD_SIZE 256 /* 2048 bits */
/**
/*
* PCICC cards need a speed rating of 0. This keeps them at the end of
* the zcrypt device list (see zcrypt_api.c). PCICC cards are only
* used if no other cards are present because they are slow and can only
@ -388,7 +388,7 @@ static int convert_type86(struct zcrypt_device *zdev,
reply_len = le16_to_cpu(msg->length) - 2;
if (reply_len > outputdatalength)
return -EINVAL;
/**
/*
* For all encipher requests, the length of the ciphertext (reply_len)
* will always equal the modulus length. For MEX decipher requests
* the output needs to get padded. Minimum pad size is 10.

View File

@ -355,6 +355,55 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
return 0;
}
/**
* Prepare a type6 CPRB message for random number generation
*
* @ap_dev: AP device pointer
* @ap_msg: pointer to AP message
* @random_number_length: number of random bytes to request from the card
*/
static void rng_type6CPRB_msgX(struct ap_device *ap_dev,
struct ap_message *ap_msg,
unsigned random_number_length)
{
struct {
struct type6_hdr hdr;
struct CPRBX cprbx;
char function_code[2];
short int rule_length;
char rule[8];
short int verb_length;
short int key_length;
} __attribute__((packed)) *msg = ap_msg->message;
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
.offset1 = 0x00000058,
.agent_id = {'C', 'A'},
.function_code = {'R', 'L'},
.ToCardLen1 = sizeof *msg - sizeof(msg->hdr),
.FromCardLen1 = sizeof *msg - sizeof(msg->hdr),
};
static struct CPRBX static_cprbx = {
.cprb_len = 0x00dc,
.cprb_ver_id = 0x02,
.func_id = {0x54, 0x32},
.req_parml = sizeof *msg - sizeof(msg->hdr) -
sizeof(msg->cprbx),
.rpl_msgbl = sizeof *msg - sizeof(msg->hdr),
};
msg->hdr = static_type6_hdrX;
msg->hdr.FromCardLen2 = random_number_length;
msg->cprbx = static_cprbx;
msg->cprbx.rpl_datal = random_number_length;
msg->cprbx.domain = AP_QID_QUEUE(ap_dev->qid);
memcpy(msg->function_code, msg->hdr.function_code, 0x02);
msg->rule_length = 0x0a;
memcpy(msg->rule, "RANDOM ", 8);
msg->verb_length = 0x02;
msg->key_length = 0x02;
ap_msg->length = sizeof *msg;
}
/**
* Copy results from a type 86 ICA reply message back to user space.
*
@ -452,7 +501,7 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
reply_len = msg->length - 2;
if (reply_len > outputdatalength)
return -EINVAL;
/**
/*
* For all encipher requests, the length of the ciphertext (reply_len)
* will always equal the modulus length. For MEX decipher requests
* the output needs to get padded. Minimum pad size is 10.
@ -509,6 +558,26 @@ static int convert_type86_xcrb(struct zcrypt_device *zdev,
return 0;
}
static int convert_type86_rng(struct zcrypt_device *zdev,
struct ap_message *reply,
char *buffer)
{
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
} __attribute__((packed)) *msg = reply->message;
char *data = reply->message;
if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0) {
PDEBUG("RNG response error on PCIXCC/CEX2C rc=%hu/rs=%hu\n",
rc, rs);
return -EINVAL;
}
memcpy(buffer, data + msg->fmt2.offset2, msg->fmt2.count2);
return msg->fmt2.count2;
}
static int convert_response_ica(struct zcrypt_device *zdev,
struct ap_message *reply,
char __user *outputdata,
@ -567,6 +636,31 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
}
}
static int convert_response_rng(struct zcrypt_device *zdev,
struct ap_message *reply,
char *data)
{
struct type86x_reply *msg = reply->message;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
return -EINVAL;
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code)
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_rng(zdev, reply, data);
/* no break, incorrect cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
PRINTK("Unrecognized Message Header: %08x%08x\n",
*(unsigned int *) reply->message,
*(unsigned int *) (reply->message+4));
zdev->online = 0;
return -EAGAIN; /* repeat the request on a different device. */
}
}
/**
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
@ -735,6 +829,42 @@ out_free:
return rc;
}
/**
* The request distributor calls this function if it picked the PCIXCC/CEX2C
* device to generate random data.
* @zdev: pointer to zcrypt_device structure that identifies the
* PCIXCC/CEX2C device to the request distributor
* @buffer: pointer to a memory page to return random data
*/
static long zcrypt_pcixcc_rng(struct zcrypt_device *zdev,
char *buffer)
{
struct ap_message ap_msg;
struct response_type resp_type = {
.type = PCIXCC_RESPONSE_TYPE_XCRB,
};
int rc;
ap_msg.message = kmalloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE, GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
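/*
 * The psmid tags the request so the asynchronous reply can be matched
 * back to it: the caller's pid in the high word, a global sequence
 * counter in the low word.
 */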
ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
ap_msg.private = &resp_type;
rng_type6CPRB_msgX(zdev->ap_dev, &ap_msg, ZCRYPT_RNG_BUFFER_SIZE);
init_completion(&resp_type.work);
ap_queue_message(zdev->ap_dev, &ap_msg);
rc = wait_for_completion_interruptible(&resp_type.work);
if (rc == 0)
rc = convert_response_rng(zdev, &ap_msg, buffer);
else
/* Signal pending. */
ap_cancel_message(zdev->ap_dev, &ap_msg);
kfree(ap_msg.message);
return rc;
}
/**
* The crypto operations for a PCIXCC/CEX2C card.
*/
@ -744,6 +874,13 @@ static struct zcrypt_ops zcrypt_pcixcc_ops = {
.send_cprb = zcrypt_pcixcc_send_cprb,
};
static struct zcrypt_ops zcrypt_pcixcc_with_rng_ops = {
.rsa_modexpo = zcrypt_pcixcc_modexpo,
.rsa_modexpo_crt = zcrypt_pcixcc_modexpo_crt,
.send_cprb = zcrypt_pcixcc_send_cprb,
.rng = zcrypt_pcixcc_rng,
};
/**
* Micro-code detection function. It sends a message to a pcixcc card
* to find out the microcode level.
@ -858,6 +995,58 @@ out_free:
return rc;
}
/**
* Large random number detection function. It sends a message to a pcixcc
* card to find out if large random numbers are supported.
* @ap_dev: pointer to the AP device.
*
* Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
*/
static int zcrypt_pcixcc_rng_supported(struct ap_device *ap_dev)
{
struct ap_message ap_msg;
unsigned long long psmid;
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
} __attribute__((packed)) *reply;
int rc, i;
ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
if (!ap_msg.message)
return -ENOMEM;
rng_type6CPRB_msgX(ap_dev, &ap_msg, 4);
rc = ap_send(ap_dev->qid, 0x0102030405060708ULL, ap_msg.message,
ap_msg.length);
if (rc)
goto out_free;
/* Wait up to two seconds (2*HZ polls, 1000/HZ ms apart) for the test message to complete. */
for (i = 0; i < 2 * HZ; i++) {
msleep(1000 / HZ);
rc = ap_recv(ap_dev->qid, &psmid, ap_msg.message, 4096);
if (rc == 0 && psmid == 0x0102030405060708ULL)
break;
}
if (i >= 2 * HZ) {
/* Got no answer. */
rc = -ENODEV;
goto out_free;
}
reply = ap_msg.message;
if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
rc = 1;
else
rc = 0;
out_free:
free_page((unsigned long) ap_msg.message);
return rc;
}
/**
* Probe function for PCIXCC/CEX2C cards. It always accepts the AP device
* since the bus_match already checked the hardware type. The PCIXCC
@ -874,7 +1063,6 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
if (!zdev)
return -ENOMEM;
zdev->ap_dev = ap_dev;
zdev->ops = &zcrypt_pcixcc_ops;
zdev->online = 1;
if (ap_dev->device_type == AP_DEVICE_TYPE_PCIXCC) {
rc = zcrypt_pcixcc_mcl(ap_dev);
@ -901,6 +1089,15 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
zdev->min_mod_size = PCIXCC_MIN_MOD_SIZE;
zdev->max_mod_size = PCIXCC_MAX_MOD_SIZE;
}
rc = zcrypt_pcixcc_rng_supported(ap_dev);
if (rc < 0) {
zcrypt_device_free(zdev);
return rc;
}
if (rc)
zdev->ops = &zcrypt_pcixcc_with_rng_ops;
else
zdev->ops = &zcrypt_pcixcc_ops;
ap_dev->reply = &zdev->reply;
ap_dev->private = zdev;
rc = zcrypt_device_register(zdev);

File diff suppressed because it is too large.

View File

@ -670,7 +670,7 @@ static void conn_action_rx(fsm_instance *fi, int event, void *arg)
struct netiucv_priv *privptr = netdev_priv(conn->netdev);
int rc;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
IUCV_DBF_TEXT(trace, 4, __func__);
if (!conn->netdev) {
iucv_message_reject(conn->path, msg);
@ -718,7 +718,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
struct ll_header header;
int rc;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
IUCV_DBF_TEXT(trace, 4, __func__);
if (conn && conn->netdev)
privptr = netdev_priv(conn->netdev);
@ -799,7 +799,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
struct netiucv_priv *privptr = netdev_priv(netdev);
int rc;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
conn->path = path;
path->msglim = NETIUCV_QUEUELEN_DEFAULT;
@ -821,7 +821,7 @@ static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
struct iucv_event *ev = arg;
struct iucv_path *path = ev->data;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
iucv_path_sever(path, NULL);
}
@ -831,7 +831,7 @@ static void conn_action_connack(fsm_instance *fi, int event, void *arg)
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = netdev_priv(netdev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
fsm_newstate(fi, CONN_STATE_IDLE);
netdev->tx_queue_len = conn->path->msglim;
@ -842,7 +842,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
struct iucv_connection *conn = arg;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
iucv_path_sever(conn->path, NULL);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
@ -854,7 +854,7 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = netdev_priv(netdev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
iucv_path_sever(conn->path, NULL);
@ -870,7 +870,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
struct iucv_connection *conn = arg;
int rc;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
PRINT_DEBUG("%s('%s'): connecting ...\n",
@ -948,7 +948,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg)
struct net_device *netdev = conn->netdev;
struct netiucv_priv *privptr = netdev_priv(netdev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
fsm_newstate(fi, CONN_STATE_STOPPED);
@ -1024,7 +1024,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
struct net_device *dev = arg;
struct netiucv_priv *privptr = netdev_priv(dev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_newstate(fi, DEV_STATE_STARTWAIT);
fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
@ -1044,7 +1044,7 @@ dev_action_stop(fsm_instance *fi, int event, void *arg)
struct netiucv_priv *privptr = netdev_priv(dev);
struct iucv_event ev;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
ev.conn = privptr->conn;
@ -1066,7 +1066,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
struct net_device *dev = arg;
struct netiucv_priv *privptr = netdev_priv(dev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT:
@ -1097,7 +1097,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
static void
dev_action_conndown(fsm_instance *fi, int event, void *arg)
{
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
switch (fsm_getstate(fi)) {
case DEV_STATE_RUNNING:
@ -1288,7 +1288,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
struct netiucv_priv *privptr = netdev_priv(dev);
int rc;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
IUCV_DBF_TEXT(trace, 4, __func__);
/**
* Some sanity checks ...
*/
@ -1344,7 +1344,7 @@ static struct net_device_stats *netiucv_stats (struct net_device * dev)
{
struct netiucv_priv *priv = netdev_priv(dev);
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return &priv->stats;
}
@ -1360,7 +1360,7 @@ static struct net_device_stats *netiucv_stats (struct net_device * dev)
*/
static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
{
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
return -EINVAL;
@ -1378,7 +1378,7 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
}
@ -1393,7 +1393,7 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
int i;
struct iucv_connection *cp;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
if (count > 9) {
PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
IUCV_DBF_TEXT_(setup, 2,
@ -1449,7 +1449,7 @@ static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
char *buf)
{ struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}
@ -1461,7 +1461,7 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
char *e;
int bs1;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
if (count >= 39)
return -EINVAL;
@ -1513,7 +1513,7 @@ static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
}
@ -1524,7 +1524,7 @@ static ssize_t conn_fsm_show (struct device *dev,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
}
@ -1535,7 +1535,7 @@ static ssize_t maxmulti_show (struct device *dev,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}
@ -1545,7 +1545,7 @@ static ssize_t maxmulti_write (struct device *dev,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.maxmulti = 0;
return count;
}
@ -1557,7 +1557,7 @@ static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}
@ -1566,7 +1566,7 @@ static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.maxcqueue = 0;
return count;
}
@ -1578,7 +1578,7 @@ static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}
@ -1587,7 +1587,7 @@ static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.doios_single = 0;
return count;
}
@ -1599,7 +1599,7 @@ static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}
@ -1608,7 +1608,7 @@ static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
priv->conn->prof.doios_multi = 0;
return count;
}
@ -1620,7 +1620,7 @@ static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}
@ -1629,7 +1629,7 @@ static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.txlen = 0;
return count;
}
@ -1641,7 +1641,7 @@ static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}
@ -1650,7 +1650,7 @@ static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.tx_time = 0;
return count;
}
@ -1662,7 +1662,7 @@ static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}
@ -1671,7 +1671,7 @@ static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.tx_pending = 0;
return count;
}
@ -1683,7 +1683,7 @@ static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
IUCV_DBF_TEXT(trace, 5, __func__);
return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}
@ -1692,7 +1692,7 @@ static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
{
struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
IUCV_DBF_TEXT(trace, 4, __func__);
priv->conn->prof.tx_max_pending = 0;
return count;
}
@ -1732,7 +1732,7 @@ static int netiucv_add_files(struct device *dev)
{
int ret;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
if (ret)
return ret;
@ -1744,7 +1744,7 @@ static int netiucv_add_files(struct device *dev)
static void netiucv_remove_files(struct device *dev)
{
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
}
@ -1756,7 +1756,7 @@ static int netiucv_register_device(struct net_device *ndev)
int ret;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
if (dev) {
snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
@ -1792,7 +1792,7 @@ out_unreg:
static void netiucv_unregister_device(struct device *dev)
{
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
netiucv_remove_files(dev);
device_unregister(dev);
}
@ -1857,7 +1857,7 @@ out:
*/
static void netiucv_remove_connection(struct iucv_connection *conn)
{
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
write_lock_bh(&iucv_connection_rwlock);
list_del_init(&conn->list);
write_unlock_bh(&iucv_connection_rwlock);
@ -1881,7 +1881,7 @@ static void netiucv_free_netdevice(struct net_device *dev)
{
struct netiucv_priv *privptr = netdev_priv(dev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
if (!dev)
return;
@ -1963,7 +1963,7 @@ static ssize_t conn_write(struct device_driver *drv,
struct netiucv_priv *priv;
struct iucv_connection *cp;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
if (count>9) {
PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
@ -2048,7 +2048,7 @@ static ssize_t remove_write (struct device_driver *drv,
const char *p;
int i;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
if (count >= IFNAMSIZ)
count = IFNAMSIZ - 1;
@ -2116,7 +2116,7 @@ static void __exit netiucv_exit(void)
struct netiucv_priv *priv;
struct device *dev;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
IUCV_DBF_TEXT(trace, 3, __func__);
while (!list_empty(&iucv_connection_list)) {
cp = list_entry(iucv_connection_list.next,
struct iucv_connection, list);
@ -2146,8 +2146,7 @@ static int __init netiucv_init(void)
rc = iucv_register(&netiucv_handler, 1);
if (rc)
goto out_dbf;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
netiucv_driver.groups = netiucv_drv_attr_groups;
IUCV_DBF_TEXT(trace, 3, __func__);
rc = driver_register(&netiucv_driver);
if (rc) {
PRINT_ERR("NETIUCV: failed to register driver.\n");

View File

@ -59,15 +59,15 @@ repeat:
printk(KERN_WARNING"%s: Code does not support more "
"than two chained crws; please report to "
"linux390@de.ibm.com!\n", __FUNCTION__);
"linux390@de.ibm.com!\n", __func__);
ccode = stcrw(&tmp_crw);
printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
"chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
__FUNCTION__, tmp_crw.slct, tmp_crw.oflw,
__func__, tmp_crw.slct, tmp_crw.oflw,
tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
tmp_crw.erc, tmp_crw.rsid);
printk(KERN_WARNING"%s: This was crw number %x in the "
"chain\n", __FUNCTION__, chain);
"chain\n", __func__, chain);
if (ccode != 0)
break;
chain = tmp_crw.chn ? chain + 1 : 0;
@ -83,7 +83,7 @@ repeat:
crw[chain].rsid);
/* Check for overflows. */
if (crw[chain].oflw) {
pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
pr_debug("%s: crw overflow detected!\n", __func__);
css_schedule_eval_all();
chain = 0;
continue;

View File

@ -105,4 +105,8 @@ static inline int stcrw(struct crw *pcrw )
#define ED_ETR_SYNC 12 /* External damage ETR sync check */
#define ED_ETR_SWITCH 13 /* External damage ETR switch to local */
struct pt_regs;
void s390_handle_mcck(void);
void s390_do_machine_check(struct pt_regs *regs);
#endif /* __s390mach */

View File

@ -539,7 +539,7 @@ struct zfcp_rc_entry {
/* logging routine for zfcp */
#define _ZFCP_LOG(fmt, args...) \
printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __FUNCTION__, \
printk(KERN_ERR ZFCP_NAME": %s(%d): " fmt, __func__, \
__LINE__ , ##args)
#define ZFCP_LOG(level, fmt, args...) \

View File

@ -11,111 +11,13 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
/* Sigh, math-emu. Don't ask. */
#include <asm/sfp-util.h>
#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
struct sysinfo_1_1_1 {
char reserved_0[32];
char manufacturer[16];
char type[4];
char reserved_1[12];
char model_capacity[16];
char sequence[16];
char plant[4];
char model[16];
};
struct sysinfo_1_2_1 {
char reserved_0[80];
char sequence[16];
char plant[4];
char reserved_1[2];
unsigned short cpu_address;
};
struct sysinfo_1_2_2 {
char format;
char reserved_0[1];
unsigned short acc_offset;
char reserved_1[24];
unsigned int secondary_capability;
unsigned int capability;
unsigned short cpus_total;
unsigned short cpus_configured;
unsigned short cpus_standby;
unsigned short cpus_reserved;
unsigned short adjustment[0];
};
struct sysinfo_1_2_2_extension {
unsigned int alt_capability;
unsigned short alt_adjustment[0];
};
struct sysinfo_2_2_1 {
char reserved_0[80];
char sequence[16];
char plant[4];
unsigned short cpu_id;
unsigned short cpu_address;
};
struct sysinfo_2_2_2 {
char reserved_0[32];
unsigned short lpar_number;
char reserved_1;
unsigned char characteristics;
unsigned short cpus_total;
unsigned short cpus_configured;
unsigned short cpus_standby;
unsigned short cpus_reserved;
char name[8];
unsigned int caf;
char reserved_2[16];
unsigned short cpus_dedicated;
unsigned short cpus_shared;
};
#define LPAR_CHAR_DEDICATED (1 << 7)
#define LPAR_CHAR_SHARED (1 << 6)
#define LPAR_CHAR_LIMITED (1 << 5)
struct sysinfo_3_2_2 {
char reserved_0[31];
unsigned char count;
struct {
char reserved_0[4];
unsigned short cpus_total;
unsigned short cpus_configured;
unsigned short cpus_standby;
unsigned short cpus_reserved;
char name[8];
unsigned int caf;
char cpi[16];
char reserved_1[24];
} vm[8];
};
static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
{
register int r0 asm("0") = (fc << 28) | sel1;
register int r1 asm("1") = sel2;
asm volatile(
" stsi 0(%2)\n"
"0: jz 2f\n"
"1: lhi %0,%3\n"
"2:\n"
EX_TABLE(0b,1b)
: "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
: "cc", "memory" );
return r0;
}
static inline int stsi_0(void)
{
int rc = stsi (NULL, 0, 0, 0);
@ -133,6 +35,8 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
EBCASC(info->sequence, sizeof(info->sequence));
EBCASC(info->plant, sizeof(info->plant));
EBCASC(info->model_capacity, sizeof(info->model_capacity));
EBCASC(info->model_perm_cap, sizeof(info->model_perm_cap));
EBCASC(info->model_temp_cap, sizeof(info->model_temp_cap));
len += sprintf(page + len, "Manufacturer: %-16.16s\n",
info->manufacturer);
len += sprintf(page + len, "Type: %-4.4s\n",
@ -155,8 +59,18 @@ static int stsi_1_1_1(struct sysinfo_1_1_1 *info, char *page, int len)
info->sequence);
len += sprintf(page + len, "Plant: %-4.4s\n",
info->plant);
len += sprintf(page + len, "Model Capacity: %-16.16s\n",
info->model_capacity);
len += sprintf(page + len, "Model Capacity: %-16.16s %08u\n",
info->model_capacity, *(u32 *) info->model_cap_rating);
if (info->model_perm_cap[0] != '\0')
len += sprintf(page + len,
"Model Perm. Capacity: %-16.16s %08u\n",
info->model_perm_cap,
*(u32 *) info->model_perm_cap_rating);
if (info->model_temp_cap[0] != '\0')
len += sprintf(page + len,
"Model Temp. Capacity: %-16.16s %08u\n",
info->model_temp_cap,
*(u32 *) info->model_temp_cap_rating);
return len;
}

View File

@ -397,6 +397,10 @@ struct cio_iplinfo {
extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
/* Functions from drivers/s390/cio/chsc.c */
int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
int chsc_sstpi(void *page, void *result, size_t size);
#endif
#endif

View File

@ -22,4 +22,12 @@ struct s390_idle_data {
DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
void s390_idle_leave(void);
static inline void s390_idle_check(void)
{
if ((&__get_cpu_var(s390_idle))->in_idle)
s390_idle_leave();
}
#endif /* _ASM_S390_CPU_H_ */

View File

@ -73,6 +73,7 @@ typedef struct debug_info {
struct dentry* debugfs_entries[DEBUG_MAX_VIEWS];
struct debug_view* views[DEBUG_MAX_VIEWS];
char name[DEBUG_MAX_NAME_LEN];
mode_t mode;
} debug_info_t;
typedef int (debug_header_proc_t) (debug_info_t* id,
@ -122,6 +123,10 @@ debug_entry_t* debug_exception_common(debug_info_t* id, int level,
debug_info_t* debug_register(char* name, int pages, int nr_areas,
int buf_size);
debug_info_t *debug_register_mode(char *name, int pages, int nr_areas,
int buf_size, mode_t mode, uid_t uid,
gid_t gid);
void debug_unregister(debug_info_t* id);
void debug_set_level(debug_info_t* id, int new_level);
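A minimal usage sketch for the new interface ("mydrv" and the sizes are
hypothetical; uid/gid must currently be 0):
debug_info_t *dbf = debug_register_mode("mydrv", 4, 1, 16,
					S_IRUSR | S_IWUSR, 0, 0);
if (dbf)
	debug_set_level(dbf, 3);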

View File

@ -22,11 +22,12 @@
#define SEGMENT_SHARED 0
#define SEGMENT_EXCLUSIVE 1
extern int segment_load (char *name,int segtype,unsigned long *addr,unsigned long *length);
extern void segment_unload(char *name);
extern void segment_save(char *name);
extern int segment_type (char* name);
extern int segment_modify_shared (char *name, int do_nonshared);
int segment_load (char *name, int segtype, unsigned long *addr, unsigned long *length);
void segment_unload(char *name);
void segment_save(char *name);
int segment_type (char* name);
int segment_modify_shared (char *name, int do_nonshared);
void segment_warning(int rc, char *seg_name);
#endif
#endif

View File

@ -32,6 +32,6 @@ typedef struct {
#define HARDIRQ_BITS 8
extern void account_ticks(u64 time);
void clock_comparator_work(void);
#endif /* __ASM_HARDIRQ_H */

View File

@ -56,6 +56,8 @@
#define __LC_IO_INT_WORD 0x0C0
#define __LC_MCCK_CODE 0x0E8
#define __LC_LAST_BREAK 0x110
#define __LC_RETURN_PSW 0x200
#define __LC_SAVE_AREA 0xC00
@ -80,7 +82,6 @@
#define __LC_CPUID 0xC60
#define __LC_CPUADDR 0xC68
#define __LC_IPLDEV 0xC7C
#define __LC_JIFFY_TIMER 0xC80
#define __LC_CURRENT 0xC90
#define __LC_INT_CLOCK 0xC98
#else /* __s390x__ */
@ -103,7 +104,6 @@
#define __LC_CPUID 0xD80
#define __LC_CPUADDR 0xD88
#define __LC_IPLDEV 0xDB8
#define __LC_JIFFY_TIMER 0xDC0
#define __LC_CURRENT 0xDD8
#define __LC_INT_CLOCK 0xDE8
#endif /* __s390x__ */
@ -276,7 +276,7 @@ struct _lowcore
/* entry.S sensitive area end */
/* SMP info area: defined by DJB */
__u64 jiffy_timer; /* 0xc80 */
__u64 clock_comparator; /* 0xc80 */
__u32 ext_call_fast; /* 0xc88 */
__u32 percpu_offset; /* 0xc8c */
__u32 current_task; /* 0xc90 */
@ -368,11 +368,12 @@ struct _lowcore
/* entry.S sensitive area end */
/* SMP info area: defined by DJB */
__u64 jiffy_timer; /* 0xdc0 */
__u64 clock_comparator; /* 0xdc0 */
__u64 ext_call_fast; /* 0xdc8 */
__u64 percpu_offset; /* 0xdd0 */
__u64 current_task; /* 0xdd8 */
__u64 softirq_pending; /* 0xde0 */
__u32 softirq_pending; /* 0xde0 */
__u32 pad_0x0de4; /* 0xde4 */
__u64 int_clock; /* 0xde8 */
__u8 pad12[0xe00-0xdf0]; /* 0xdf0 */

View File

@ -175,6 +175,13 @@ extern void task_show_regs(struct seq_file *m, struct task_struct *task);
extern void show_registers(struct pt_regs *regs);
extern void show_code(struct pt_regs *regs);
extern void show_trace(struct task_struct *task, unsigned long *sp);
#ifdef CONFIG_64BIT
extern void show_last_breaking_event(struct pt_regs *regs);
#else
static inline void show_last_breaking_event(struct pt_regs *regs)
{
}
#endif
unsigned long get_wchan(struct task_struct *p);
#define task_pt_regs(tsk) ((struct pt_regs *) \

View File

@ -90,6 +90,9 @@ extern void __cpu_die (unsigned int cpu);
extern void cpu_die (void) __attribute__ ((noreturn));
extern int __cpu_up (unsigned int cpu);
extern struct mutex smp_cpu_state_mutex;
extern int smp_cpu_polarization[];
extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
void *info, int wait);
#endif

include/asm-s390/sysinfo.h (new file, 116 lines)
View File

@ -0,0 +1,116 @@
/*
* Definitions for the store-system-information (stsi) instruction
*
* Copyright IBM Corp. 2001,2008
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
* as published by the Free Software Foundation.
*
* Author(s): Ulrich Weigand <weigand@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
struct sysinfo_1_1_1 {
char reserved_0[32];
char manufacturer[16];
char type[4];
char reserved_1[12];
char model_capacity[16];
char sequence[16];
char plant[4];
char model[16];
char model_perm_cap[16];
char model_temp_cap[16];
char model_cap_rating[4];
char model_perm_cap_rating[4];
char model_temp_cap_rating[4];
};
struct sysinfo_1_2_1 {
char reserved_0[80];
char sequence[16];
char plant[4];
char reserved_1[2];
unsigned short cpu_address;
};
struct sysinfo_1_2_2 {
char format;
char reserved_0[1];
unsigned short acc_offset;
char reserved_1[24];
unsigned int secondary_capability;
unsigned int capability;
unsigned short cpus_total;
unsigned short cpus_configured;
unsigned short cpus_standby;
unsigned short cpus_reserved;
unsigned short adjustment[0];
};
struct sysinfo_1_2_2_extension {
unsigned int alt_capability;
unsigned short alt_adjustment[0];
};
struct sysinfo_2_2_1 {
char reserved_0[80];
char sequence[16];
char plant[4];
unsigned short cpu_id;
unsigned short cpu_address;
};
struct sysinfo_2_2_2 {
char reserved_0[32];
unsigned short lpar_number;
char reserved_1;
unsigned char characteristics;
unsigned short cpus_total;
unsigned short cpus_configured;
unsigned short cpus_standby;
unsigned short cpus_reserved;
char name[8];
unsigned int caf;
char reserved_2[16];
unsigned short cpus_dedicated;
unsigned short cpus_shared;
};
#define LPAR_CHAR_DEDICATED (1 << 7)
#define LPAR_CHAR_SHARED (1 << 6)
#define LPAR_CHAR_LIMITED (1 << 5)
struct sysinfo_3_2_2 {
char reserved_0[31];
unsigned char count;
struct {
char reserved_0[4];
unsigned short cpus_total;
unsigned short cpus_configured;
unsigned short cpus_standby;
unsigned short cpus_reserved;
char name[8];
unsigned int caf;
char cpi[16];
char reserved_1[24];
} vm[8];
};
static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
{
register int r0 asm("0") = (fc << 28) | sel1;
register int r1 asm("1") = sel2;
asm volatile(
" stsi 0(%2)\n"
"0: jz 2f\n"
"1: lhi %0,%3\n"
"2:\n"
EX_TABLE(0b, 1b)
: "+d" (r0) : "d" (r1), "a" (sysinfo), "K" (-ENOSYS)
: "cc", "memory");
return r0;
}
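A usage sketch (not in this patch): the function code and first selector
are encoded into r0, the second selector into r1, and -ENOSYS comes back
if the requested block is unsupported. Querying the basic machine block
might look like:
struct sysinfo_1_1_1 *info = (void *) get_zeroed_page(GFP_KERNEL);
if (info && stsi(info, 1, 1, 1) != -ENOSYS)
	EBCASC(info->manufacturer, sizeof(info->manufacturer));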

View File

@ -406,6 +406,8 @@ __set_psw_mask(unsigned long mask)
#define local_mcck_enable() __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
int stfle(unsigned long long *list, int doublewords);
#ifdef CONFIG_SMP
extern void smp_ctl_set_bit(int cr, int bit);

View File

@ -62,16 +62,18 @@ static inline unsigned long long get_clock (void)
return clk;
}
static inline void get_clock_extended(void *dest)
static inline unsigned long long get_clock_xt(void)
{
typedef struct { unsigned long long clk[2]; } __clock_t;
unsigned char clk[16];
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
asm volatile("stcke %0" : "=Q" (*((__clock_t *)dest)) : : "cc");
asm volatile("stcke %0" : "=Q" (clk) : : "cc");
#else /* __GNUC__ */
asm volatile("stcke 0(%1)" : "=m" (*((__clock_t *)dest))
: "a" ((__clock_t *)dest) : "cc");
asm volatile("stcke 0(%1)" : "=m" (clk)
: "a" (clk) : "cc");
#endif /* __GNUC__ */
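/*
 * STCKE stores 16 bytes: byte 0 carries the epoch index, bytes 1-8
 * match the 8-byte STCK value, so reading from &clk[1] yields a value
 * comparable with get_clock().
 */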
return *((unsigned long long *)&clk[1]);
}
static inline cycles_t get_cycles(void)
@ -81,5 +83,6 @@ static inline cycles_t get_cycles(void)
int get_sync_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);
#endif

View File

@ -13,12 +13,14 @@ static inline void __tlb_flush_local(void)
asm volatile("ptlb" : : : "memory");
}
#ifdef CONFIG_SMP
/*
* Flush all tlb entries on all cpus.
*/
void smp_ptlb_all(void);
static inline void __tlb_flush_global(void)
{
extern void smp_ptlb_all(void);
register unsigned long reg2 asm("2");
register unsigned long reg3 asm("3");
register unsigned long reg4 asm("4");
@ -39,6 +41,25 @@ static inline void __tlb_flush_global(void)
: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
}
static inline void __tlb_flush_full(struct mm_struct *mm)
{
cpumask_t local_cpumask;
preempt_disable();
/*
* If the process only ran on the local cpu, do a local flush.
*/
local_cpumask = cpumask_of_cpu(smp_processor_id());
if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
__tlb_flush_local();
else
__tlb_flush_global();
preempt_enable();
}
#else
#define __tlb_flush_full(mm) __tlb_flush_local()
#endif
/*
* Flush all tlb entries of a page table on all cpus.
*/
@ -51,8 +72,6 @@ static inline void __tlb_flush_idte(unsigned long asce)
static inline void __tlb_flush_mm(struct mm_struct * mm)
{
cpumask_t local_cpumask;
if (unlikely(cpus_empty(mm->cpu_vm_mask)))
return;
/*
@ -69,16 +88,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
mm->context.asce_bits);
return;
}
preempt_disable();
/*
* If the process only ran on the local cpu, do a local flush.
*/
local_cpumask = cpumask_of_cpu(smp_processor_id());
if (cpus_equal(mm->cpu_vm_mask, local_cpumask))
__tlb_flush_local();
else
__tlb_flush_global();
preempt_enable();
__tlb_flush_full(mm);
}
static inline void __tlb_flush_mm_cond(struct mm_struct * mm)

View File

@ -1,6 +1,29 @@
#ifndef _ASM_S390_TOPOLOGY_H
#define _ASM_S390_TOPOLOGY_H
#include <linux/cpumask.h>
#define mc_capable() (1)
cpumask_t cpu_coregroup_map(unsigned int cpu);
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
#define POLARIZATION_UNKNWN (-1)
#define POLARIZATION_HRZ (0)
#define POLARIZATION_VL (1)
#define POLARIZATION_VM (2)
#define POLARIZATION_VH (3)
#ifdef CONFIG_SMP
void s390_init_cpu_topology(void);
#else
static inline void s390_init_cpu_topology(void)
{
}
#endif
#include <asm-generic/topology.h>
#endif /* _ASM_S390_TOPOLOGY_H */

View File

@ -102,6 +102,25 @@ extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
extern int irq_can_set_affinity(unsigned int irq);
#else /* CONFIG_SMP */
static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
{
return -EINVAL;
}
static inline int irq_can_set_affinity(unsigned int irq)
{
return 0;
}
#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
#ifdef CONFIG_GENERIC_HARDIRQS
/*
* Special lockdep variants of irq disabling/enabling.

View File

@ -228,21 +228,11 @@ static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
#endif /* CONFIG_GENERIC_PENDING_IRQ */
extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
extern int irq_can_set_affinity(unsigned int irq);
#else /* CONFIG_SMP */
#define move_native_irq(x)
#define move_masked_irq(x)
static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
{
return -EINVAL;
}
static inline int irq_can_set_affinity(unsigned int irq) { return 0; }
#endif /* CONFIG_SMP */
#ifdef CONFIG_IRQBALANCE

View File

@ -14,7 +14,7 @@
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>

View File

@ -14,12 +14,14 @@
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <asm/irq_regs.h>
#include "tick-internal.h"
/*

View File

@ -14,7 +14,7 @@
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>