
[S390] qdio: new qdio driver.

List of major changes:
- split qdio driver into several files
- separation of thin interrupt code
- improved handling for multiple thin interrupt devices
- inbound and outbound processing now always runs in tasklet context
- significantly fewer tasklet schedules needed per interrupt
- merged qebsm with non-qebsm handling
- cleanup qdio interface and added kerneldoc
- coding style

Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Reviewed-by: Utz Bacher <utz.bacher@de.ibm.com>
Reviewed-by: Ursula Braun <braunu@de.ibm.com>
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Author: Jan Glauber, 2008-07-17 17:16:48 +02:00 (committed by Heiko Carstens)
parent dae39843f4
commit 779e6e1c72
20 changed files with 3885 additions and 4954 deletions
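
The change list above notes that inbound and outbound processing now always runs in tasklet context. Below is a minimal sketch of that wiring, not part of the patch: the handler and variable names are illustrative (the real handlers are qdio_inbound_processing()/qdio_outbound_processing(), set up per queue in qdio_setup.c further down); only tasklet_init() and tasklet_schedule() are actual kernel API.

#include <linux/interrupt.h>

/* illustrative handler; the patch uses qdio_inbound_processing() etc. */
static void example_inbound_processing(unsigned long data)
{
        /* would scan the SLSB buffer states and call the upper-layer handler */
}

static struct tasklet_struct example_tasklet;

static void example_init_and_kick(void)
{
        /* one tasklet per queue; the interrupt handler only schedules it */
        tasklet_init(&example_tasklet, example_inbound_processing, 0);
        tasklet_schedule(&example_tasklet);
}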

drivers/s390/cio/Makefile

@@ -9,4 +9,6 @@ ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o
obj-$(CONFIG_CHSC_SCH) += chsc_sch.o
obj-$(CONFIG_CCWGROUP) += ccwgroup.o
qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_perf.o qdio_setup.o
obj-$(CONFIG_QDIO) += qdio.o

File diff suppressed because it is too large.


@@ -1,66 +1,20 @@
/*
* linux/drivers/s390/cio/qdio.h
*
* Copyright 2000,2008 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#ifndef _CIO_QDIO_H
#define _CIO_QDIO_H
#include <asm/page.h>
#include <asm/isc.h>
#include <asm/schid.h>
#include "chsc.h"
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_VERBOSE_LEVEL 9
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_VERBOSE_LEVEL 5
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_USE_PROCESSING_STATE
#define QDIO_MINIMAL_BH_RELIEF_TIME 16
#define QDIO_TIMER_POLL_VALUE 1
#define IQDIO_TIMER_POLL_VALUE 1
/*
* unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as
* we never know, whether we'll get initiative again, e.g. to give the
* transmit skb's back to the stack, however the stack may be waiting for
* them... therefore we define 4 as threshold to start polling (which
* will stop as soon as the asynchronous queue catches up)
* btw, this only applies to the asynchronous HiperSockets queue
*/
#define IQDIO_FILL_LEVEL_TO_POLL 4
#define TIQDIO_THININT_ISC QDIO_AIRQ_ISC
#define TIQDIO_DELAY_TARGET 0
#define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */
#define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */
#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */
#define IQDIO_GLOBAL_LAPS_INT 1 /* don't global summary */
#define IQDIO_LOCAL_LAPS 4
#define IQDIO_LOCAL_LAPS_INT 1
#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2
/*#define IQDIO_IQDC_INT_PARM 0x1234*/
#define QDIO_Q_LAPS 5
#define QDIO_STORAGE_KEY PAGE_DEFAULT_KEY
#define L2_CACHELINE_SIZE 256
#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32))
#define QDIO_PERF "qdio_perf"
/* must be a power of 2 */
/*#define QDIO_STATS_NUMBER 4
#define QDIO_STATS_CLASSES 2
#define QDIO_STATS_COUNT_NEEDED 2*/
#define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before
exiting without having use_count
of the queue to 0 */
#define QDIO_ESTABLISH_TIMEOUT (1*HZ)
#define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
#define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
#define QDIO_FORCE_CHECK_TIMEOUT (10*HZ)
#define QDIO_ACTIVATE_TIMEOUT (5) /* 5 ms */
#define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */
#define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */
#define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */
enum qdio_irq_states {
QDIO_IRQ_STATE_INACTIVE,
@@ -72,565 +26,352 @@ enum qdio_irq_states {
NR_QDIO_IRQ_STATES,
};
/* used as intparm in do_IO: */
#define QDIO_DOING_SENSEID 0
#define QDIO_DOING_ESTABLISH 1
#define QDIO_DOING_ACTIVATE 2
#define QDIO_DOING_CLEANUP 3
/* used as intparm in do_IO */
#define QDIO_DOING_ESTABLISH 1
#define QDIO_DOING_ACTIVATE 2
#define QDIO_DOING_CLEANUP 3
/************************* DEBUG FACILITY STUFF *********************/
#define SLSB_STATE_NOT_INIT 0x0
#define SLSB_STATE_EMPTY 0x1
#define SLSB_STATE_PRIMED 0x2
#define SLSB_STATE_HALTED 0xe
#define SLSB_STATE_ERROR 0xf
#define SLSB_TYPE_INPUT 0x0
#define SLSB_TYPE_OUTPUT 0x20
#define SLSB_OWNER_PROG 0x80
#define SLSB_OWNER_CU 0x40
#define QDIO_DBF_HEX(ex,name,level,addr,len) \
do { \
if (ex) \
debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \
else \
debug_event(qdio_dbf_##name,level,(void*)(addr),len); \
} while (0)
#define QDIO_DBF_TEXT(ex,name,level,text) \
do { \
if (ex) \
debug_text_exception(qdio_dbf_##name,level,text); \
else \
debug_text_event(qdio_dbf_##name,level,text); \
} while (0)
#define SLSB_P_INPUT_NOT_INIT \
(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_NOT_INIT) /* 0x80 */
#define SLSB_P_INPUT_ACK \
(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x81 */
#define SLSB_CU_INPUT_EMPTY \
(SLSB_OWNER_CU | SLSB_TYPE_INPUT | SLSB_STATE_EMPTY) /* 0x41 */
#define SLSB_P_INPUT_PRIMED \
(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_PRIMED) /* 0x82 */
#define SLSB_P_INPUT_HALTED \
(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_HALTED) /* 0x8e */
#define SLSB_P_INPUT_ERROR \
(SLSB_OWNER_PROG | SLSB_TYPE_INPUT | SLSB_STATE_ERROR) /* 0x8f */
#define SLSB_P_OUTPUT_NOT_INIT \
(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_NOT_INIT) /* 0xa0 */
#define SLSB_P_OUTPUT_EMPTY \
(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_EMPTY) /* 0xa1 */
#define SLSB_CU_OUTPUT_PRIMED \
(SLSB_OWNER_CU | SLSB_TYPE_OUTPUT | SLSB_STATE_PRIMED) /* 0x62 */
#define SLSB_P_OUTPUT_HALTED \
(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_HALTED) /* 0xae */
#define SLSB_P_OUTPUT_ERROR \
(SLSB_OWNER_PROG | SLSB_TYPE_OUTPUT | SLSB_STATE_ERROR) /* 0xaf */
#define SLSB_ERROR_DURING_LOOKUP 0xff
#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len)
#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len)
#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len)
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len)
#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len)
#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len)
#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len)
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0)
#endif /* CONFIG_QDIO_DEBUG */
/* additional CIWs returned by extended Sense-ID */
#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text)
#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text)
#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text)
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
#endif /* CONFIG_QDIO_DEBUG */
/* flags for st qdio sch data */
#define CHSC_FLAG_QDIO_CAPABILITY 0x80
#define CHSC_FLAG_VALIDITY 0x40
#define QDIO_DBF_SETUP_NAME "qdio_setup"
#define QDIO_DBF_SETUP_LEN 8
#define QDIO_DBF_SETUP_PAGES 4
#define QDIO_DBF_SETUP_NR_AREAS 1
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_SETUP_LEVEL 6
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SETUP_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
/* qdio adapter-characteristics-1 flag */
#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
#define AC1_AUTOMATIC_SYNC_ON_THININT 0x08 /* set by hypervisor */
#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI 0x04 /* set by hypervisor */
#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
#define QDIO_DBF_SBAL_LEN 256
#define QDIO_DBF_SBAL_PAGES 4
#define QDIO_DBF_SBAL_NR_AREAS 2
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_SBAL_LEVEL 6
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SBAL_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TRACE_NAME "qdio_trace"
#define QDIO_DBF_TRACE_LEN 8
#define QDIO_DBF_TRACE_NR_AREAS 2
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TRACE_PAGES 16
#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TRACE_PAGES 4
#define QDIO_DBF_TRACE_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SENSE_NAME "qdio_sense"
#define QDIO_DBF_SENSE_LEN 64
#define QDIO_DBF_SENSE_PAGES 2
#define QDIO_DBF_SENSE_NR_AREAS 1
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_SENSE_LEVEL 6
#else /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_SENSE_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q
#define QDIO_DBF_SLSB_OUT_PAGES 256
#define QDIO_DBF_SLSB_OUT_NR_AREAS 1
#define QDIO_DBF_SLSB_OUT_LEVEL 6
#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in"
#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q
#define QDIO_DBF_SLSB_IN_PAGES 256
#define QDIO_DBF_SLSB_IN_NR_AREAS 1
#define QDIO_DBF_SLSB_IN_LEVEL 6
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_PRINTK_HEADER QDIO_NAME ": "
#if QDIO_VERBOSE_LEVEL>8
#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_STUPID(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>7
#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ALL(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>6
#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_INFO(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>5
#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_WARN(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>4
#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ERR(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>3
#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_CRIT(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>2
#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ALERT(x...) do { } while (0)
#endif
#if QDIO_VERBOSE_LEVEL>1
#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_EMERG(x...) do { } while (0)
#endif
#define QDIO_HEXDUMP16(importance,header,ptr) \
QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x\n",*(((char*)ptr)), \
*(((char*)ptr)+1),*(((char*)ptr)+2), \
*(((char*)ptr)+3),*(((char*)ptr)+4), \
*(((char*)ptr)+5),*(((char*)ptr)+6), \
*(((char*)ptr)+7),*(((char*)ptr)+8), \
*(((char*)ptr)+9),*(((char*)ptr)+10), \
*(((char*)ptr)+11),*(((char*)ptr)+12), \
*(((char*)ptr)+13),*(((char*)ptr)+14), \
*(((char*)ptr)+15)); \
QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x\n", \
*(((char*)ptr)+16),*(((char*)ptr)+17), \
*(((char*)ptr)+18),*(((char*)ptr)+19), \
*(((char*)ptr)+20),*(((char*)ptr)+21), \
*(((char*)ptr)+22),*(((char*)ptr)+23), \
*(((char*)ptr)+24),*(((char*)ptr)+25), \
*(((char*)ptr)+26),*(((char*)ptr)+27), \
*(((char*)ptr)+28),*(((char*)ptr)+29), \
*(((char*)ptr)+30),*(((char*)ptr)+31));
/****************** END OF DEBUG FACILITY STUFF *********************/
/*
* Some instructions as assembly
*/
static inline int
do_sqbs(unsigned long sch, unsigned char state, int queue,
unsigned int *start, unsigned int *count)
{
#ifdef CONFIG_64BIT
register unsigned long _ccq asm ("0") = *count;
register unsigned long _sch asm ("1") = sch;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
static inline int do_sqbs(u64 token, unsigned char state, int queue,
int *start, int *count)
{
register unsigned long _ccq asm ("0") = *count;
register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
asm volatile(
" .insn rsy,0xeb000000008A,%1,0,0(%2)"
: "+d" (_ccq), "+d" (_queuestart)
: "d" ((unsigned long)state), "d" (_sch)
: "memory", "cc");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
asm volatile(
" .insn rsy,0xeb000000008A,%1,0,0(%2)"
: "+d" (_ccq), "+d" (_queuestart)
: "d" ((unsigned long)state), "d" (_token)
: "memory", "cc");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
return (_ccq >> 32) & 0xff;
#else
return 0;
#endif
return (_ccq >> 32) & 0xff;
}
static inline int
do_eqbs(unsigned long sch, unsigned char *state, int queue,
unsigned int *start, unsigned int *count)
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
int *start, int *count)
{
#ifdef CONFIG_64BIT
register unsigned long _ccq asm ("0") = *count;
register unsigned long _sch asm ("1") = sch;
register unsigned long _token asm ("1") = token;
unsigned long _queuestart = ((unsigned long)queue << 32) | *start;
unsigned long _state = 0;
asm volatile(
" .insn rrf,0xB99c0000,%1,%2,0,0"
: "+d" (_ccq), "+d" (_queuestart), "+d" (_state)
: "d" (_sch)
: "memory", "cc" );
: "d" (_token)
: "memory", "cc");
*count = _ccq & 0xff;
*start = _queuestart & 0xff;
*state = _state & 0xff;
return (_ccq >> 32) & 0xff;
}
#else
return 0;
#endif
}
static inline int do_sqbs(u64 token, unsigned char state, int queue,
int *start, int *count) { return 0; }
static inline int do_eqbs(u64 token, unsigned char *state, int queue,
int *start, int *count) { return 0; }
#endif /* CONFIG_64BIT */
struct qdio_irq;
static inline int
do_siga_sync(struct subchannel_id schid, unsigned int mask1, unsigned int mask2)
{
register unsigned long reg0 asm ("0") = 2;
register struct subchannel_id reg1 asm ("1") = schid;
register unsigned long reg2 asm ("2") = mask1;
register unsigned long reg3 asm ("3") = mask2;
int cc;
struct siga_flag {
u8 input:1;
u8 output:1;
u8 sync:1;
u8 no_sync_ti:1;
u8 no_sync_out_ti:1;
u8 no_sync_out_pci:1;
u8:2;
} __attribute__ ((packed));
asm volatile(
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: "d" (reg0), "d" (reg1), "d" (reg2), "d" (reg3) : "cc");
return cc;
}
static inline int
do_siga_input(struct subchannel_id schid, unsigned int mask)
{
register unsigned long reg0 asm ("0") = 1;
register struct subchannel_id reg1 asm ("1") = schid;
register unsigned long reg2 asm ("2") = mask;
int cc;
asm volatile(
" siga 0\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
: "d" (reg0), "d" (reg1), "d" (reg2) : "cc", "memory");
return cc;
}
static inline int
do_siga_output(unsigned long schid, unsigned long mask, __u32 *bb,
unsigned int fc)
{
register unsigned long __fc asm("0") = fc;
register unsigned long __schid asm("1") = schid;
register unsigned long __mask asm("2") = mask;
int cc;
asm volatile(
" siga 0\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
EX_TABLE(0b,1b)
: "=d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
: "0" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
: "cc", "memory");
(*bb) = ((unsigned int) __fc) >> 31;
return cc;
}
static inline unsigned long
do_clear_global_summary(void)
{
register unsigned long __fn asm("1") = 3;
register unsigned long __tmp asm("2");
register unsigned long __time asm("3");
asm volatile(
" .insn rre,0xb2650000,2,0"
: "+d" (__fn), "=d" (__tmp), "=d" (__time));
return __time;
}
/*
* QDIO device commands returned by extended Sense-ID
*/
#define DEFAULT_ESTABLISH_QS_CMD 0x1b
#define DEFAULT_ESTABLISH_QS_COUNT 0x1000
#define DEFAULT_ACTIVATE_QS_CMD 0x1f
#define DEFAULT_ACTIVATE_QS_COUNT 0
/*
* additional CIWs returned by extended Sense-ID
*/
#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
#define QDIO_CHSC_RESPONSE_CODE_OK 1
/* flags for st qdio sch data */
#define CHSC_FLAG_QDIO_CAPABILITY 0x80
#define CHSC_FLAG_VALIDITY 0x40
#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40
#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20
#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
struct qdio_chsc_ssqd {
struct chsc_ssqd_area {
struct chsc_header request;
u16 reserved1:10;
u16 ssid:2;
u16 fmt:4;
u16:10;
u8 ssid:2;
u8 fmt:4;
u16 first_sch;
u16 reserved2;
u16:16;
u16 last_sch;
u32 reserved3;
u32:32;
struct chsc_header response;
u32 reserved4;
u8 flags;
u8 reserved5;
u16 sch;
u8 qfmt;
u8 parm;
u8 qdioac1;
u8 sch_class;
u8 pct;
u8 icnt;
u8 reserved7;
u8 ocnt;
u8 reserved8;
u8 mbccnt;
u16 qdioac2;
u64 sch_token;
u32:32;
struct qdio_ssqd_desc qdio_ssqd;
} __attribute__ ((packed));
struct scssc_area {
struct chsc_header request;
u16 operation_code;
u16:16;
u32:32;
u32:32;
u64 summary_indicator_addr;
u64 subchannel_indicator_addr;
u32 ks:4;
u32 kc:4;
u32:21;
u32 isc:3;
u32 word_with_d_bit;
u32:32;
struct subchannel_id schid;
u32 reserved[1004];
struct chsc_header response;
u32:32;
} __attribute__ ((packed));
struct qdio_input_q {
/* input buffer acknowledgement flag */
int polling;
/* last time of noticing incoming data */
u64 timestamp;
/* lock for clearing the acknowledgement */
spinlock_t lock;
};
struct qdio_perf_stats {
#ifdef CONFIG_64BIT
atomic64_t tl_runs;
atomic64_t outbound_tl_runs;
atomic64_t outbound_tl_runs_resched;
atomic64_t inbound_tl_runs;
atomic64_t inbound_tl_runs_resched;
atomic64_t inbound_thin_tl_runs;
atomic64_t inbound_thin_tl_runs_resched;
struct qdio_output_q {
/* failed siga-w attempts*/
atomic_t busy_siga_counter;
atomic64_t siga_outs;
atomic64_t siga_ins;
atomic64_t siga_syncs;
atomic64_t pcis;
atomic64_t thinints;
atomic64_t fast_reqs;
/* start time of busy condition */
u64 timestamp;
atomic64_t outbound_cnt;
atomic64_t inbound_cnt;
#else /* CONFIG_64BIT */
atomic_t tl_runs;
atomic_t outbound_tl_runs;
atomic_t outbound_tl_runs_resched;
atomic_t inbound_tl_runs;
atomic_t inbound_tl_runs_resched;
atomic_t inbound_thin_tl_runs;
atomic_t inbound_thin_tl_runs_resched;
/* PCIs are enabled for the queue */
int pci_out_enabled;
atomic_t siga_outs;
atomic_t siga_ins;
atomic_t siga_syncs;
atomic_t pcis;
atomic_t thinints;
atomic_t fast_reqs;
atomic_t outbound_cnt;
atomic_t inbound_cnt;
#endif /* CONFIG_64BIT */
/* timer to check for more outbound work */
struct timer_list timer;
};
/* unlikely as the later the better */
#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \
qdio_siga_sync(q,~0U,~0U)
#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \
qdio_siga_sync(q,~0U,0)
#define NOW qdio_get_micros()
#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW
#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time)
#define SAVE_FRONTIER(q,val) q->last_move_ftc=val
#define GET_SAVED_FRONTIER(q) (q->last_move_ftc)
#define MY_MODULE_STRING(x) #x
#ifdef CONFIG_64BIT
#define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x)
#else /* CONFIG_64BIT */
#define QDIO_GET_ADDR(x) ((__u32)(long)x)
#endif /* CONFIG_64BIT */
struct qdio_q {
volatile struct slsb slsb;
struct slsb slsb;
union {
struct qdio_input_q in;
struct qdio_output_q out;
} u;
char unused[QDIO_MAX_BUFFERS_PER_Q];
/* queue number */
int nr;
__u32 * dev_st_chg_ind;
/* bitmask of queue number */
int mask;
/* input or output queue */
int is_input_q;
struct subchannel_id schid;
struct ccw_device *cdev;
unsigned int is_iqdio_q;
unsigned int is_thinint_q;
/* bit 0 means queue 0, bit 1 means queue 1, ... */
unsigned int mask;
unsigned int q_no;
/* list of thinint input queues */
struct list_head entry;
/* upper-layer program handler */
qdio_handler_t (*handler);
/* points to the next buffer to be checked for having
* been processed by the card (outbound)
* or to the next buffer the program should check for (inbound) */
volatile int first_to_check;
/* and the last time it was: */
volatile int last_move_ftc;
/*
* inbound: next buffer the program should check for
* outbound: next buffer to check for having been processed
* by the card
*/
int first_to_check;
atomic_t number_of_buffers_used;
atomic_t polling;
/* first_to_check of the last time */
int last_move_ftc;
unsigned int siga_in;
unsigned int siga_out;
unsigned int siga_sync;
unsigned int siga_sync_done_on_thinints;
unsigned int siga_sync_done_on_outb_tis;
unsigned int hydra_gives_outbound_pcis;
/* beginning position for calling the program */
int first_to_kick;
/* used to save beginning position when calling dd_handlers */
int first_element_to_kick;
/* number of buffers in use by the adapter */
atomic_t nr_buf_used;
atomic_t use_count;
atomic_t is_in_shutdown;
void *irq_ptr;
struct timer_list timer;
#ifdef QDIO_USE_TIMERS_FOR_POLLING
atomic_t timer_already_set;
spinlock_t timer_lock;
#else /* QDIO_USE_TIMERS_FOR_POLLING */
struct qdio_irq *irq_ptr;
struct tasklet_struct tasklet;
#endif /* QDIO_USE_TIMERS_FOR_POLLING */
enum qdio_irq_states state;
/* used to store the error condition during a data transfer */
/* error condition during a data transfer */
unsigned int qdio_error;
unsigned int siga_error;
unsigned int error_status_flags;
/* list of interesting queues */
volatile struct qdio_q *list_next;
volatile struct qdio_q *list_prev;
struct sl *sl;
volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q];
unsigned long int_parm;
/*struct {
int in_bh_check_limit;
int threshold;
} threshold_classes[QDIO_STATS_CLASSES];*/
struct {
/* inbound: the time to stop polling
outbound: the time to kick peer */
int threshold; /* the real value */
/* outbound: last time of do_QDIO
inbound: last time of noticing incoming data */
/*__u64 last_transfer_times[QDIO_STATS_NUMBER];
int last_transfer_index; */
__u64 last_transfer_time;
__u64 busy_start;
} timing;
atomic_t busy_siga_counter;
unsigned int queue_type;
unsigned int is_pci_out;
/* leave this member at the end. won't be cleared in qdio_fill_qs */
struct slib *slib; /* a page is allocated under this pointer,
sl points into this page, offset PAGE_SIZE/2
(after slib) */
/*
* Warning: Leave this member at the end so it won't be cleared in
* qdio_fill_qs. A page is allocated under this pointer and used for
* slib and sl. slib is 2048 bytes big and sl points to offset
* PAGE_SIZE / 2.
*/
struct slib *slib;
} __attribute__ ((aligned(256)));
struct qdio_irq {
__u32 * volatile dev_st_chg_ind;
struct qib qib;
u32 *dsci; /* address of device state change indicator */
struct ccw_device *cdev;
unsigned long int_parm;
struct subchannel_id schid;
unsigned int is_iqdio_irq;
unsigned int is_thinint_irq;
unsigned int hydra_gives_outbound_pcis;
unsigned int sync_done_on_outb_pcis;
/* QEBSM facility */
unsigned int is_qebsm;
unsigned long sch_token;
unsigned long sch_token; /* QEBSM facility */
enum qdio_irq_states state;
unsigned int no_input_qs;
unsigned int no_output_qs;
struct siga_flag siga_flag; /* siga sync information from qdioac */
unsigned char qdioac;
int nr_input_qs;
int nr_output_qs;
struct ccw1 ccw;
struct ciw equeue;
struct ciw aqueue;
struct qib qib;
void (*original_int_handler) (struct ccw_device *,
unsigned long, struct irb *);
struct qdio_ssqd_desc ssqd_desc;
/* leave these four members together at the end. won't be cleared in qdio_fill_irq */
void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
/*
* Warning: Leave these members together at the end so they won't be
* cleared in qdio_setup_irq.
*/
struct qdr *qdr;
unsigned long chsc_page;
struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
struct semaphore setting_up_sema;
struct mutex setup_mutex;
};
#endif
/* helper functions */
#define queue_type(q) q->irq_ptr->qib.qfmt
#define is_thinint_irq(irq) \
(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
css_general_characteristics.aif_osa)
/* the highest iqdio queue is used for multicast */
static inline int multicast_outbound(struct qdio_q *q)
{
return (q->irq_ptr->nr_output_qs > 1) &&
(q->nr == q->irq_ptr->nr_output_qs - 1);
}
static inline unsigned long long get_usecs(void)
{
return monotonic_clock() >> 12;
}
#define pci_out_supported(q) \
(q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti)
#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti)
#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync)
#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci)
#define for_each_input_queue(irq_ptr, q, i) \
for (i = 0, q = irq_ptr->input_qs[0]; \
i < irq_ptr->nr_input_qs; \
q = irq_ptr->input_qs[++i])
#define for_each_output_queue(irq_ptr, q, i) \
for (i = 0, q = irq_ptr->output_qs[0]; \
i < irq_ptr->nr_output_qs; \
q = irq_ptr->output_qs[++i])
#define prev_buf(bufnr) \
((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
#define next_buf(bufnr) \
((bufnr + 1) & QDIO_MAX_BUFFERS_MASK)
#define add_buf(bufnr, inc) \
((bufnr + inc) & QDIO_MAX_BUFFERS_MASK)
/* prototypes for thin interrupt */
void qdio_sync_after_thinint(struct qdio_q *q);
int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state);
void qdio_check_outbound_after_thinint(struct qdio_q *q);
int qdio_inbound_q_moved(struct qdio_q *q);
void qdio_kick_inbound_handler(struct qdio_q *q);
void qdio_stop_polling(struct qdio_q *q);
int qdio_siga_sync_q(struct qdio_q *q);
void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr);
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr);
void tiqdio_inbound_processing(unsigned long q);
int tiqdio_allocate_memory(void);
void tiqdio_free_memory(void);
int tiqdio_register_thinints(void);
void tiqdio_unregister_thinints(void);
/* prototypes for setup */
void qdio_inbound_processing(unsigned long data);
void qdio_outbound_processing(unsigned long data);
void qdio_outbound_timer(unsigned long data);
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
struct irb *irb);
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs,
int nr_output_qs);
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr);
int qdio_setup_irq(struct qdio_initialize *init_data);
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
struct ccw_device *cdev);
void qdio_release_memory(struct qdio_irq *irq_ptr);
int qdio_setup_init(void);
void qdio_setup_exit(void);
#endif /* _CIO_QDIO_H */
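
Not part of the patch: a short usage sketch of the queue-iteration and ring-arithmetic helpers declared above. The function name is hypothetical; it assumes QDIO_MAX_BUFFERS_PER_Q is 128, so QDIO_MAX_BUFFERS_MASK is 127.

#include "qdio.h"

static void example_scan_input_queues(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;
        int i, next;

        for_each_input_queue(irq_ptr, q, i) {
                /* advance one buffer in the 128-entry ring, wrapping around:
                 * next_buf(127) == 0, prev_buf(0) == 127 */
                next = next_buf(q->first_to_check);
                (void)next;
        }
}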


@@ -0,0 +1,240 @@
/*
* drivers/s390/cio/qdio_debug.c
*
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <asm/qdio.h>
#include <asm/debug.h>
#include "qdio_debug.h"
#include "qdio.h"
debug_info_t *qdio_dbf_setup;
debug_info_t *qdio_dbf_trace;
static struct dentry *debugfs_root;
#define MAX_DEBUGFS_QUEUES 32
static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL };
static DEFINE_MUTEX(debugfs_mutex);
void qdio_allocate_do_dbf(struct qdio_initialize *init_data)
{
char dbf_text[20];
sprintf(dbf_text, "qfmt:%x", init_data->q_format);
QDIO_DBF_TEXT0(0, setup, dbf_text);
QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8);
sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format);
QDIO_DBF_TEXT0(0, setup, dbf_text);
QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *));
QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *));
QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *));
sprintf(dbf_text, "niq:%4x", init_data->no_input_qs);
QDIO_DBF_TEXT0(0, setup, dbf_text);
sprintf(dbf_text, "noq:%4x", init_data->no_output_qs);
QDIO_DBF_TEXT0(0, setup, dbf_text);
QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *));
QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *));
QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long));
QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long));
QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *));
QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *));
}
static void qdio_unregister_dbf_views(void)
{
if (qdio_dbf_setup)
debug_unregister(qdio_dbf_setup);
if (qdio_dbf_trace)
debug_unregister(qdio_dbf_trace);
}
static int qdio_register_dbf_views(void)
{
qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES,
QDIO_DBF_SETUP_NR_AREAS,
QDIO_DBF_SETUP_LEN);
if (!qdio_dbf_setup)
goto oom;
debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL);
qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES,
QDIO_DBF_TRACE_NR_AREAS,
QDIO_DBF_TRACE_LEN);
if (!qdio_dbf_trace)
goto oom;
debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view);
debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL);
return 0;
oom:
qdio_unregister_dbf_views();
return -ENOMEM;
}
static int qstat_show(struct seq_file *m, void *v)
{
unsigned char state;
struct qdio_q *q = m->private;
int i;
if (!q)
return 0;
seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci);
seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used));
seq_printf(m, "ftc: %d\n", q->first_to_check);
seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc);
seq_printf(m, "polling: %d\n", q->u.in.polling);
seq_printf(m, "slsb buffer states:\n");
qdio_siga_sync_q(q);
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
get_buf_state(q, i, &state);
switch (state) {
case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_OUTPUT_NOT_INIT:
seq_printf(m, "N");
break;
case SLSB_P_INPUT_PRIMED:
case SLSB_CU_OUTPUT_PRIMED:
seq_printf(m, "+");
break;
case SLSB_P_INPUT_ACK:
seq_printf(m, "A");
break;
case SLSB_P_INPUT_ERROR:
case SLSB_P_OUTPUT_ERROR:
seq_printf(m, "x");
break;
case SLSB_CU_INPUT_EMPTY:
case SLSB_P_OUTPUT_EMPTY:
seq_printf(m, "-");
break;
case SLSB_P_INPUT_HALTED:
case SLSB_P_OUTPUT_HALTED:
seq_printf(m, ".");
break;
default:
seq_printf(m, "?");
}
if (i == 63)
seq_printf(m, "\n");
}
seq_printf(m, "\n");
return 0;
}
static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
size_t count, loff_t *off)
{
struct seq_file *seq = file->private_data;
struct qdio_q *q = seq->private;
if (!q)
return 0;
if (q->is_input_q)
xchg(q->irq_ptr->dsci, 1);
local_bh_disable();
tasklet_schedule(&q->tasklet);
local_bh_enable();
return count;
}
static int qstat_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, qstat_show,
filp->f_path.dentry->d_inode->i_private);
}
static void get_queue_name(struct qdio_q *q, struct ccw_device *cdev, char *name)
{
memset(name, 0, sizeof(name));
sprintf(name, "%s", cdev->dev.bus_id);
if (q->is_input_q)
sprintf(name + strlen(name), "_input");
else
sprintf(name + strlen(name), "_output");
sprintf(name + strlen(name), "_%d", q->nr);
}
static void remove_debugfs_entry(struct qdio_q *q)
{
int i;
for (i = 0; i < MAX_DEBUGFS_QUEUES; i++) {
if (!debugfs_queues[i])
continue;
if (debugfs_queues[i]->d_inode->i_private == q) {
debugfs_remove(debugfs_queues[i]);
debugfs_queues[i] = NULL;
}
}
}
static struct file_operations debugfs_fops = {
.owner = THIS_MODULE,
.open = qstat_seq_open,
.read = seq_read,
.write = qstat_seq_write,
.llseek = seq_lseek,
.release = single_release,
};
static void setup_debugfs_entry(struct qdio_q *q, struct ccw_device *cdev)
{
int i = 0;
char name[40];
while (debugfs_queues[i] != NULL) {
i++;
if (i >= MAX_DEBUGFS_QUEUES)
return;
}
get_queue_name(q, cdev, name);
debugfs_queues[i] = debugfs_create_file(name, S_IFREG | S_IRUGO | S_IWUSR,
debugfs_root, q, &debugfs_fops);
}
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
{
struct qdio_q *q;
int i;
mutex_lock(&debugfs_mutex);
for_each_input_queue(irq_ptr, q, i)
setup_debugfs_entry(q, cdev);
for_each_output_queue(irq_ptr, q, i)
setup_debugfs_entry(q, cdev);
mutex_unlock(&debugfs_mutex);
}
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cdev)
{
struct qdio_q *q;
int i;
mutex_lock(&debugfs_mutex);
for_each_input_queue(irq_ptr, q, i)
remove_debugfs_entry(q);
for_each_output_queue(irq_ptr, q, i)
remove_debugfs_entry(q);
mutex_unlock(&debugfs_mutex);
}
int __init qdio_debug_init(void)
{
debugfs_root = debugfs_create_dir("qdio_queues", NULL);
return qdio_register_dbf_views();
}
void qdio_debug_exit(void)
{
debugfs_remove(debugfs_root);
qdio_unregister_dbf_views();
}


@@ -0,0 +1,91 @@
/*
* drivers/s390/cio/qdio_debug.h
*
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#ifndef QDIO_DEBUG_H
#define QDIO_DEBUG_H
#include <asm/debug.h>
#include <asm/qdio.h>
#include "qdio.h"
#define QDIO_DBF_HEX(ex, name, level, addr, len) \
do { \
if (ex) \
debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \
else \
debug_event(qdio_dbf_##name, level, (void *)(addr), len); \
} while (0)
#define QDIO_DBF_TEXT(ex, name, level, text) \
do { \
if (ex) \
debug_text_exception(qdio_dbf_##name, level, text); \
else \
debug_text_event(qdio_dbf_##name, level, text); \
} while (0)
#define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len)
#define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len)
#define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len)
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len)
#define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len)
#define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len)
#define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len)
#else
#define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0)
#define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0)
#define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0)
#define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0)
#endif /* CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text)
#define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text)
#define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text)
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text)
#define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text)
#define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text)
#define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text)
#else
#define QDIO_DBF_TEXT3(ex, name, text) do {} while (0)
#define QDIO_DBF_TEXT4(ex, name, text) do {} while (0)
#define QDIO_DBF_TEXT5(ex, name, text) do {} while (0)
#define QDIO_DBF_TEXT6(ex, name, text) do {} while (0)
#endif /* CONFIG_QDIO_DEBUG */
/* s390dbf views */
#define QDIO_DBF_SETUP_LEN 8
#define QDIO_DBF_SETUP_PAGES 4
#define QDIO_DBF_SETUP_NR_AREAS 1
#define QDIO_DBF_TRACE_LEN 8
#define QDIO_DBF_TRACE_NR_AREAS 2
#ifdef CONFIG_QDIO_DEBUG
#define QDIO_DBF_TRACE_PAGES 16
#define QDIO_DBF_SETUP_LEVEL 6
#define QDIO_DBF_TRACE_LEVEL 4
#else /* !CONFIG_QDIO_DEBUG */
#define QDIO_DBF_TRACE_PAGES 4
#define QDIO_DBF_SETUP_LEVEL 2
#define QDIO_DBF_TRACE_LEVEL 2
#endif /* CONFIG_QDIO_DEBUG */
extern debug_info_t *qdio_dbf_setup;
extern debug_info_t *qdio_dbf_trace;
void qdio_allocate_do_dbf(struct qdio_initialize *init_data);
void debug_print_bstat(struct qdio_q *q);
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
struct ccw_device *cdev);
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr,
struct ccw_device *cdev);
int qdio_debug_init(void);
void qdio_debug_exit(void);
#endif
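
A small sketch, not part of the patch, of the s390dbf wrappers above, mirroring the pattern used throughout the new code (each "qdio_setup" entry stores at most QDIO_DBF_SETUP_LEN = 8 bytes of text). The function is illustrative only.

#include <linux/kernel.h>
#include "qdio_debug.h"

static void example_log_subchannel(int schno)
{
        char dbf_text[15];

        sprintf(dbf_text, "schno%x", schno);
        QDIO_DBF_TEXT0(0, setup, dbf_text);     /* goes to the "qdio_setup" view */
}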

drivers/s390/cio/qdio_main.c (new file, 1755 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,151 @@
/*
* drivers/s390/cio/qdio_perf.c
*
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/ccwdev.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio_debug.h"
#include "qdio_perf.h"
int qdio_performance_stats;
struct qdio_perf_stats perf_stats;
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *qdio_perf_pde;
#endif
inline void qdio_perf_stat_inc(atomic_long_t *count)
{
if (qdio_performance_stats)
atomic_long_inc(count);
}
inline void qdio_perf_stat_dec(atomic_long_t *count)
{
if (qdio_performance_stats)
atomic_long_dec(count);
}
/*
* procfs functions
*/
static int qdio_perf_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.qdio_int));
seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.pci_int));
seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.thin_int));
seq_printf(m, "\n");
seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.tasklet_inbound));
seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.tasklet_outbound));
seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
(long)atomic_long_read(&perf_stats.tasklet_thinint),
(long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
(long)atomic_long_read(&perf_stats.thinint_inbound),
(long)atomic_long_read(&perf_stats.thinint_inbound_loop));
seq_printf(m, "\n");
seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.siga_in));
seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.siga_out));
seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.siga_sync));
seq_printf(m, "\n");
seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.inbound_handler));
seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.outbound_handler));
seq_printf(m, "\n");
seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
(long)atomic_long_read(&perf_stats.fast_requeue));
seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
(long)atomic_long_read(&perf_stats.debug_tl_out_timer));
seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
(long)atomic_long_read(&perf_stats.debug_stop_polling));
seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
(long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
seq_printf(m, "\n");
return 0;
}
static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
{
return single_open(filp, qdio_perf_proc_show, NULL);
}
static struct file_operations qdio_perf_proc_fops = {
.owner = THIS_MODULE,
.open = qdio_perf_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/*
* sysfs functions
*/
static ssize_t qdio_perf_stats_show(struct bus_type *bus, char *buf)
{
return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
}
static ssize_t qdio_perf_stats_store(struct bus_type *bus,
const char *buf, size_t count)
{
unsigned long i;
if (strict_strtoul(buf, 16, &i) != 0)
return -EINVAL;
if ((i != 0) && (i != 1))
return -EINVAL;
if (i == qdio_performance_stats)
return count;
qdio_performance_stats = i;
/* reset performance statistics */
if (i == 0)
memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
return count;
}
static BUS_ATTR(qdio_performance_stats, 0644, qdio_perf_stats_show,
qdio_perf_stats_store);
int __init qdio_setup_perf_stats(void)
{
int rc;
rc = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
if (rc)
return rc;
#ifdef CONFIG_PROC_FS
memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
qdio_perf_pde = proc_create("qdio_perf", S_IFREG | S_IRUGO,
NULL, &qdio_perf_proc_fops);
#endif
return 0;
}
void __exit qdio_remove_perf_stats(void)
{
#ifdef CONFIG_PROC_FS
remove_proc_entry("qdio_perf", NULL);
#endif
bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
}


@@ -0,0 +1,54 @@
/*
* drivers/s390/cio/qdio_perf.h
*
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
#ifndef QDIO_PERF_H
#define QDIO_PERF_H
#include <linux/types.h>
#include <linux/device.h>
#include <asm/atomic.h>
struct qdio_perf_stats {
/* interrupt handler calls */
atomic_long_t qdio_int;
atomic_long_t pci_int;
atomic_long_t thin_int;
/* tasklet runs */
atomic_long_t tasklet_inbound;
atomic_long_t tasklet_outbound;
atomic_long_t tasklet_thinint;
atomic_long_t tasklet_thinint_loop;
atomic_long_t thinint_inbound;
atomic_long_t thinint_inbound_loop;
atomic_long_t thinint_inbound_loop2;
/* signal adapter calls */
atomic_long_t siga_out;
atomic_long_t siga_in;
atomic_long_t siga_sync;
/* misc */
atomic_long_t inbound_handler;
atomic_long_t outbound_handler;
atomic_long_t fast_requeue;
/* for debugging */
atomic_long_t debug_tl_out_timer;
atomic_long_t debug_stop_polling;
};
extern struct qdio_perf_stats perf_stats;
extern int qdio_performance_stats;
int qdio_setup_perf_stats(void);
void qdio_remove_perf_stats(void);
extern void qdio_perf_stat_inc(atomic_long_t *count);
extern void qdio_perf_stat_dec(atomic_long_t *count);
#endif
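
A minimal sketch, not part of the patch, of how a call site bumps these counters; the function name is hypothetical, while perf_stats and qdio_perf_stat_inc() are the ones declared above.

#include "qdio_perf.h"

static void example_account_interrupt(void)
{
        /* counted only while qdio_performance_stats is enabled via sysfs */
        qdio_perf_stat_inc(&perf_stats.qdio_int);
        qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
}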


@@ -0,0 +1,521 @@
/*
* drivers/s390/cio/qdio_setup.c
*
* qdio queue initialization
*
* Copyright (C) IBM Corp. 2008
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/qdio.h>
#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio.h"
#include "qdio_debug.h"
static struct kmem_cache *qdio_q_cache;
/*
* qebsm is only available under 64bit but the adapter sets the feature
* flag anyway, so we manually override it.
*/
static inline int qebsm_possible(void)
{
#ifdef CONFIG_64BIT
return css_general_characteristics.qebsm;
#endif
return 0;
}
/*
* qib_param_field: pointer to 128 bytes or NULL, if no param field
* nr_input_qs: pointer to nr_queues*128 words of data or NULL
*/
static void set_impl_params(struct qdio_irq *irq_ptr,
unsigned int qib_param_field_format,
unsigned char *qib_param_field,
unsigned long *input_slib_elements,
unsigned long *output_slib_elements)
{
struct qdio_q *q;
int i, j;
if (!irq_ptr)
return;
WARN_ON((unsigned long)&irq_ptr->qib & 0xff);
irq_ptr->qib.pfmt = qib_param_field_format;
if (qib_param_field)
memcpy(irq_ptr->qib.parm, qib_param_field,
QDIO_MAX_BUFFERS_PER_Q);
if (!input_slib_elements)
goto output;
for_each_input_queue(irq_ptr, q, i) {
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->slib->slibe[j].parms =
input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
}
output:
if (!output_slib_elements)
return;
for_each_output_queue(irq_ptr, q, i) {
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->slib->slibe[j].parms =
output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
}
}
static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
struct qdio_q *q;
int i;
for (i = 0; i < nr_queues; i++) {
q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
if (!q)
return -ENOMEM;
WARN_ON((unsigned long)q & 0xff);
q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
if (!q->slib) {
kmem_cache_free(qdio_q_cache, q);
return -ENOMEM;
}
WARN_ON((unsigned long)q->slib & 0x7ff);
irq_ptr_qs[i] = q;
}
return 0;
}
int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
{
int rc;
rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
if (rc)
return rc;
rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
return rc;
}
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
qdio_handler_t *handler, int i)
{
/* must be cleared by every qdio_establish */
memset(q, 0, ((char *)&q->slib) - ((char *)q));
memset(q->slib, 0, PAGE_SIZE);
q->irq_ptr = irq_ptr;
q->mask = 1 << (31 - i);
q->nr = i;
q->handler = handler;
}
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
void **sbals_array, char *dbf_text, int i)
{
struct qdio_q *prev;
int j;
QDIO_DBF_TEXT0(0, setup, dbf_text);
QDIO_DBF_HEX0(0, setup, &q, sizeof(void *));
q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);
/* fill in sbal */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
q->sbal[j] = *sbals_array++;
WARN_ON((unsigned long)q->sbal[j] & 0xff);
}
/* fill in slib */
if (i > 0) {
prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
: irq_ptr->output_qs[i - 1];
prev->slib->nsliba = (unsigned long)q->slib;
}
q->slib->sla = (unsigned long)q->sl;
q->slib->slsba = (unsigned long)&q->slsb.val[0];
/* fill in sl */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
q->sl->element[j].sbal = (unsigned long)q->sbal[j];
QDIO_DBF_TEXT2(0, setup, "sl-sb-b0");
QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *));
QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *));
QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *));
}
static void setup_queues(struct qdio_irq *irq_ptr,
struct qdio_initialize *qdio_init)
{
char dbf_text[20];
struct qdio_q *q;
void **input_sbal_array = qdio_init->input_sbal_addr_array;
void **output_sbal_array = qdio_init->output_sbal_addr_array;
int i;
sprintf(dbf_text, "qfqs%4x", qdio_init->cdev->private->schid.sch_no);
QDIO_DBF_TEXT0(0, setup, dbf_text);
for_each_input_queue(irq_ptr, q, i) {
sprintf(dbf_text, "in-q%4x", i);
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
spin_lock_init(&q->u.in.lock);
setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i);
input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
if (is_thinint_irq(irq_ptr))
tasklet_init(&q->tasklet, tiqdio_inbound_processing,
(unsigned long) q);
else
tasklet_init(&q->tasklet, qdio_inbound_processing,
(unsigned long) q);
}
for_each_output_queue(irq_ptr, q, i) {
sprintf(dbf_text, "outq%4x", i);
setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
q->is_input_q = 0;
setup_storage_lists(q, irq_ptr, output_sbal_array,
dbf_text, i);
output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
tasklet_init(&q->tasklet, qdio_outbound_processing,
(unsigned long) q);
setup_timer(&q->u.out.timer, (void(*)(unsigned long))
&qdio_outbound_timer, (unsigned long)q);
}
}
static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
{
if (qdioac & AC1_SIGA_INPUT_NEEDED)
irq_ptr->siga_flag.input = 1;
if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
irq_ptr->siga_flag.output = 1;
if (qdioac & AC1_SIGA_SYNC_NEEDED)
irq_ptr->siga_flag.sync = 1;
if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT)
irq_ptr->siga_flag.no_sync_ti = 1;
if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI)
irq_ptr->siga_flag.no_sync_out_pci = 1;
if (irq_ptr->siga_flag.no_sync_out_pci &&
irq_ptr->siga_flag.no_sync_ti)
irq_ptr->siga_flag.no_sync_out_ti = 1;
}
static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
unsigned char qdioac, unsigned long token)
{
char dbf_text[15];
if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
goto no_qebsm;
if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
(!(qdioac & AC1_SC_QEBSM_ENABLED)))
goto no_qebsm;
irq_ptr->sch_token = token;
QDIO_DBF_TEXT0(0, setup, "V=V:1");
sprintf(dbf_text, "%8lx", irq_ptr->sch_token);
QDIO_DBF_TEXT0(0, setup, dbf_text);
return;
no_qebsm:
irq_ptr->sch_token = 0;
irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
QDIO_DBF_TEXT0(0, setup, "noV=V");
}
static int __get_ssqd_info(struct qdio_irq *irq_ptr)
{
struct chsc_ssqd_area *ssqd;
int rc;
QDIO_DBF_TEXT0(0, setup, "getssqd");
ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
memset(ssqd, 0, PAGE_SIZE);
ssqd->request = (struct chsc_header) {
.length = 0x0010,
.code = 0x0024,
};
ssqd->first_sch = irq_ptr->schid.sch_no;
ssqd->last_sch = irq_ptr->schid.sch_no;
ssqd->ssid = irq_ptr->schid.ssid;
if (chsc(ssqd))
return -EIO;
rc = chsc_error_from_response(ssqd->response.code);
if (rc)
return rc;
if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
!(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
(ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no))
return -EINVAL;
memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
sizeof(struct qdio_ssqd_desc));
return 0;
}
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
unsigned char qdioac;
char dbf_text[15];
int rc;
rc = __get_ssqd_info(irq_ptr);
if (rc) {
QDIO_DBF_TEXT2(0, setup, "ssqdasig");
sprintf(dbf_text, "schno%x", irq_ptr->schid.sch_no);
QDIO_DBF_TEXT2(0, setup, dbf_text);
sprintf(dbf_text, "rc:%d", rc);
QDIO_DBF_TEXT2(0, setup, dbf_text);
/* all flags set, worst case */
qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
AC1_SIGA_SYNC_NEEDED;
} else
qdioac = irq_ptr->ssqd_desc.qdioac1;
check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
process_ac_flags(irq_ptr, qdioac);
sprintf(dbf_text, "qdioac%2x", qdioac);
QDIO_DBF_TEXT2(0, setup, dbf_text);
}
void qdio_release_memory(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
/*
* Must check queue array manually since irq_ptr->nr_input_qs /
* irq_ptr->nr_output_qs may not yet be set.
*/
for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
q = irq_ptr->input_qs[i];
if (q) {
free_page((unsigned long) q->slib);
kmem_cache_free(qdio_q_cache, q);
}
}
for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
q = irq_ptr->output_qs[i];
if (q) {
free_page((unsigned long) q->slib);
kmem_cache_free(qdio_q_cache, q);
}
}
kfree(irq_ptr->qdr);
free_page(irq_ptr->chsc_page);
free_page((unsigned long) irq_ptr);
}
static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
struct qdio_q **irq_ptr_qs,
int i, int nr)
{
irq_ptr->qdr->qdf0[i + nr].sliba =
(unsigned long)irq_ptr_qs[i]->slib;
irq_ptr->qdr->qdf0[i + nr].sla =
(unsigned long)irq_ptr_qs[i]->sl;
irq_ptr->qdr->qdf0[i + nr].slsba =
(unsigned long)&irq_ptr_qs[i]->slsb.val[0];
irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY;
irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY;
irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY;
irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY;
}
static void setup_qdr(struct qdio_irq *irq_ptr,
struct qdio_initialize *qdio_init)
{
int i;
irq_ptr->qdr->qfmt = qdio_init->q_format;
irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY;
for (i = 0; i < qdio_init->no_input_qs; i++)
__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
for (i = 0; i < qdio_init->no_output_qs; i++)
__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
qdio_init->no_input_qs);
}
static void setup_qib(struct qdio_irq *irq_ptr,
struct qdio_initialize *init_data)
{
if (qebsm_possible())
irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
irq_ptr->qib.qfmt = init_data->q_format;
if (init_data->no_input_qs)
irq_ptr->qib.isliba =
(unsigned long)(irq_ptr->input_qs[0]->slib);
if (init_data->no_output_qs)
irq_ptr->qib.osliba =
(unsigned long)(irq_ptr->output_qs[0]->slib);
memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
}
int qdio_setup_irq(struct qdio_initialize *init_data)
{
struct ciw *ciw;
struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
int rc;
memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr));
/* wipes qib.ac, required by ar7063 */
memset(irq_ptr->qdr, 0, sizeof(struct qdr));
irq_ptr->int_parm = init_data->int_parm;
irq_ptr->nr_input_qs = init_data->no_input_qs;
irq_ptr->nr_output_qs = init_data->no_output_qs;
irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
irq_ptr->cdev = init_data->cdev;
setup_queues(irq_ptr, init_data);
setup_qib(irq_ptr, init_data);
qdio_setup_thinint(irq_ptr);
set_impl_params(irq_ptr, init_data->qib_param_field_format,
init_data->qib_param_field,
init_data->input_slib_elements,
init_data->output_slib_elements);
/* fill input and output descriptors */
setup_qdr(irq_ptr, init_data);
/* qdr, qib, sls, slsbs, slibs, sbales are filled now */
/* get qdio commands */
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
QDIO_DBF_TEXT2(1, setup, "no eq");
rc = -EINVAL;
goto out_err;
}
irq_ptr->equeue = *ciw;
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
if (!ciw) {
QDIO_DBF_TEXT2(1, setup, "no aq");
rc = -EINVAL;
goto out_err;
}
irq_ptr->aqueue = *ciw;
/* set new interrupt handler */
irq_ptr->orig_handler = init_data->cdev->handler;
init_data->cdev->handler = qdio_int_handler;
return 0;
out_err:
qdio_release_memory(irq_ptr);
return rc;
}
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
struct ccw_device *cdev)
{
char s[80];
sprintf(s, "%s ", cdev->dev.bus_id);
switch (irq_ptr->qib.qfmt) {
case QDIO_QETH_QFMT:
sprintf(s + strlen(s), "OSADE ");
break;
case QDIO_ZFCP_QFMT:
sprintf(s + strlen(s), "ZFCP ");
break;
case QDIO_IQDIO_QFMT:
sprintf(s + strlen(s), "HiperSockets ");
break;
}
sprintf(s + strlen(s), "using: ");
if (!is_thinint_irq(irq_ptr))
sprintf(s + strlen(s), "no");
sprintf(s + strlen(s), "AdapterInterrupts ");
if (!(irq_ptr->sch_token != 0))
sprintf(s + strlen(s), "no");
sprintf(s + strlen(s), "QEBSM ");
if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
sprintf(s + strlen(s), "no");
sprintf(s + strlen(s), "OutboundPCI ");
if (!css_general_characteristics.aif_tdd)
sprintf(s + strlen(s), "no");
sprintf(s + strlen(s), "TDD\n");
printk(KERN_INFO "qdio: %s", s);
memset(s, 0, sizeof(s));
sprintf(s, "%s SIGA required: ", cdev->dev.bus_id);
if (irq_ptr->siga_flag.input)
sprintf(s + strlen(s), "Read ");
if (irq_ptr->siga_flag.output)
sprintf(s + strlen(s), "Write ");
if (irq_ptr->siga_flag.sync)
sprintf(s + strlen(s), "Sync ");
if (!irq_ptr->siga_flag.no_sync_ti)
sprintf(s + strlen(s), "SyncAI ");
if (!irq_ptr->siga_flag.no_sync_out_ti)
sprintf(s + strlen(s), "SyncOutAI ");
if (!irq_ptr->siga_flag.no_sync_out_pci)
sprintf(s + strlen(s), "SyncOutPCI");
sprintf(s + strlen(s), "\n");
printk(KERN_INFO "qdio: %s", s);
}
int __init qdio_setup_init(void)
{
char dbf_text[15];
qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
256, 0, NULL);
if (!qdio_q_cache)
return -ENOMEM;
/* Check for OSA/FCP thin interrupts (bit 67). */
sprintf(dbf_text, "thini%1x",
(css_general_characteristics.aif_osa) ? 1 : 0);
QDIO_DBF_TEXT0(0, setup, dbf_text);
/* Check for QEBSM support in general (bit 58). */
sprintf(dbf_text, "cssQBS:%1x",
(qebsm_possible()) ? 1 : 0);
QDIO_DBF_TEXT0(0, setup, dbf_text);
return 0;
}
void __exit qdio_setup_exit(void)
{
kmem_cache_destroy(qdio_q_cache);
}
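
qdio_setup_irq() above consumes a struct qdio_initialize prepared by the upper-layer driver (qeth, zfcp). A hedged sketch of such a descriptor, using only the fields this patch references; the function name, counts, adapter name and the buffer/handler arguments are placeholders, not the real drivers' values.

#include <linux/string.h>
#include <asm/qdio.h>

/* illustrative only; a real driver fills this before qdio_allocate()
 * and qdio_establish() */
static void example_fill_init_data(struct qdio_initialize *init_data,
                                   struct ccw_device *cdev,
                                   qdio_handler_t *in_handler,
                                   qdio_handler_t *out_handler,
                                   void **in_sbals, void **out_sbals)
{
        memset(init_data, 0, sizeof(*init_data));
        init_data->cdev = cdev;
        init_data->q_format = QDIO_QETH_QFMT;   /* or QDIO_ZFCP_QFMT / QDIO_IQDIO_QFMT */
        memcpy(init_data->adapter_name, "EXAMPLE ", 8); /* placeholder name */
        init_data->no_input_qs = 1;
        init_data->no_output_qs = 1;
        init_data->input_handler = in_handler;
        init_data->output_handler = out_handler;
        init_data->int_parm = 0;
        init_data->input_sbal_addr_array = in_sbals;
        init_data->output_sbal_addr_array = out_sbals;
}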


@@ -0,0 +1,380 @@
/*
* linux/drivers/s390/cio/thinint_qdio.c
*
* thin interrupt support for qdio
*
* Copyright 2000-2008 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/io.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"
/*
* Restriction: only 63 iqdio subchannels can have their own indicator;
* after that, subsequent subchannels share one indicator
*/
#define TIQDIO_NR_NONSHARED_IND 63
#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND 63
/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
/* adapter local summary indicator */
static unsigned char *tiqdio_alsi;
/* device state change indicators */
struct indicator_t {
u32 ind; /* u32 because of compare-and-swap performance */
atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};
static struct indicator_t *q_indicators;
static void tiqdio_tasklet_fn(unsigned long data);
static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
static int css_qdio_omit_svs;
static inline unsigned long do_clear_global_summary(void)
{
register unsigned long __fn asm("1") = 3;
register unsigned long __tmp asm("2");
register unsigned long __time asm("3");
asm volatile(
" .insn rre,0xb2650000,2,0"
: "+d" (__fn), "=d" (__tmp), "=d" (__time));
return __time;
}
/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
int i;
for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
if (!atomic_read(&q_indicators[i].count)) {
atomic_set(&q_indicators[i].count, 1);
return &q_indicators[i].ind;
}
/* use the shared indicator */
atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
return &q_indicators[TIQDIO_SHARED_IND].ind;
}
static void put_indicator(u32 *addr)
{
int i;
if (!addr)
return;
i = ((unsigned long)addr - (unsigned long)q_indicators) /
sizeof(struct indicator_t);
atomic_dec(&q_indicators[i].count);
}
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
/* No TDD facility? If we must use SIGA-s we can also omit SVS. */
if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
css_qdio_omit_svs = 1;
for_each_input_queue(irq_ptr, q, i) {
list_add_rcu(&q->entry, &tiq_list);
synchronize_rcu();
}
xchg(irq_ptr->dsci, 1);
tasklet_schedule(&tiqdio_tasklet);
}
/*
* we cannot stop the tiqdio tasklet here since it is for all
* thinint qdio devices and it must run as long as there is a
* thinint device left
*/
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{
struct qdio_q *q;
int i;
for_each_input_queue(irq_ptr, q, i) {
list_del_rcu(&q->entry);
synchronize_rcu();
}
}
static inline int tiqdio_inbound_q_done(struct qdio_q *q)
{
unsigned char state;
if (!atomic_read(&q->nr_buf_used))
return 1;
qdio_siga_sync_q(q);
get_buf_state(q, q->first_to_check, &state);
if (state == SLSB_P_INPUT_PRIMED)
/* more work coming */
return 0;
return 1;
}
static inline int shared_ind(struct qdio_irq *irq_ptr)
{
return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
qdio_perf_stat_inc(&perf_stats.thinint_inbound);
qdio_sync_after_thinint(q);
/*
* Maybe we have work on our outbound queues... at least
* we have to check the PCI capable queues.
*/
qdio_check_outbound_after_thinint(q);
again:
if (!qdio_inbound_q_moved(q))
return;
qdio_kick_inbound_handler(q);
if (!tiqdio_inbound_q_done(q)) {
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
goto again;
}
qdio_stop_polling(q);
/*
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
if (!tiqdio_inbound_q_done(q)) {
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
goto again;
}
}
void tiqdio_inbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
__tiqdio_inbound_processing(q);
}
/* check for work on all inbound thinint queues */
static void tiqdio_tasklet_fn(unsigned long data)
{
struct qdio_q *q;
qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
again:
/* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock();
list_for_each_entry_rcu(q, &tiq_list, entry)
/* only process queues from changed sets */
if (*q->irq_ptr->dsci) {
/* only clear it if the indicator is non-shared */
if (!shared_ind(q->irq_ptr))
xchg(q->irq_ptr->dsci, 0);
/*
* don't call inbound processing directly since
* that could starve other thinint queues
*/
tasklet_schedule(&q->tasklet);
}
rcu_read_unlock();
/*
* if we used the shared indicator clear it now after all queues
* were processed
*/
if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
/* prevent racing */
if (*tiqdio_alsi)
xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
}
/* check for more work */
if (*tiqdio_alsi) {
xchg(tiqdio_alsi, 0);
qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
goto again;
}
}
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @ind: pointer to adapter local summary indicator
* @drv_data: NULL
*/
static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
qdio_perf_stat_inc(&perf_stats.thin_int);
/*
* SVS only when needed: issue SVS to benefit from iqdio interrupt
* avoidance (SVS clears adapter interrupt suppression overwrite)
*/
if (!css_qdio_omit_svs)
do_clear_global_summary();
/*
* reset local summary indicator (tiqdio_alsi) to stop adapter
* interrupts for now, the tasklet will clean all dsci's
*/
xchg((u8 *)ind, 0);
tasklet_hi_schedule(&tiqdio_tasklet);
}
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
struct scssc_area *scssc_area;
char dbf_text[15];
void *ptr;
int rc;
scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
memset(scssc_area, 0, PAGE_SIZE);
if (reset) {
scssc_area->summary_indicator_addr = 0;
scssc_area->subchannel_indicator_addr = 0;
} else {
scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
scssc_area->subchannel_indicator_addr =
virt_to_phys(irq_ptr->dsci);
}
scssc_area->request = (struct chsc_header) {
.length = 0x0fe0,
.code = 0x0021,
};
scssc_area->operation_code = 0;
scssc_area->ks = PAGE_DEFAULT_KEY;
scssc_area->kc = PAGE_DEFAULT_KEY;
scssc_area->isc = QDIO_AIRQ_ISC;
scssc_area->schid = irq_ptr->schid;
/* enable the time delay disablement facility */
if (css_general_characteristics.aif_tdd)
scssc_area->word_with_d_bit = 0x10000000;
rc = chsc(scssc_area);
if (rc)
return -EIO;
rc = chsc_error_from_response(scssc_area->response.code);
if (rc) {
sprintf(dbf_text, "sidR%4x", scssc_area->response.code);
QDIO_DBF_TEXT1(0, trace, dbf_text);
QDIO_DBF_TEXT1(0, setup, dbf_text);
ptr = &scssc_area->response;
QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN);
return rc;
}
QDIO_DBF_TEXT2(0, setup, "setscind");
QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr,
sizeof(unsigned long));
QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr,
sizeof(unsigned long));
return 0;
}
/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS,
GFP_KERNEL);
if (!q_indicators)
return -ENOMEM;
return 0;
}
void tiqdio_free_memory(void)
{
kfree(q_indicators);
}
int __init tiqdio_register_thinints(void)
{
char dbf_text[20];
isc_register(QDIO_AIRQ_ISC);
tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
NULL, QDIO_AIRQ_ISC);
if (IS_ERR(tiqdio_alsi)) {
sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi));
QDIO_DBF_TEXT0(0, setup, dbf_text);
tiqdio_alsi = NULL;
isc_unregister(QDIO_AIRQ_ISC);
return -ENOMEM;
}
return 0;
}
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return 0;
/* Check for aif time delay disablement. If installed,
* omit SVS even under LPAR
*/
if (css_general_characteristics.aif_tdd)
css_qdio_omit_svs = 1;
return set_subchannel_ind(irq_ptr, 0);
}
void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return;
irq_ptr->dsci = get_indicator();
QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *));
}
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return;
/* reset adapter interrupt indicators */
put_indicator(irq_ptr->dsci);
set_subchannel_ind(irq_ptr, 1);
}
void __exit tiqdio_unregister_thinints(void)
{
tasklet_disable(&tiqdio_tasklet);
if (tiqdio_alsi) {
s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
isc_unregister(QDIO_AIRQ_ISC);
}
}

View File

@ -239,11 +239,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
/*not used unless the microcode gets patched*/
#define QETH_PCI_TIMER_VALUE(card) 3
#define QETH_MIN_INPUT_THRESHOLD 1
#define QETH_MAX_INPUT_THRESHOLD 500
#define QETH_MIN_OUTPUT_THRESHOLD 1
#define QETH_MAX_OUTPUT_THRESHOLD 300
/* priority queing */
#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
#define QETH_DEFAULT_QUEUE 2
@ -811,17 +806,14 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
enum qeth_ipa_cmds, enum qeth_prot_versions);
int qeth_query_setadapterparms(struct qeth_card *);
int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int,
unsigned int, const char *);
int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *);
void qeth_queue_input_buffer(struct qeth_card *, int);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qdio_buffer *, struct qdio_buffer_element **, int *,
struct qeth_hdr **);
void qeth_schedule_recovery(struct qeth_card *);
void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
unsigned int, unsigned int,
unsigned int, int, int,
unsigned long);
int, int, int, unsigned long);
void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *);

View File

@ -2073,7 +2073,7 @@ static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
static int qeth_qdio_activate(struct qeth_card *card)
{
QETH_DBF_TEXT(SETUP, 3, "qdioact");
return qdio_activate(CARD_DDEV(card), 0);
return qdio_activate(CARD_DDEV(card));
}
static int qeth_dm_act(struct qeth_card *card)
@ -2349,16 +2349,11 @@ int qeth_init_qdio_queues(struct qeth_card *card)
card->qdio.in_q->next_buf_to_init =
card->qdio.in_buf_pool.buf_count - 1;
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
card->qdio.in_buf_pool.buf_count - 1, NULL);
card->qdio.in_buf_pool.buf_count - 1);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
return rc;
}
rc = qdio_synchronize(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
return rc;
}
/* outbound queue */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
memset(card->qdio.out_qs[i]->qdio_bufs, 0,
@ -2559,9 +2554,9 @@ int qeth_query_setadapterparms(struct qeth_card *card)
EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
unsigned int siga_error, const char *dbftext)
const char *dbftext)
{
if (qdio_error || siga_error) {
if (qdio_error) {
QETH_DBF_TEXT(TRACE, 2, dbftext);
QETH_DBF_TEXT(QERR, 2, dbftext);
QETH_DBF_TEXT_(QERR, 2, " F15=%02X",
@ -2569,7 +2564,6 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
buf->element[14].flags & 0xff);
QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
QETH_DBF_TEXT_(QERR, 2, " serr=%X", siga_error);
return 1;
}
return 0;
@ -2622,9 +2616,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
card->perf_stats.inbound_do_qdio_start_time =
qeth_get_micros();
}
rc = do_QDIO(CARD_DDEV(card),
QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
0, queue->next_buf_to_init, count, NULL);
rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
queue->next_buf_to_init, count);
if (card->options.performance_stats)
card->perf_stats.inbound_do_qdio_time +=
qeth_get_micros() -
@ -2643,14 +2636,13 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
static int qeth_handle_send_error(struct qeth_card *card,
struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err,
unsigned int siga_err)
struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
int sbalf15 = buffer->buffer->element[15].flags & 0xff;
int cc = siga_err & 3;
int cc = qdio_err & 3;
QETH_DBF_TEXT(TRACE, 6, "hdsnderr");
qeth_check_qdio_errors(buffer->buffer, qdio_err, siga_err, "qouterr");
qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
switch (cc) {
case 0:
if (qdio_err) {
@ -2662,7 +2654,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
}
return QETH_SEND_ERROR_NONE;
case 2:
if (siga_err & QDIO_SIGA_ERROR_B_BIT_SET) {
if (qdio_err & QDIO_ERROR_SIGA_BUSY) {
QETH_DBF_TEXT(TRACE, 1, "SIGAcc2B");
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
return QETH_SEND_ERROR_KICK_IT;
@ -2758,8 +2750,8 @@ static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
return 0;
}
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
int index, int count)
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
int count)
{
struct qeth_qdio_out_buffer *buf;
int rc;
@ -2807,12 +2799,10 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
qeth_get_micros();
}
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (under_int)
qdio_flags |= QDIO_FLAG_UNDER_INTERRUPT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count, NULL);
queue->queue_no, index, count);
if (queue->card->options.performance_stats)
queue->card->perf_stats.outbound_do_qdio_time +=
qeth_get_micros() -
@ -2866,16 +2856,15 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
queue->card->perf_stats.bufs_sent_pack +=
flush_cnt;
if (flush_cnt)
qeth_flush_buffers(queue, 1, index, flush_cnt);
qeth_flush_buffers(queue, index, flush_cnt);
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
}
}
}
void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
unsigned int qdio_error, unsigned int siga_error,
unsigned int __queue, int first_element, int count,
unsigned long card_ptr)
void qeth_qdio_output_handler(struct ccw_device *ccwdev,
unsigned int qdio_error, int __queue, int first_element,
int count, unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *) card_ptr;
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
@ -2883,15 +2872,12 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
int i;
QETH_DBF_TEXT(TRACE, 6, "qdouhdl");
if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
QETH_DBF_TEXT(TRACE, 2, "achkcond");
QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 2, "%08x", status);
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
return;
}
if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
QETH_DBF_TEXT(TRACE, 2, "achkcond");
QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card));
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
return;
}
if (card->options.performance_stats) {
card->perf_stats.outbound_handler_cnt++;
@ -2901,8 +2887,7 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, unsigned int status,
for (i = first_element; i < (first_element + count); ++i) {
buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
/*we only handle the KICK_IT error by doing a recovery */
if (qeth_handle_send_error(card, buffer,
qdio_error, siga_error)
if (qeth_handle_send_error(card, buffer, qdio_error)
== QETH_SEND_ERROR_KICK_IT){
netif_stop_queue(card->dev);
qeth_schedule_recovery(card);
@ -3164,11 +3149,11 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
if (ctx == NULL) {
qeth_fill_buffer(queue, buffer, skb);
qeth_flush_buffers(queue, 0, index, 1);
qeth_flush_buffers(queue, index, 1);
} else {
flush_cnt = qeth_eddp_fill_buffer(queue, ctx, index);
WARN_ON(buffers_needed != flush_cnt);
qeth_flush_buffers(queue, 0, index, flush_cnt);
qeth_flush_buffers(queue, index, flush_cnt);
}
return 0;
out:
@ -3221,8 +3206,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
* again */
if (atomic_read(&buffer->state) !=
QETH_QDIO_BUF_EMPTY){
qeth_flush_buffers(queue, 0,
start_index, flush_count);
qeth_flush_buffers(queue, start_index,
flush_count);
atomic_set(&queue->state,
QETH_OUT_Q_UNLOCKED);
return -EBUSY;
@ -3253,7 +3238,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
flush_count += tmp;
out:
if (flush_count)
qeth_flush_buffers(queue, 0, start_index, flush_count);
qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
/*
@ -3274,7 +3259,7 @@ out:
if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
flush_count += qeth_flush_buffers_on_no_pci(queue);
if (flush_count)
qeth_flush_buffers(queue, 0, start_index, flush_count);
qeth_flush_buffers(queue, start_index, flush_count);
}
/* at this point the queue is UNLOCKED again */
if (queue->card->options.performance_stats && do_pack)
@ -3686,10 +3671,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.q_format = qeth_get_qdio_q_format(card);
init_data.qib_param_field_format = 0;
init_data.qib_param_field = qib_param_field;
init_data.min_input_threshold = QETH_MIN_INPUT_THRESHOLD;
init_data.max_input_threshold = QETH_MAX_INPUT_THRESHOLD;
init_data.min_output_threshold = QETH_MIN_OUTPUT_THRESHOLD;
init_data.max_output_threshold = QETH_MAX_OUTPUT_THRESHOLD;
init_data.no_input_qs = 1;
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = card->discipline.input_handler;
@ -3751,8 +3732,9 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
int qeth_core_hardsetup_card(struct qeth_card *card)
{
struct qdio_ssqd_desc *qdio_ssqd;
int retries = 3;
int mpno;
int mpno = 0;
int rc;
QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
@ -3784,7 +3766,10 @@ retry:
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
return rc;
}
mpno = qdio_get_ssqd_pct(CARD_DDEV(card));
qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card));
if (qdio_ssqd)
mpno = qdio_ssqd->pcnt;
if (mpno)
mpno = min(mpno - 1, QETH_MAX_PORTNO);
if (card->info.portno > mpno) {

View File

@ -726,8 +726,7 @@ tx_drop:
}
static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
unsigned int status, unsigned int qdio_err,
unsigned int siga_err, unsigned int queue,
unsigned int qdio_err, unsigned int queue,
int first_element, int count, unsigned long card_ptr)
{
struct net_device *net_dev;
@ -742,23 +741,20 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_cnt++;
card->perf_stats.inbound_start_time = qeth_get_micros();
}
if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
QETH_DBF_TEXT(TRACE, 1, "qdinchk");
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
count);
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status);
qeth_schedule_recovery(card);
return;
}
if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
QETH_DBF_TEXT(TRACE, 1, "qdinchk");
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", first_element,
count);
QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
qeth_schedule_recovery(card);
return;
}
for (i = first_element; i < (first_element + count); ++i) {
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
qeth_check_qdio_errors(buffer->buffer,
qdio_err, siga_err, "qinerr")))
if (!(qdio_err &&
qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
qeth_l2_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
qeth_put_buffer_pool_entry(card, buffer->pool_entry);

View File

@ -2939,8 +2939,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
}
static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
unsigned int status, unsigned int qdio_err,
unsigned int siga_err, unsigned int queue, int first_element,
unsigned int qdio_err, unsigned int queue, int first_element,
int count, unsigned long card_ptr)
{
struct net_device *net_dev;
@ -2955,23 +2954,21 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
card->perf_stats.inbound_cnt++;
card->perf_stats.inbound_start_time = qeth_get_micros();
}
if (status & QDIO_STATUS_LOOK_FOR_ERROR) {
if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION) {
QETH_DBF_TEXT(TRACE, 1, "qdinchk");
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
first_element, count);
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X", queue, status);
qeth_schedule_recovery(card);
return;
}
if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
QETH_DBF_TEXT(TRACE, 1, "qdinchk");
QETH_DBF_TEXT_(TRACE, 1, "%s", CARD_BUS_ID(card));
QETH_DBF_TEXT_(TRACE, 1, "%04X%04X",
first_element, count);
QETH_DBF_TEXT_(TRACE, 1, "%04X", queue);
qeth_schedule_recovery(card);
return;
}
for (i = first_element; i < (first_element + count); ++i) {
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
if (!((status & QDIO_STATUS_LOOK_FOR_ERROR) &&
if (!(qdio_err &&
qeth_check_qdio_errors(buffer->buffer,
qdio_err, siga_err, "qinerr")))
qdio_err, "qinerr")))
qeth_l3_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
qeth_put_buffer_pool_entry(card, buffer->pool_entry);

View File

@ -297,15 +297,13 @@ void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
/**
* zfcp_hba_dbf_event_qdio - trace event for QDIO related failure
* @adapter: adapter affected by this QDIO related event
* @status: as passed by qdio module
* @qdio_error: as passed by qdio module
* @siga_error: as passed by qdio module
* @sbal_index: first buffer with error condition, as passed by qdio module
* @sbal_count: number of buffers affected, as passed by qdio module
*/
void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
unsigned int qdio_error, unsigned int siga_error,
int sbal_index, int sbal_count)
void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
unsigned int qdio_error, int sbal_index,
int sbal_count)
{
struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
unsigned long flags;
@ -313,9 +311,7 @@ void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter, unsigned int status,
spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
memset(r, 0, sizeof(*r));
strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE);
r->u.qdio.status = status;
r->u.qdio.qdio_error = qdio_error;
r->u.qdio.siga_error = siga_error;
r->u.qdio.sbal_index = sbal_index;
r->u.qdio.sbal_count = sbal_count;
debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
@ -398,9 +394,7 @@ static void zfcp_hba_dbf_view_status(char **p,
static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
{
zfcp_dbf_out(p, "status", "0x%08x", r->status);
zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
zfcp_dbf_out(p, "siga_error", "0x%08x", r->siga_error);
zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
}

View File

@ -139,9 +139,7 @@ struct zfcp_hba_dbf_record_status {
} __attribute__ ((packed));
struct zfcp_hba_dbf_record_qdio {
u32 status;
u32 qdio_error;
u32 siga_error;
u8 sbal_index;
u8 sbal_count;
} __attribute__ ((packed));

View File

@ -48,9 +48,8 @@ extern void zfcp_rec_dbf_event_action(u8, struct zfcp_erp_action *);
extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
struct fsf_status_read_buffer *);
extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *,
unsigned int, unsigned int, unsigned int,
int, int);
extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
int);
extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);

View File

@ -74,17 +74,15 @@ static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
}
}
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int status,
unsigned int qdio_err, unsigned int siga_err,
unsigned int queue_no, int first, int count,
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int first, int count,
unsigned long parm)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
struct zfcp_qdio_queue *queue = &adapter->req_q;
if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err,
first, count);
if (unlikely(qdio_err)) {
zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
zfcp_qdio_handler_error(adapter, 140);
return;
}
@ -129,8 +127,7 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
count = atomic_read(&queue->count) + processed;
retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
0, start, count, NULL);
retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);
if (unlikely(retval)) {
atomic_set(&queue->count, count);
@ -142,9 +139,8 @@ static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
}
}
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status,
unsigned int qdio_err, unsigned int siga_err,
unsigned int queue_no, int first, int count,
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
int queue_no, int first, int count,
unsigned long parm)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
@ -152,9 +148,8 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int status,
volatile struct qdio_buffer_element *sbale;
int sbal_idx, sbale_idx, sbal_no;
if (unlikely(status & QDIO_STATUS_LOOK_FOR_ERROR)) {
zfcp_hba_dbf_event_qdio(adapter, status, qdio_err, siga_err,
first, count);
if (unlikely(qdio_err)) {
zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
zfcp_qdio_handler_error(adapter, 147);
return;
}
@ -362,7 +357,7 @@ int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
}
retval = do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0, first,
count, NULL);
count);
if (unlikely(retval)) {
zfcp_qdio_zero_sbals(req_q->sbal, first, count);
return retval;
@ -400,10 +395,6 @@ int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
init_data->qib_param_field = NULL;
init_data->input_slib_elements = NULL;
init_data->output_slib_elements = NULL;
init_data->min_input_threshold = 1;
init_data->max_input_threshold = 5000;
init_data->min_output_threshold = 1;
init_data->max_output_threshold = 1000;
init_data->no_input_qs = 1;
init_data->no_output_qs = 1;
init_data->input_handler = zfcp_qdio_int_resp;
@ -436,9 +427,7 @@ void zfcp_qdio_close(struct zfcp_adapter *adapter)
atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
spin_unlock(&req_q->lock);
while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR)
== -EINPROGRESS)
ssleep(1);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
/* cleanup used outbound sbals */
count = atomic_read(&req_q->count);
@ -473,7 +462,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
return -EIO;
}
if (qdio_activate(adapter->ccw_device, 0)) {
if (qdio_activate(adapter->ccw_device)) {
dev_err(&adapter->ccw_device->dev,
"Activate of QDIO queues failed.\n");
goto failed_qdio;
@ -487,7 +476,7 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
}
if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
QDIO_MAX_BUFFERS_PER_Q, NULL)) {
QDIO_MAX_BUFFERS_PER_Q)) {
dev_err(&adapter->ccw_device->dev,
"Init of QDIO response queue failed.\n");
goto failed_qdio;
@ -501,9 +490,6 @@ int zfcp_qdio_open(struct zfcp_adapter *adapter)
return 0;
failed_qdio:
while (qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR)
== -EINPROGRESS)
ssleep(1);
qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
return -EIO;
}

View File

@ -1,404 +1,382 @@
/*
* linux/include/asm-s390/qdio.h
*
* Linux for S/390 QDIO base support, HiperSockets base support
* version 2
*
* Copyright 2000,2002 IBM Corporation
* Copyright 2000,2008 IBM Corp.
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*
*/
#ifndef __QDIO_H__
#define __QDIO_H__
/* note that most of the typedefs are from Ingo */
#include <linux/interrupt.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#define QDIO_NAME "qdio "
#define QDIO_MAX_QUEUES_PER_IRQ 32
#define QDIO_MAX_BUFFERS_PER_Q 128
#define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1)
#define QDIO_MAX_ELEMENTS_PER_BUFFER 16
#define QDIO_SBAL_SIZE 256
#ifndef __s390x__
#define QDIO_32_BIT
#endif /* __s390x__ */
#define QDIO_QETH_QFMT 0
#define QDIO_ZFCP_QFMT 1
#define QDIO_IQDIO_QFMT 2
/**** CONSTANTS, that are relied on without using these symbols *****/
#define QDIO_MAX_QUEUES_PER_IRQ 32 /* used in width of unsigned int */
/************************ END of CONSTANTS **************************/
#define QDIO_MAX_BUFFERS_PER_Q 128 /* must be a power of 2 (%x=&(x-1)*/
#define QDIO_BUF_ORDER 7 /* 2**this == number of pages used for sbals in 1 q */
#define QDIO_MAX_ELEMENTS_PER_BUFFER 16
#define SBAL_SIZE 256
/**
* struct qdesfmt0 - queue descriptor, format 0
* @sliba: storage list information block address
* @sla: storage list address
* @slsba: storage list state block address
* @akey: access key for DLIB
* @bkey: access key for SL
* @ckey: access key for SBALs
* @dkey: access key for SLSB
*/
struct qdesfmt0 {
u64 sliba;
u64 sla;
u64 slsba;
u32 : 32;
u32 akey : 4;
u32 bkey : 4;
u32 ckey : 4;
u32 dkey : 4;
u32 : 16;
} __attribute__ ((packed));
#define QDIO_QETH_QFMT 0
#define QDIO_ZFCP_QFMT 1
#define QDIO_IQDIO_QFMT 2
#define QDIO_IQDIO_QFMT_ASYNCH 3
/**
* struct qdr - queue description record (QDR)
* @qfmt: queue format
* @pfmt: implementation dependent parameter format
* @ac: adapter characteristics
* @iqdcnt: input queue descriptor count
* @oqdcnt: output queue descriptor count
* @iqdsz: input queue descriptor size
* @oqdsz: output queue descriptor size
* @qiba: queue information block address
* @qkey: queue information block key
* @qdf0: queue descriptions
*/
struct qdr {
u32 qfmt : 8;
u32 pfmt : 8;
u32 : 8;
u32 ac : 8;
u32 : 8;
u32 iqdcnt : 8;
u32 : 8;
u32 oqdcnt : 8;
u32 : 8;
u32 iqdsz : 8;
u32 : 8;
u32 oqdsz : 8;
/* private: */
u32 res[9];
/* public: */
u64 qiba;
u32 : 32;
u32 qkey : 4;
u32 : 28;
struct qdesfmt0 qdf0[126];
} __attribute__ ((packed, aligned(4096)));
struct qdio_buffer_element{
unsigned int flags;
unsigned int length;
#ifdef QDIO_32_BIT
#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
#define QIB_RFLAGS_ENABLE_QEBSM 0x80
/**
* struct qib - queue information block (QIB)
* @qfmt: queue format
* @pfmt: implementation dependent parameter format
* @rflags: QEBSM
* @ac: adapter characteristics
* @isliba: absolute address of first input SLIB
* @osliba: absolute address of first output SLIB
* @ebcnam: adapter identifier in EBCDIC
* @parm: implementation dependent parameters
*/
struct qib {
u32 qfmt : 8;
u32 pfmt : 8;
u32 rflags : 8;
u32 ac : 8;
u32 : 32;
u64 isliba;
u64 osliba;
u32 : 32;
u32 : 32;
u8 ebcnam[8];
/* private: */
u8 res[88];
/* public: */
u8 parm[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed, aligned(256)));
/**
* struct slibe - storage list information block element (SLIBE)
* @parms: implementation dependent parameters
*/
struct slibe {
u64 parms;
};
/**
* struct slib - storage list information block (SLIB)
* @nsliba: next SLIB address (if any)
* @sla: SL address
* @slsba: SLSB address
* @slibe: SLIB elements
*/
struct slib {
u64 nsliba;
u64 sla;
u64 slsba;
/* private: */
u8 res[1000];
/* public: */
struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed, aligned(2048)));
/**
* struct sbal_flags - storage block address list flags
* @last: last entry
* @cont: contiguous storage
* @frag: fragmentation
*/
struct sbal_flags {
u8 : 1;
u8 last : 1;
u8 cont : 1;
u8 : 1;
u8 frag : 2;
u8 : 2;
} __attribute__ ((packed));
#define SBAL_FLAGS_FIRST_FRAG 0x04000000UL
#define SBAL_FLAGS_MIDDLE_FRAG 0x08000000UL
#define SBAL_FLAGS_LAST_FRAG 0x0c000000UL
#define SBAL_FLAGS_LAST_ENTRY 0x40000000UL
#define SBAL_FLAGS_CONTIGUOUS 0x20000000UL
#define SBAL_FLAGS0_DATA_CONTINUATION 0x20UL
/* Awesome OpenFCP extensions */
#define SBAL_FLAGS0_TYPE_STATUS 0x00UL
#define SBAL_FLAGS0_TYPE_WRITE 0x08UL
#define SBAL_FLAGS0_TYPE_READ 0x10UL
#define SBAL_FLAGS0_TYPE_WRITE_READ 0x18UL
#define SBAL_FLAGS0_MORE_SBALS 0x04UL
#define SBAL_FLAGS0_COMMAND 0x02UL
#define SBAL_FLAGS0_LAST_SBAL 0x00UL
#define SBAL_FLAGS0_ONLY_SBAL SBAL_FLAGS0_COMMAND
#define SBAL_FLAGS0_MIDDLE_SBAL SBAL_FLAGS0_MORE_SBALS
#define SBAL_FLAGS0_FIRST_SBAL SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND
#define SBAL_FLAGS0_PCI 0x40
/**
* struct sbal_sbalf_0 - sbal flags for sbale 0
* @pci: PCI indicator
* @cont: data continuation
* @sbtype: storage-block type (FCP)
*/
struct sbal_sbalf_0 {
u8 : 1;
u8 pci : 1;
u8 cont : 1;
u8 sbtype : 2;
u8 : 3;
} __attribute__ ((packed));
/**
* struct sbal_sbalf_1 - sbal flags for sbale 1
* @key: storage key
*/
struct sbal_sbalf_1 {
u8 : 4;
u8 key : 4;
} __attribute__ ((packed));
/**
* struct sbal_sbalf_14 - sbal flags for sbale 14
* @erridx: error index
*/
struct sbal_sbalf_14 {
u8 : 4;
u8 erridx : 4;
} __attribute__ ((packed));
/**
* struct sbal_sbalf_15 - sbal flags for sbale 15
* @reason: reason for error state
*/
struct sbal_sbalf_15 {
u8 reason;
} __attribute__ ((packed));
/**
* union sbal_sbalf - storage block address list flags
* @i0: sbalf0
* @i1: sbalf1
* @i14: sbalf14
* @i15: sbalf15
* @value: raw value
*/
union sbal_sbalf {
struct sbal_sbalf_0 i0;
struct sbal_sbalf_1 i1;
struct sbal_sbalf_14 i14;
struct sbal_sbalf_15 i15;
u8 value;
};
/**
* struct qdio_buffer_element - SBAL entry
* @flags: flags
* @length: length
* @addr: address
*/
struct qdio_buffer_element {
u32 flags;
u32 length;
#ifdef CONFIG_32BIT
/* private: */
void *reserved;
#endif /* QDIO_32_BIT */
/* public: */
#endif
void *addr;
} __attribute__ ((packed,aligned(16)));
} __attribute__ ((packed, aligned(16)));
struct qdio_buffer{
volatile struct qdio_buffer_element element[16];
} __attribute__ ((packed,aligned(256)));
/**
* struct qdio_buffer - storage block address list (SBAL)
* @element: SBAL entries
*/
struct qdio_buffer {
struct qdio_buffer_element element[QDIO_MAX_ELEMENTS_PER_BUFFER];
} __attribute__ ((packed, aligned(256)));
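/*
 * A minimal sketch (not part of this patch) of how a driver could build the
 * QDIO_MAX_BUFFERS_PER_Q pointer array that is later handed to
 * qdio_initialize via input_sbal_addr_array/output_sbal_addr_array (see
 * struct qdio_initialize below). my_in_sbals[] and my_alloc_in_sbals() are
 * hypothetical names; the buffers are carved out of whole pages so the
 * required 256-byte alignment of struct qdio_buffer is kept. Error
 * unwinding of already allocated pages is left out.
 */
static void *my_in_sbals[QDIO_MAX_BUFFERS_PER_Q];

static int my_alloc_in_sbals(void)
{
	const int per_page = PAGE_SIZE / sizeof(struct qdio_buffer);
	struct qdio_buffer *page = NULL;
	int i;

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
		if (!(i % per_page)) {
			/* a fresh page holds per_page SBALs */
			page = (struct qdio_buffer *)
				__get_free_page(GFP_KERNEL | GFP_DMA);
			if (!page)
				return -ENOMEM;
		}
		my_in_sbals[i] = page + (i % per_page);
	}
	return 0;
}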
/**
* struct sl_element - storage list entry
* @sbal: absolute SBAL address
*/
struct sl_element {
#ifdef CONFIG_32BIT
/* private: */
unsigned long reserved;
/* public: */
#endif
unsigned long sbal;
} __attribute__ ((packed));
/* params are: ccw_device, status, qdio_error, siga_error,
queue_number, first element processed, number of elements processed,
int_parm */
typedef void qdio_handler_t(struct ccw_device *,unsigned int,unsigned int,
unsigned int,unsigned int,int,int,unsigned long);
/**
* struct sl - storage list (SL)
* @element: SL entries
*/
struct sl {
struct sl_element element[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed, aligned(1024)));
/**
* struct slsb - storage list state block (SLSB)
* @val: state per buffer
*/
struct slsb {
u8 val[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed, aligned(256)));
#define QDIO_STATUS_INBOUND_INT 0x01
#define QDIO_STATUS_OUTBOUND_INT 0x02
#define QDIO_STATUS_LOOK_FOR_ERROR 0x04
#define QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR 0x08
#define QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR 0x10
#define QDIO_STATUS_ACTIVATE_CHECK_CONDITION 0x20
struct qdio_ssqd_desc {
u8 flags;
u8:8;
u16 sch;
u8 qfmt;
u8 parm;
u8 qdioac1;
u8 sch_class;
u8 pcnt;
u8 icnt;
u8:8;
u8 ocnt;
u8:8;
u8 mbccnt;
u16 qdioac2;
u64 sch_token;
u64:64;
} __attribute__ ((packed));
#define QDIO_SIGA_ERROR_ACCESS_EXCEPTION 0x10
#define QDIO_SIGA_ERROR_B_BIT_SET 0x20
/* params are: ccw_device, qdio_error, queue_number,
first element processed, number of elements processed, int_parm */
typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
int, int, unsigned long);
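/*
 * A minimal sketch (not part of this patch) of an upper-layer handler using
 * the new six-parameter signature. my_input_handler is a hypothetical name;
 * a real driver would recover its private data from int_parm and process
 * the reported buffers.
 */
static void my_input_handler(struct ccw_device *cdev, unsigned int qdio_err,
			     int queue, int first, int count,
			     unsigned long int_parm)
{
	int i;

	if (qdio_err & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
		/* the queues are gone, trigger device recovery */
		return;
	}
	for (i = first; i < first + count; i++) {
		/* inspect/process buffer i % QDIO_MAX_BUFFERS_PER_Q here */
	}
}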
/* qdio errors reported to the upper-layer program */
#define QDIO_ERROR_SIGA_ACCESS_EXCEPTION 0x10
#define QDIO_ERROR_SIGA_BUSY 0x20
#define QDIO_ERROR_ACTIVATE_CHECK_CONDITION 0x40
#define QDIO_ERROR_SLSB_STATE 0x80
/* for qdio_initialize */
#define QDIO_INBOUND_0COPY_SBALS 0x01
#define QDIO_OUTBOUND_0COPY_SBALS 0x02
#define QDIO_USE_OUTBOUND_PCIS 0x04
#define QDIO_INBOUND_0COPY_SBALS 0x01
#define QDIO_OUTBOUND_0COPY_SBALS 0x02
#define QDIO_USE_OUTBOUND_PCIS 0x04
/* for qdio_cleanup */
#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01
#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01
#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
/**
* struct qdio_initialize - qdio initialization data
* @cdev: associated ccw device
* @q_format: queue format
* @adapter_name: name for the adapter
* @qib_param_field_format: format for qib_parm_field
* @qib_param_field: pointer to 128 bytes or NULL, if no param field
* @input_slib_elements: pointer to no_input_qs * 128 words of data or NULL
* @output_slib_elements: pointer to no_output_qs * 128 words of data or NULL
* @no_input_qs: number of input queues
* @no_output_qs: number of output queues
* @input_handler: handler to be called for input queues
* @output_handler: handler to be called for output queues
* @int_parm: interruption parameter
* @flags: initialization flags
* @input_sbal_addr_array: address of no_input_qs * 128 pointers
* @output_sbal_addr_array: address of no_output_qs * 128 pointers
*/
struct qdio_initialize {
struct ccw_device *cdev;
unsigned char q_format;
unsigned char adapter_name[8];
unsigned int qib_param_field_format; /*adapter dependent*/
/* pointer to 128 bytes or NULL, if no param field */
unsigned char *qib_param_field; /* adapter dependent */
/* pointer to no_queues*128 words of data or NULL */
unsigned int qib_param_field_format;
unsigned char *qib_param_field;
unsigned long *input_slib_elements;
unsigned long *output_slib_elements;
unsigned int min_input_threshold;
unsigned int max_input_threshold;
unsigned int min_output_threshold;
unsigned int max_output_threshold;
unsigned int no_input_qs;
unsigned int no_output_qs;
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
unsigned long int_parm;
unsigned long flags;
void **input_sbal_addr_array; /* addr of n*128 void ptrs */
void **output_sbal_addr_array; /* addr of n*128 void ptrs */
void **input_sbal_addr_array;
void **output_sbal_addr_array;
};
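/*
 * A minimal sketch (not part of this patch) of the new bring-up sequence.
 * my_qdio_bringup(), my_cdev and the handler/SBAL-array arguments are
 * hypothetical; adapter_name, qib_param_field and the slib elements are
 * simply left zeroed here.
 */
static int my_qdio_bringup(struct ccw_device *my_cdev,
			   void **my_in_sbals, void **my_out_sbals,
			   qdio_handler_t *my_in_handler,
			   qdio_handler_t *my_out_handler)
{
	struct qdio_initialize init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cdev = my_cdev;
	init_data.q_format = QDIO_QETH_QFMT;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = my_in_handler;
	init_data.output_handler = my_out_handler;
	init_data.int_parm = (unsigned long) my_cdev;
	init_data.input_sbal_addr_array = my_in_sbals;
	init_data.output_sbal_addr_array = my_out_sbals;

	rc = qdio_allocate(&init_data);
	if (rc)
		return rc;
	rc = qdio_establish(&init_data);
	if (rc)
		goto out_free;
	rc = qdio_activate(my_cdev);
	if (rc)
		goto out_shutdown;
	return 0;

out_shutdown:
	qdio_shutdown(my_cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
out_free:
	qdio_free(my_cdev);
	return rc;
}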
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */
#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
#define QDIO_STATE_STOPPED 0x00000010 /* after queues went down */
#define QDIO_FLAG_SYNC_INPUT 0x01
#define QDIO_FLAG_SYNC_OUTPUT 0x02
#define QDIO_FLAG_PCI_OUT 0x10
extern int qdio_initialize(struct qdio_initialize *init_data);
extern int qdio_allocate(struct qdio_initialize *init_data);
extern int qdio_establish(struct qdio_initialize *init_data);
extern int qdio_activate(struct ccw_device *);
extern int qdio_activate(struct ccw_device *,int flags);
#define QDIO_STATE_MUST_USE_OUTB_PCI 0x00000001
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_initialize */
#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
#define QDIO_STATE_STOPPED 0x00000010 /* after queues went down */
extern unsigned long qdio_get_status(int irq);
#define QDIO_FLAG_SYNC_INPUT 0x01
#define QDIO_FLAG_SYNC_OUTPUT 0x02
#define QDIO_FLAG_UNDER_INTERRUPT 0x04
#define QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT 0x08 /* no effect on
adapter interrupts */
#define QDIO_FLAG_DONT_SIGA 0x10
#define QDIO_FLAG_PCI_OUT 0x20
extern int do_QDIO(struct ccw_device*, unsigned int flags,
unsigned int queue_number,
unsigned int qidx,unsigned int count,
struct qdio_buffer *buffers);
extern int qdio_get_ssqd_pct(struct ccw_device*);
extern int qdio_synchronize(struct ccw_device*, unsigned int flags,
unsigned int queue_number);
extern int do_QDIO(struct ccw_device*, unsigned int flags,
int q_nr, int qidx, int count);
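/*
 * A minimal sketch (not part of this patch) of the slimmed-down do_QDIO
 * call: the trailing qdio_buffer pointer and the siga/under-interrupt
 * arguments are gone. my_refill_input_q, my_cdev and next_buf are
 * hypothetical names.
 */
static int my_refill_input_q(struct ccw_device *my_cdev, int next_buf,
			     int count)
{
	/* give buffers next_buf..next_buf+count-1 of input queue 0 back */
	return do_QDIO(my_cdev, QDIO_FLAG_SYNC_INPUT, 0, next_buf, count);
}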
extern int qdio_cleanup(struct ccw_device*, int how);
extern int qdio_shutdown(struct ccw_device*, int how);
extern int qdio_free(struct ccw_device*);
unsigned char qdio_get_slsb_state(struct ccw_device*, unsigned int flag,
unsigned int queue_number,
unsigned int qidx);
extern void qdio_init_scrubber(void);
struct qdesfmt0 {
#ifdef QDIO_32_BIT
unsigned long res1; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long sliba; /* storage-list-information-block
address */
#ifdef QDIO_32_BIT
unsigned long res2; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long sla; /* storage-list address */
#ifdef QDIO_32_BIT
unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long slsba; /* storage-list-state-block address */
unsigned int res4; /* reserved */
unsigned int akey : 4; /* access key for DLIB */
unsigned int bkey : 4; /* access key for SL */
unsigned int ckey : 4; /* access key for SBALs */
unsigned int dkey : 4; /* access key for SLSB */
unsigned int res5 : 16; /* reserved */
} __attribute__ ((packed));
/*
* Queue-Description record (QDR)
*/
struct qdr {
unsigned int qfmt : 8; /* queue format */
unsigned int pfmt : 8; /* impl. dep. parameter format */
unsigned int res1 : 8; /* reserved */
unsigned int ac : 8; /* adapter characteristics */
unsigned int res2 : 8; /* reserved */
unsigned int iqdcnt : 8; /* input-queue-descriptor count */
unsigned int res3 : 8; /* reserved */
unsigned int oqdcnt : 8; /* output-queue-descriptor count */
unsigned int res4 : 8; /* reserved */
unsigned int iqdsz : 8; /* input-queue-descriptor size */
unsigned int res5 : 8; /* reserved */
unsigned int oqdsz : 8; /* output-queue-descriptor size */
unsigned int res6[9]; /* reserved */
#ifdef QDIO_32_BIT
unsigned long res7; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long qiba; /* queue-information-block address */
unsigned int res8; /* reserved */
unsigned int qkey : 4; /* queue-information-block key */
unsigned int res9 : 28; /* reserved */
/* union _qd {*/ /* why this? */
struct qdesfmt0 qdf0[126];
/* } qd;*/
} __attribute__ ((packed,aligned(4096)));
/*
* queue information block (QIB)
*/
#define QIB_AC_INBOUND_PCI_SUPPORTED 0x80
#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
#define QIB_RFLAGS_ENABLE_QEBSM 0x80
struct qib {
unsigned int qfmt : 8; /* queue format */
unsigned int pfmt : 8; /* impl. dep. parameter format */
unsigned int rflags : 8; /* QEBSM */
unsigned int ac : 8; /* adapter characteristics */
unsigned int res2; /* reserved */
#ifdef QDIO_32_BIT
unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long isliba; /* absolute address of 1st
input SLIB */
#ifdef QDIO_32_BIT
unsigned long res4; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long osliba; /* absolute address of 1st
output SLIB */
unsigned int res5; /* reserved */
unsigned int res6; /* reserved */
unsigned char ebcnam[8]; /* adapter identifier in EBCDIC */
unsigned char res7[88]; /* reserved */
unsigned char parm[QDIO_MAX_BUFFERS_PER_Q];
/* implementation dependent
parameters */
} __attribute__ ((packed,aligned(256)));
/*
* storage-list-information block element (SLIBE)
*/
struct slibe {
#ifdef QDIO_32_BIT
unsigned long res; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long parms; /* implementation dependent
parameters */
};
/*
* storage-list-information block (SLIB)
*/
struct slib {
#ifdef QDIO_32_BIT
unsigned long res1; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long nsliba; /* next SLIB address (if any) */
#ifdef QDIO_32_BIT
unsigned long res2; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long sla; /* SL address */
#ifdef QDIO_32_BIT
unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long slsba; /* SLSB address */
unsigned char res4[1000]; /* reserved */
struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q]; /* SLIB elements */
} __attribute__ ((packed,aligned(2048)));
struct sbal_flags {
unsigned char res1 : 1; /* reserved */
unsigned char last : 1; /* last entry */
unsigned char cont : 1; /* contiguous storage */
unsigned char res2 : 1; /* reserved */
unsigned char frag : 2; /* fragmentation (s.below) */
unsigned char res3 : 2; /* reserved */
} __attribute__ ((packed));
#define SBAL_FLAGS_FIRST_FRAG 0x04000000UL
#define SBAL_FLAGS_MIDDLE_FRAG 0x08000000UL
#define SBAL_FLAGS_LAST_FRAG 0x0c000000UL
#define SBAL_FLAGS_LAST_ENTRY 0x40000000UL
#define SBAL_FLAGS_CONTIGUOUS 0x20000000UL
#define SBAL_FLAGS0_DATA_CONTINUATION 0x20UL
/* Awesome OpenFCP extensions */
#define SBAL_FLAGS0_TYPE_STATUS 0x00UL
#define SBAL_FLAGS0_TYPE_WRITE 0x08UL
#define SBAL_FLAGS0_TYPE_READ 0x10UL
#define SBAL_FLAGS0_TYPE_WRITE_READ 0x18UL
#define SBAL_FLAGS0_MORE_SBALS 0x04UL
#define SBAL_FLAGS0_COMMAND 0x02UL
#define SBAL_FLAGS0_LAST_SBAL 0x00UL
#define SBAL_FLAGS0_ONLY_SBAL SBAL_FLAGS0_COMMAND
#define SBAL_FLAGS0_MIDDLE_SBAL SBAL_FLAGS0_MORE_SBALS
#define SBAL_FLAGS0_FIRST_SBAL SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND
#define SBAL_FLAGS0_PCI 0x40
struct sbal_sbalf_0 {
unsigned char res1 : 1; /* reserved */
unsigned char pci : 1; /* PCI indicator */
unsigned char cont : 1; /* data continuation */
unsigned char sbtype: 2; /* storage-block type (OpenFCP) */
unsigned char res2 : 3; /* reserved */
} __attribute__ ((packed));
struct sbal_sbalf_1 {
unsigned char res1 : 4; /* reserved */
unsigned char key : 4; /* storage key */
} __attribute__ ((packed));
struct sbal_sbalf_14 {
unsigned char res1 : 4; /* reserved */
unsigned char erridx : 4; /* error index */
} __attribute__ ((packed));
struct sbal_sbalf_15 {
unsigned char reason; /* reason for error state */
} __attribute__ ((packed));
union sbal_sbalf {
struct sbal_sbalf_0 i0;
struct sbal_sbalf_1 i1;
struct sbal_sbalf_14 i14;
struct sbal_sbalf_15 i15;
unsigned char value;
};
struct sbal_element {
union {
struct sbal_flags bits; /* flags */
unsigned char value;
} flags;
unsigned int res1 : 16; /* reserved */
union sbal_sbalf sbalf; /* SBAL flags */
unsigned int res2 : 16; /* reserved */
unsigned int count : 16; /* data count */
#ifdef QDIO_32_BIT
unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long addr; /* absolute data address */
} __attribute__ ((packed,aligned(16)));
/*
* storage-block access-list (SBAL)
*/
struct sbal {
struct sbal_element element[QDIO_MAX_ELEMENTS_PER_BUFFER];
} __attribute__ ((packed,aligned(256)));
/*
* storage-list (SL)
*/
struct sl_element {
#ifdef QDIO_32_BIT
unsigned long res; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long sbal; /* absolute SBAL address */
} __attribute__ ((packed));
struct sl {
struct sl_element element[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed,aligned(1024)));
/*
* storage-list-state block (SLSB)
*/
struct slsb_flags {
unsigned char owner : 2; /* SBAL owner */
unsigned char type : 1; /* buffer type */
unsigned char state : 5; /* processing state */
} __attribute__ ((packed));
struct slsb {
union {
unsigned char val[QDIO_MAX_BUFFERS_PER_Q];
struct slsb_flags flags[QDIO_MAX_BUFFERS_PER_Q];
} acc;
} __attribute__ ((packed,aligned(256)));
/*
* SLSB values
*/
#define SLSB_OWNER_PROG 1
#define SLSB_OWNER_CU 2
#define SLSB_TYPE_INPUT 0
#define SLSB_TYPE_OUTPUT 1
#define SLSB_STATE_NOT_INIT 0
#define SLSB_STATE_EMPTY 1
#define SLSB_STATE_PRIMED 2
#define SLSB_STATE_HALTED 0xe
#define SLSB_STATE_ERROR 0xf
#define SLSB_P_INPUT_NOT_INIT 0x80
#define SLSB_P_INPUT_PROCESSING 0x81
#define SLSB_CU_INPUT_EMPTY 0x41
#define SLSB_P_INPUT_PRIMED 0x82
#define SLSB_P_INPUT_HALTED 0x8E
#define SLSB_P_INPUT_ERROR 0x8F
#define SLSB_P_OUTPUT_NOT_INIT 0xA0
#define SLSB_P_OUTPUT_EMPTY 0xA1
#define SLSB_CU_OUTPUT_PRIMED 0x62
#define SLSB_P_OUTPUT_HALTED 0xAE
#define SLSB_P_OUTPUT_ERROR 0xAF
#define SLSB_ERROR_DURING_LOOKUP 0xFF
extern int qdio_free(struct ccw_device *);
extern struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev);
#endif /* __QDIO_H__ */

View File

@ -65,6 +65,7 @@ extern unsigned long machine_flags;
#define MACHINE_FLAG_VM (1UL << 0)
#define MACHINE_FLAG_IEEE (1UL << 1)
#define MACHINE_FLAG_P390 (1UL << 2)
#define MACHINE_FLAG_CSP (1UL << 3)
#define MACHINE_FLAG_MVPG (1UL << 4)
#define MACHINE_FLAG_DIAG44 (1UL << 5)
@ -77,7 +78,6 @@ extern unsigned long machine_flags;
#define MACHINE_IS_VM (machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (machine_flags & MACHINE_FLAG_KVM)
#define MACHINE_IS_P390 (machine_flags & MACHINE_FLAG_P390)
#define MACHINE_HAS_DIAG9C (machine_flags & MACHINE_FLAG_DIAG9C)
#ifndef __s390x__