dect / linux-2.6

Merge branch 'upstream-fixes'

Jeff Garzik 2006-02-23 21:16:27 -05:00
commit 7b0386921d
120 changed files with 1501 additions and 2192 deletions

View File

@ -46,10 +46,12 @@ maxcpus=n Restrict boot time cpus to n. Say if you have 4 cpus, using
maxcpus=2 will only boot 2. You can choose to bring the
other cpus online later; read the FAQs for more info.
additional_cpus=n [x86_64, s390 only] use this to limit hotpluggable cpus.
This option sets
additional_cpus*=n Use this to limit hotpluggable cpus. This option sets
cpu_possible_map = cpu_present_map + additional_cpus
(*) Option valid only for following architectures
- x86_64, ia64, s390
ia64 and x86_64 use the number of disabled local apics in the ACPI MADT table
to determine the number of potentially hot-pluggable cpus. The implementation
should only rely on this to count the number of cpus, but *MUST* not rely on the
@ -57,6 +59,9 @@ apicid values in those tables for disabled apics. In the event BIOS doesnt
mark such hot-pluggable cpus as disabled entries, one could use this
parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map.
s390 uses the number of cpus it detects at IPL time to also set the number of bits
in cpu_possible_map. If it is desired to add additional cpus at a later time
the number should be specified using this option or the possible_cpus option.
possible_cpus=n [s390 only] use this to set hotpluggable cpus.
This option sets possible_cpus bits in
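As a worked illustration of the formula above (hypothetical numbers, not part of this patch): a machine that comes up with 2 cpus in cpu_present_map and is booted with additional_cpus=2 ends up with 4 bits set in cpu_possible_map, leaving room to hot-add two more cpus later without a reboot.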

View File

@ -178,3 +178,12 @@ Why: The ISA interface is faster and should be always available. The I2C
probing is also known to cause trouble in at least one case (see
bug #5889.)
Who: Jean Delvare <khali@linux-fr.org>
---------------------------
What: mount/umount uevents
When: February 2007
Why: These events are not correct, and do not properly let userspace know
when a file system has been mounted or unmounted. Userspace should
poll the /proc/mounts file instead to detect this properly.
Who: Greg Kroah-Hartman <gregkh@suse.de>
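A minimal sketch of the recommended replacement (illustrative user-space code, assuming the kernel's poll support on /proc/mounts, which reports POLLERR/POLLPRI when the mount table changes):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/mounts", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	while (fd >= 0 && poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & (POLLERR | POLLPRI)) {
			/* mount table changed: rewind and re-read it */
			printf("mount table changed\n");
			lseek(fd, 0, SEEK_SET);
		}
	}
	close(fd);
	return 0;
}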

View File

@ -79,15 +79,18 @@ that instance in a system with many cpus making intensive use of it.
tmpfs has a mount option to set the NUMA memory allocation policy for
all files in that instance:
mpol=interleave prefers to allocate memory from each node in turn
mpol=default prefers to allocate memory from the local node
mpol=bind prefers to allocate from mpol_nodelist
mpol=preferred prefers to allocate from first node in mpol_nodelist
all files in that instance (if CONFIG_NUMA is enabled) - which can be
adjusted on the fly via 'mount -o remount ...'
The following mount option is used in conjunction with mpol=interleave,
mpol=bind or mpol=preferred:
mpol_nodelist: nodelist suitable for parsing with nodelist_parse.
mpol=default prefers to allocate memory from the local node
mpol=prefer:Node prefers to allocate memory from the given Node
mpol=bind:NodeList allocates memory only from nodes in NodeList
mpol=interleave prefers to allocate from each node in turn
mpol=interleave:NodeList allocates from each node of NodeList in turn
NodeList format is a comma-separated list of decimal numbers and ranges,
a range being two hyphen-separated decimal numbers, the smallest and
largest node numbers in the range. For example, mpol=bind:0-3,5,7,9-15
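For instance (hypothetical command line, assuming a CONFIG_NUMA kernel with nodes 0-3):

	mount -t tmpfs -o size=100m,mpol=interleave:0-3 tmpfs /mnt/tmp

and the policy can later be changed with mount -o remount,mpol=bind:0-1 /mnt/tmp.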
To specify the initial root directory you can use the following mount
@ -109,4 +112,4 @@ RAM/SWAP in 10240 inodes and it is only accessible by root.
Author:
Christoph Rohland <cr@sap.com>, 1.12.01
Updated:
Hugh Dickins <hugh@veritas.com>, 13 March 2005
Hugh Dickins <hugh@veritas.com>, 19 February 2006

View File

@ -57,8 +57,6 @@ OPTIONS
port=n port to connect to on the remote server
timeout=n request timeouts (in ms) (default 60000ms)
noextend force legacy mode (no 9P2000.u semantics)
uid attempt to mount as a particular uid
@ -74,10 +72,16 @@ OPTIONS
RESOURCES
=========
The Linux version of the 9P server, along with some client-side utilities
can be found at http://v9fs.sf.net (along with a CVS repository of the
development branch of this module). There are user and developer mailing
lists here, as well as a bug-tracker.
The Linux version of the 9P server is now maintained under the npfs project
on sourceforge (http://sourceforge.net/projects/npfs).
There are user and developer mailing lists available through the v9fs project
on sourceforge (http://sourceforge.net/projects/v9fs).
News and other information is maintained on SWiK (http://swik.net/v9fs).
Bug reports may be issued through the kernel.org bugzilla
(http://bugzilla.kernel.org)
For more information on the Plan 9 Operating System check out
http://plan9.bell-labs.com/plan9

View File

@ -16,6 +16,7 @@ before actually making adjustments.
Currently, these files might (depending on your configuration)
show up in /proc/sys/kernel:
- acpi_video_flags
- acct
- core_pattern
- core_uses_pid
@ -57,6 +58,15 @@ show up in /proc/sys/kernel:
==============================================================
acpi_video_flags:
flags
See Doc*/kernel/power/video.txt; it allows the video boot mode to be
set at run time.
==============================================================
acct:
highwater lowwater frequency
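The three values are free-space thresholds (in percent) and a polling interval (in seconds); a usage sketch with what are believed to be the default values (shown for illustration only):

	# suspend process accounting below 2% free disk space,
	# resume above 4%, re-check every 30 seconds
	echo 4 2 30 > /proc/sys/kernel/acct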

View File

@ -128,19 +128,27 @@ EXPORT_SYMBOL(rtc_tm_to_time);
/*
* Calculate the next alarm time given the requested alarm time mask
* and the current time.
*
* FIXME: for now, we just copy the alarm time because we're lazy (and
* is therefore buggy - setting a 10am alarm at 8pm will not result in
* the alarm triggering.)
*/
void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc_time *alrm)
{
unsigned long next_time;
unsigned long now_time;
next->tm_year = now->tm_year;
next->tm_mon = now->tm_mon;
next->tm_mday = now->tm_mday;
next->tm_hour = alrm->tm_hour;
next->tm_min = alrm->tm_min;
next->tm_sec = alrm->tm_sec;
rtc_tm_to_time(now, &now_time);
rtc_tm_to_time(next, &next_time);
if (next_time < now_time) {
/* Advance one day */
next_time += 60 * 60 * 24;
rtc_time_to_tm(next_time, next);
}
}
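Worked example of the rollover handling added above: with now at 20:00 and an alarm of 10:00, next is first assembled as 10:00 today, so next_time < now_time; 86400 seconds (one day) are then added and the alarm correctly fires at 10:00 tomorrow — exactly the case the removed FIXME described as broken.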
static inline int rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm)

View File

@ -566,7 +566,7 @@ ENTRY(__switch_to)
ldr r6, [r2, #TI_CPU_DOMAIN]!
#endif
#if __LINUX_ARM_ARCH__ >= 6
#ifdef CONFIG_CPU_MPCORE
#ifdef CONFIG_CPU_32v6K
clrex
#else
strex r5, r4, [ip] @ Clear exclusive monitor

View File

@ -19,6 +19,7 @@
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <asm/atomic.h>
@ -231,6 +232,13 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
__die(str, err, thread, regs);
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
if (panic_on_oops) {
printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
ssleep(5);
panic("Fatal exception");
}
do_exit(SIGSEGV);
}

View File

@ -100,8 +100,10 @@ void __init at91_add_device_udc(struct at91_udc_data *data)
at91_set_gpio_input(data->vbus_pin, 0);
at91_set_deglitch(data->vbus_pin, 1);
}
if (data->pullup_pin)
if (data->pullup_pin) {
at91_set_gpio_output(data->pullup_pin, 0);
at91_set_multi_drive(data->pullup_pin, 1);
}
udc_data = *data;
platform_device_register(&at91rm9200_udc_device);

View File

@ -159,6 +159,23 @@ int __init_or_module at91_set_deglitch(unsigned pin, int is_on)
}
EXPORT_SYMBOL(at91_set_deglitch);
/*
* enable/disable the multi-driver; This is only valid for output and
* allows the output pin to run as an open collector output.
*/
int __init_or_module at91_set_multi_drive(unsigned pin, int is_on)
{
void __iomem *pio = pin_to_controller(pin);
unsigned mask = pin_to_mask(pin);
if (!pio)
return -EINVAL;
__raw_writel(mask, pio + (is_on ? PIO_MDER : PIO_MDDR));
return 0;
}
EXPORT_SYMBOL(at91_set_multi_drive);
/*--------------------------------------------------------------------------*/

View File

@ -111,24 +111,30 @@ static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type)
if (line < 0)
return -EINVAL;
if (type & IRQT_BOTHEDGE) {
switch (type){
case IRQT_BOTHEDGE:
int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL;
irq_type = IXP4XX_IRQ_EDGE;
} else if (type & IRQT_RISING) {
break;
case IRQT_RISING:
int_style = IXP4XX_GPIO_STYLE_RISING_EDGE;
irq_type = IXP4XX_IRQ_EDGE;
} else if (type & IRQT_FALLING) {
break;
case IRQT_FALLING:
int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE;
irq_type = IXP4XX_IRQ_EDGE;
} else if (type & IRQT_HIGH) {
break;
case IRQT_HIGH:
int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH;
irq_type = IXP4XX_IRQ_LEVEL;
} else if (type & IRQT_LOW) {
break;
case IRQT_LOW:
int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW;
irq_type = IXP4XX_IRQ_LEVEL;
} else
break;
default:
return -EINVAL;
}
ixp4xx_config_irq(irq, irq_type);
if (line >= 8) { /* pins 8-15 */

View File

@ -77,6 +77,9 @@ static int __init nslu2_power_init(void)
static void __exit nslu2_power_exit(void)
{
if (!(machine_is_nslu2()))
return;
free_irq(NSLU2_RB_IRQ, NULL);
free_irq(NSLU2_PB_IRQ, NULL);
}

View File

@ -50,6 +50,12 @@ static struct platform_device nslu2_i2c_controller = {
.num_resources = 0,
};
static struct platform_device nslu2_beeper = {
.name = "ixp4xx-beeper",
.id = NSLU2_GPIO_BUZZ,
.num_resources = 0,
};
static struct resource nslu2_uart_resources[] = {
{
.start = IXP4XX_UART1_BASE_PHYS,
@ -97,6 +103,7 @@ static struct platform_device *nslu2_devices[] __initdata = {
&nslu2_i2c_controller,
&nslu2_flash,
&nslu2_uart,
&nslu2_beeper,
};
static void nslu2_power_off(void)

View File

@ -240,6 +240,14 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
int i;
int myslot = -1;
unsigned long val;
void __iomem *local_pci_cfg_base;
val = __raw_readl(SYS_PCICTL);
if (!(val & 1)) {
printk("Not plugged into PCI backplane!\n");
ret = -EIO;
goto out;
}
if (nr == 0) {
sys->mem_offset = 0;
@ -253,48 +261,45 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
goto out;
}
__raw_writel(VERSATILE_PCI_MEM_BASE0 >> 28,PCI_IMAP0);
__raw_writel(VERSATILE_PCI_MEM_BASE1 >> 28,PCI_IMAP1);
__raw_writel(VERSATILE_PCI_MEM_BASE2 >> 28,PCI_IMAP2);
__raw_writel(1, SYS_PCICTL);
val = __raw_readl(SYS_PCICTL);
if (!(val & 1)) {
printk("Not plugged into PCI backplane!\n");
ret = -EIO;
goto out;
}
/*
* We need to discover the PCI core first to configure itself
* before the main PCI probing is performed
*/
for (i=0; i<32; i++) {
for (i=0; i<32; i++)
if ((__raw_readl(VERSATILE_PCI_VIRT_BASE+(i<<11)+DEVICE_ID_OFFSET) == VP_PCI_DEVICE_ID) &&
(__raw_readl(VERSATILE_PCI_VIRT_BASE+(i<<11)+CLASS_ID_OFFSET) == VP_PCI_CLASS_ID)) {
myslot = i;
__raw_writel(myslot, PCI_SELFID);
val = __raw_readl(VERSATILE_PCI_CFG_VIRT_BASE+(myslot<<11)+CSR_OFFSET);
val |= (1<<2);
__raw_writel(val, VERSATILE_PCI_CFG_VIRT_BASE+(myslot<<11)+CSR_OFFSET);
break;
}
}
if (myslot == -1) {
printk("Cannot find PCI core!\n");
ret = -EIO;
} else {
printk("PCI core found (slot %d)\n",myslot);
/* Do not to map Versatile FPGA PCI device
into memory space as we are short of
mappable memory */
pci_slot_ignore |= (1 << myslot);
ret = 1;
goto out;
}
printk("PCI core found (slot %d)\n",myslot);
__raw_writel(myslot, PCI_SELFID);
local_pci_cfg_base = (void *) VERSATILE_PCI_CFG_VIRT_BASE + (myslot << 11);
val = __raw_readl(local_pci_cfg_base + CSR_OFFSET);
val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE;
__raw_writel(val, local_pci_cfg_base + CSR_OFFSET);
/*
* Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM
*/
__raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0);
__raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1);
__raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
/*
* Do not map the Versatile FPGA PCI device into memory space
*/
pci_slot_ignore |= (1 << myslot);
ret = 1;
out:
return ret;
}
@ -305,18 +310,18 @@ struct pci_bus *pci_versatile_scan_bus(int nr, struct pci_sys_data *sys)
return pci_scan_bus(sys->busnr, &pci_versatile_ops, sys);
}
/*
* V3_LB_BASE? - local bus address
* V3_LB_MAP? - pci bus address
*/
void __init pci_versatile_preinit(void)
{
}
__raw_writel(VERSATILE_PCI_MEM_BASE0 >> 28, PCI_IMAP0);
__raw_writel(VERSATILE_PCI_MEM_BASE1 >> 28, PCI_IMAP1);
__raw_writel(VERSATILE_PCI_MEM_BASE2 >> 28, PCI_IMAP2);
void __init pci_versatile_postinit(void)
{
}
__raw_writel(PHYS_OFFSET >> 28, PCI_SMAP0);
__raw_writel(PHYS_OFFSET >> 28, PCI_SMAP1);
__raw_writel(PHYS_OFFSET >> 28, PCI_SMAP2);
__raw_writel(1, SYS_PCICTL);
}
/*
* map the specified device/slot/pin to an IRQ. Different backplanes may need to modify this.
@ -326,16 +331,15 @@ static int __init versatile_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
int irq;
int devslot = PCI_SLOT(dev->devfn);
/* slot, pin, irq
24 1 27
25 1 28 untested
26 1 29
27 1 30 untested
*/
/* slot, pin, irq
* 24 1 27
* 25 1 28
* 26 1 29
* 27 1 30
*/
irq = 27 + ((slot + pin - 1) & 3);
irq = 27 + ((slot + pin + 2) % 3); /* Fudged */
printk("map irq: slot %d, pin %d, devslot %d, irq: %d\n",slot,pin,devslot,irq);
printk("PCI map irq: slot %d, pin %d, devslot %d, irq: %d\n",slot,pin,devslot,irq);
return irq;
}
@ -347,7 +351,6 @@ static struct hw_pci versatile_pci __initdata = {
.setup = pci_versatile_setup,
.scan = pci_versatile_scan_bus,
.preinit = pci_versatile_preinit,
.postinit = pci_versatile_postinit,
};
static int __init versatile_pci_init(void)

View File

@ -20,7 +20,7 @@
*/
.align 5
ENTRY(v6_early_abort)
#ifdef CONFIG_CPU_MPCORE
#ifdef CONFIG_CPU_32v6K
clrex
#else
strex r0, r1, [sp] @ Clear the exclusive monitor

View File

@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
# Last update: Mon Jan 9 12:56:42 2006
# Last update: Mon Feb 20 10:18:02 2006
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@ -904,7 +904,7 @@ wg302v2 MACH_WG302V2 WG302V2 890
eb42x MACH_EB42X EB42X 891
iq331es MACH_IQ331ES IQ331ES 892
cosydsp MACH_COSYDSP COSYDSP 893
uplat7d MACH_UPLAT7D UPLAT7D 894
uplat7d_proto MACH_UPLAT7D UPLAT7D 894
ptdavinci MACH_PTDAVINCI PTDAVINCI 895
mbus MACH_MBUS MBUS 896
nadia2vb MACH_NADIA2VB NADIA2VB 897
@ -938,3 +938,34 @@ auckland MACH_AUCKLAND AUCKLAND 924
ak3220m MACH_AK3320M AK3320M 925
duramax MACH_DURAMAX DURAMAX 926
n35 MACH_N35 N35 927
pronghorn MACH_PRONGHORN PRONGHORN 928
fundy MACH_FUNDY FUNDY 929
logicpd_pxa270 MACH_LOGICPD_PXA270 LOGICPD_PXA270 930
cpu777 MACH_CPU777 CPU777 931
simicon9201 MACH_SIMICON9201 SIMICON9201 932
leap2_hpm MACH_LEAP2_HPM LEAP2_HPM 933
cm922txa10 MACH_CM922TXA10 CM922TXA10 934
sandgate MACH_PXA PXA 935
sandgate2 MACH_SANDGATE2 SANDGATE2 936
sandgate2g MACH_SANDGATE2G SANDGATE2G 937
sandgate2p MACH_SANDGATE2P SANDGATE2P 938
fred_jack MACH_FRED_JACK FRED_JACK 939
ttg_color1 MACH_TTG_COLOR1 TTG_COLOR1 940
nxeb500hmi MACH_NXEB500HMI NXEB500HMI 941
netdcu8 MACH_NETDCU8 NETDCU8 942
ml675050_cpu_boa MACH_ML675050_CPU_BOA ML675050_CPU_BOA 943
ng_fvx538 MACH_NG_FVX538 NG_FVX538 944
ng_fvs338 MACH_NG_FVS338 NG_FVS338 945
pnx4103 MACH_PNX4103 PNX4103 946
hesdb MACH_HESDB HESDB 947
xsilo MACH_XSILO XSILO 948
espresso MACH_ESPRESSO ESPRESSO 949
emlc MACH_EMLC EMLC 950
sisteron MACH_SISTERON SISTERON 951
rx1950 MACH_RX1950 RX1950 952
tsc_venus MACH_TSC_VENUS TSC_VENUS 953
ds101j MACH_DS101J DS101J 954
mxc300_30ads MACH_MXC30030ADS MXC30030ADS 955
fujitsu_wimaxsoc MACH_FUJITSU_WIMAXSOC FUJITSU_WIMAXSOC 956
dualpcmodem MACH_DUALPCMODEM DUALPCMODEM 957
gesbc9312 MACH_GESBC9312 GESBC9312 958

View File

@ -34,7 +34,7 @@ config GDB_DEBUG
help
gdb stub exception support
config CONFIG_SH_STANDARD_BIOS
config SH_STANDARD_BIOS
bool "Use gdb protocol serial console"
depends on (!H8300H_SIM && !H8S_SIM)
help

View File

@ -328,7 +328,7 @@ CONFIG_FULLDEBUG=y
CONFIG_NO_KERNEL_MSG=y
# CONFIG_SYSCALL_PRINT is not set
# CONFIG_GDB_DEBUG is not set
# CONFIG_CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_SH_STANDARD_BIOS is not set
# CONFIG_DEFAULT_CMDLINE is not set
# CONFIG_BLKDEV_RESERVE is not set

View File

@ -710,7 +710,7 @@ void __init get_smp_config (void)
* Read the physical hardware table. Anything here will
* override the defaults.
*/
if (!smp_read_mpc((void *)mpf->mpf_physptr)) {
if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
smp_found_config = 0;
printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");

View File

@ -29,28 +29,7 @@
/*
* sys_tas() - test-and-set
* linuxthreads testing version
*/
#ifndef CONFIG_SMP
asmlinkage int sys_tas(int *addr)
{
int oldval;
unsigned long flags;
if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
return -EFAULT;
local_irq_save(flags);
oldval = *addr;
if (!oldval)
*addr = 1;
local_irq_restore(flags);
return oldval;
}
#else /* CONFIG_SMP */
#include <linux/spinlock.h>
static DEFINE_SPINLOCK(tas_lock);
asmlinkage int sys_tas(int *addr)
{
int oldval;
@ -58,15 +37,43 @@ asmlinkage int sys_tas(int *addr)
if (!access_ok(VERIFY_WRITE, addr, sizeof (int)))
return -EFAULT;
_raw_spin_lock(&tas_lock);
oldval = *addr;
if (!oldval)
*addr = 1;
_raw_spin_unlock(&tas_lock);
/* atomic operation:
* oldval = *addr; *addr = 1;
*/
__asm__ __volatile__ (
DCACHE_CLEAR("%0", "r4", "%1")
" .fillinsn\n"
"1:\n"
" lock %0, @%1 -> unlock %2, @%1\n"
"2:\n"
/* NOTE:
* The m32r processor can accept interrupts only
* at the 32-bit instruction boundary.
* So, in the above code, the "unlock" instruction
* can be executed continuously after the "lock"
* instruction execution without any interruptions.
*/
".section .fixup,\"ax\"\n"
" .balign 4\n"
"3: ldi %0, #%3\n"
" seth r14, #high(2b)\n"
" or3 r14, r14, #low(2b)\n"
" jmp r14\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .balign 4\n"
" .long 1b,3b\n"
".previous\n"
: "=&r" (oldval)
: "r" (addr), "r" (1), "i"(-EFAULT)
: "r14", "memory"
#ifdef CONFIG_CHIP_M32700_TS1
, "r4"
#endif /* CONFIG_CHIP_M32700_TS1 */
);
return oldval;
}
#endif /* CONFIG_SMP */
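In plain C, the semantics the interlocked pair above provides would read as follows (non-atomic sketch for illustration only — on m32r the atomicity comes from the lock/unlock instructions plus the 32-bit instruction-boundary rule quoted in the NOTE, not from anything expressible in C):

	/* what "lock %0,@%1 -> unlock %2,@%1" does, minus the atomicity */
	oldval = *addr;		/* "lock" loads the old value */
	*addr = 1;		/* "unlock" unconditionally stores 1 */
	return oldval;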
/*
* sys_pipe() is the normal C calling standard for creating

View File

@ -129,6 +129,9 @@ void machine_power_off(void)
for (;;);
}
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
void show_regs(struct pt_regs * regs)
{
printk("\n");

View File

@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.15-rc2
# Thu Nov 24 01:06:21 2005
# Linux kernel version: 2.6.16-rc4
# Tue Feb 21 13:44:31 2006
#
CONFIG_MIPS=y
@ -144,7 +144,6 @@ CONFIG_PREEMPT_BKL=y
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
@ -250,6 +249,7 @@ CONFIG_NET=y
#
# Networking options
#
# CONFIG_NETDEBUG is not set
CONFIG_PACKET=y
CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
@ -289,6 +289,7 @@ CONFIG_TCP_CONG_BIC=y
# SCTP Configuration (EXPERIMENTAL)
#
# CONFIG_IP_SCTP is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
@ -448,7 +449,7 @@ CONFIG_SCSI_SAS_ATTRS=m
#
# SCSI low-level drivers
#
CONFIG_ISCSI_TCP=m
# CONFIG_ISCSI_TCP is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
@ -773,6 +774,10 @@ CONFIG_USB_ARCH_HAS_OHCI=y
# SN Devices
#
#
# EDAC - error detection and reporting (RAS)
#
#
# File systems
#

View File

@ -103,8 +103,9 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
value->tv_usec /= NSEC_PER_USEC;
long rem;
value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
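The point of the change above: div_long_long_rem() stores the remainder through a long pointer, while tv_usec in struct compat_timeval is a 32-bit field, so passing &value->tv_usec could scribble past the field on 64-bit kernels. The remainder now lands in a properly sized local first and is scaled to microseconds afterwards; the next hunk applies the identical fix to a second copy of this helper.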
#define ELF_CORE_EFLAGS EF_MIPS_ABI2

View File

@ -105,8 +105,9 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
* one divide.
*/
u64 nsec = (u64)jiffies * TICK_NSEC;
value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec);
value->tv_usec /= NSEC_PER_USEC;
long rem;
value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
value->tv_usec = rem / NSEC_PER_USEC;
}
#undef ELF_CORE_COPY_REGS

View File

@ -230,6 +230,9 @@ sysn32_waitid(int which, compat_pid_t pid,
long ret;
mm_segment_t old_fs = get_fs();
if (!access_ok(VERIFY_WRITE, uinfo, sizeof(*uinfo)))
return -EFAULT;
set_fs (KERNEL_DS);
ret = sys_waitid(which, pid, uinfo, options,
uru ? (struct rusage __user *) &ru : NULL);
@ -1450,25 +1453,6 @@ sys32_timer_create(u32 clock, struct sigevent32 __user *se32, timer_t __user *ti
return sys_timer_create(clock, p, timer_id);
}
asmlinkage long
sysn32_rt_sigtimedwait(const sigset_t __user *uthese,
siginfo_t __user *uinfo,
const struct compat_timespec __user *uts32,
size_t sigsetsize)
{
struct timespec __user *uts = NULL;
if (uts32) {
struct timespec ts;
uts = compat_alloc_user_space(sizeof(struct timespec));
if (get_user(ts.tv_sec, &uts32->tv_sec) ||
get_user(ts.tv_nsec, &uts32->tv_nsec) ||
copy_to_user (uts, &ts, sizeof (ts)))
return -EFAULT;
}
return sys_rt_sigtimedwait(uthese, uinfo, uts, sigsetsize);
}
save_static_function(sys32_clone);
__attribute_used__ noinline static int
_sys32_clone(nabi_no_regargs struct pt_regs regs)

View File

@ -245,9 +245,9 @@ EXPORT(sysn32_call_table)
PTR sys_capget
PTR sys_capset
PTR sys32_rt_sigpending /* 6125 */
PTR sysn32_rt_sigtimedwait
PTR compat_sys_rt_sigtimedwait
PTR sys_rt_sigqueueinfo
PTR sys32_rt_sigsuspend
PTR sysn32_rt_sigsuspend
PTR sys32_sigaltstack
PTR compat_sys_utime /* 6130 */
PTR sys_mknod

View File

@ -4,7 +4,7 @@
* for more details.
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000 Ralf Baechle
* Copyright (C) 1994 - 2000, 2006 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/cache.h>
@ -106,8 +106,6 @@ typedef struct compat_siginfo {
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
/* 32-bit compatibility types */
#define _NSIG_BPW32 32
@ -198,7 +196,7 @@ __attribute_used__ noinline static int
_sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
compat_sigset_t *uset;
sigset_t newset, saveset;
sigset_t newset;
uset = (compat_sigset_t *) regs.regs[4];
if (get_sigset(&newset, uset))
@ -206,19 +204,15 @@ _sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
regs.regs[2] = EINTR;
regs.regs[7] = 1;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
if (do_signal32(&saveset, &regs))
return -EINTR;
}
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
return -ERESTARTNOHAND;
}
save_static_function(sys32_rt_sigsuspend);
@ -226,8 +220,8 @@ __attribute_used__ noinline static int
_sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
compat_sigset_t *uset;
sigset_t newset, saveset;
size_t sigsetsize;
sigset_t newset;
size_t sigsetsize;
/* XXX Don't preclude handling different sized sigset_t's. */
sigsetsize = regs.regs[5];
@ -240,19 +234,15 @@ _sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
regs.regs[2] = EINTR;
regs.regs[7] = 1;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
if (do_signal32(&saveset, &regs))
return -EINTR;
}
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
return -ERESTARTNOHAND;
}
asmlinkage int sys32_sigaction(int sig, const struct sigaction32 *act,
@ -783,7 +773,7 @@ static inline int handle_signal(unsigned long sig, siginfo_t *info,
regs->regs[2] = EINTR;
break;
case ERESTARTSYS:
if(!(ka->sa.sa_flags & SA_RESTART)) {
if (!(ka->sa.sa_flags & SA_RESTART)) {
regs->regs[2] = EINTR;
break;
}
@ -810,9 +800,10 @@ static inline int handle_signal(unsigned long sig, siginfo_t *info,
return ret;
}
int do_signal32(sigset_t *oldset, struct pt_regs *regs)
void do_signal32(struct pt_regs *regs)
{
struct k_sigaction ka;
sigset_t *oldset;
siginfo_t info;
int signr;
@ -822,17 +813,30 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs)
* if so.
*/
if (!user_mode(regs))
return 1;
return;
if (try_to_freeze())
goto no_signal;
if (!oldset)
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0)
return handle_signal(signr, &info, &ka, oldset, regs);
if (signr > 0) {
/* Whee! Actually deliver the signal. */
if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
/*
* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TIF_RESTORE_SIGMASK flag.
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
}
no_signal:
/*
@ -853,7 +857,15 @@ no_signal:
regs->cp0_epc -= 4;
}
}
return 0;
/*
* If there's no signal to deliver, we just put the saved sigmask
* back
*/
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
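Taken together, the hunks above move these paths onto the TIF_RESTORE_SIGMASK scheme. Condensed to its skeleton (a sketch of the flow, not literal kernel code):

	/* sigsuspend side */
	current->saved_sigmask = current->blocked;	/* stash old mask */
	current->blocked = newset;			/* install temporary mask */
	schedule();					/* sleep until a signal */
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;

	/* do_signal32 side */
	oldset = test_thread_flag(TIF_RESTORE_SIGMASK)
		? &current->saved_sigmask : &current->blocked;
	if (/* a signal was delivered against oldset */)
		clear_thread_flag(TIF_RESTORE_SIGMASK);	/* frame holds the mask */
	else if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
		clear_thread_flag(TIF_RESTORE_SIGMASK);
		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
	}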
asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 *act,

View File

@ -81,6 +81,39 @@ struct rt_sigframe_n32 {
#endif
};
extern void sigset_from_compat (sigset_t *set, compat_sigset_t *compat);
save_static_function(sysn32_rt_sigsuspend);
__attribute_used__ noinline static int
_sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
{
compat_sigset_t __user *unewset, uset;
size_t sigsetsize;
sigset_t newset;
/* XXX Don't preclude handling different sized sigset_t's. */
sigsetsize = regs.regs[5];
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
unewset = (compat_sigset_t __user *) regs.regs[4];
if (copy_from_user(&uset, unewset, sizeof(uset)))
return -EFAULT;
sigset_from_compat (&newset, &uset);
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
schedule();
set_thread_flag(TIF_RESTORE_SIGMASK);
return -ERESTARTNOHAND;
}
save_static_function(sysn32_rt_sigreturn);
__attribute_used__ noinline static void
_sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)

View File

@ -29,6 +29,7 @@
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
@ -424,6 +425,25 @@ void flush_tlb_one(unsigned long vaddr)
local_flush_tlb_one(vaddr);
}
static DEFINE_PER_CPU(struct cpu, cpu_devices);
static int __init topology_init(void)
{
int cpu;
int ret;
for_each_cpu(cpu) {
ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
"failed (%d)\n", cpu, ret);
}
return 0;
}
subsys_initcall(topology_init);
EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);
EXPORT_SYMBOL(cpu_data);

View File

@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000, 01 Ralf Baechle
* Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
* Copyright (C) 1995, 1996 Paul M. Antoine
* Copyright (C) 1998 Ulf Carlsson
* Copyright (C) 1999 Silicon Graphics, Inc.
@ -548,6 +548,8 @@ asmlinkage void do_ov(struct pt_regs *regs)
{
siginfo_t info;
die_if_kernel("Integer overflow", regs);
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
info.si_errno = 0;

View File

@ -64,7 +64,7 @@ LEAF(except_vec2_sb1)
sd k0,0x170($0)
sd k1,0x178($0)
#if CONFIG_SB1_CEX_ALWAYS_FATAL
#ifdef CONFIG_SB1_CEX_ALWAYS_FATAL
j handle_vec2_sb1
nop
#else

View File

@ -94,7 +94,7 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
void prom_boot_secondary(int cpu, struct task_struct *idle)
{
unsigned long gp = (unsigned long) task_thread_info(idle);
unsigned long sp = __KSTK_TOP(idle);
unsigned long sp = __KSTK_TOS(idle);
secondary_sp = sp;
secondary_gp = gp;

View File

@ -102,11 +102,11 @@ config SIMULATION
Build a kernel suitable for running under the GDB simulator.
Primarily adjusts the kernel's notion of time.
config CONFIG_SB1_CEX_ALWAYS_FATAL
config SB1_CEX_ALWAYS_FATAL
bool "All cache exceptions considered fatal (no recovery attempted)"
depends on SIBYTE_SB1xxx_SOC
config CONFIG_SB1_CERR_STALL
config SB1_CERR_STALL
bool "Stall (rather than panic) on fatal cache error"
depends on SIBYTE_SB1xxx_SOC

View File

@ -139,7 +139,7 @@ void bcm1480_unmask_irq(int cpu, int irq)
#ifdef CONFIG_SMP
static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
{
int i = 0, old_cpu, cpu, int_on;
int i = 0, old_cpu, cpu, int_on, k;
u64 cur_ints;
irq_desc_t *desc = irq_desc + irq;
unsigned long flags;
@ -165,7 +165,6 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask)
irq_dirty -= BCM1480_NR_IRQS_HALF;
}
int k;
for (k=0; k<2; k++) { /* Loop through high and low interrupt mask register */
cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING)));
int_on = !(cur_ints & (((u64) 1) << irq_dirty));
@ -216,6 +215,7 @@ static void ack_bcm1480_irq(unsigned int irq)
{
u64 pending;
unsigned int irq_dirty;
int k;
/*
* If the interrupt was an HT interrupt, now is the time to
@ -227,7 +227,6 @@ static void ack_bcm1480_irq(unsigned int irq)
if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) {
irq_dirty -= BCM1480_NR_IRQS_HALF;
}
int k;
for (k=0; k<2; k++) { /* Loop through high and low LDT interrupts */
pending = __raw_readq(IOADDR(A_BCM1480_IMR_REGISTER(bcm1480_irq_owner[irq],
R_BCM1480_IMR_LDT_INTERRUPT_H + (k*BCM1480_IMR_HL_SPACING))));

View File

@ -157,8 +157,7 @@ _GLOBAL(__secondary_hold)
SET_REG_IMMEDIATE(r4, .hmt_init)
mtctr r4
bctr
#else
#ifdef CONFIG_SMP
#elif defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init)
mtctr r4
mr r3,r24
@ -166,7 +165,6 @@ _GLOBAL(__secondary_hold)
#else
BUG_OPCODE
#endif
#endif
/* This value is used to mark exception frames on the stack. */
.section ".toc","aw"

View File

@ -341,7 +341,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
const char *system_id = "";
unsigned int *lp_index_ptr, lp_index = 0;
struct device_node *rtas_node;
int *lrdrp;
int *lrdrp = NULL;
rootdn = find_path_device("/");
if (rootdn) {
@ -362,7 +362,9 @@ static int lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "partition_id=%d\n", (int)lp_index);
rtas_node = find_path_device("/rtas");
lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL);
if (rtas_node)
lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity",
NULL);
if (lrdrp == NULL) {
partition_potential_processors = vdso_data->processorCount;

View File

@ -79,15 +79,8 @@ EXPORT_SYMBOL(sys_sigreturn);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strcasecmp);
EXPORT_SYMBOL(csum_partial);
@ -185,9 +178,6 @@ EXPORT_SYMBOL(adb_try_handler_change);
EXPORT_SYMBOL(cuda_request);
EXPORT_SYMBOL(cuda_poll);
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_PPC_PMAC
EXPORT_SYMBOL(sys_ctrler);
#endif
#ifdef CONFIG_VT
EXPORT_SYMBOL(kd_mksound);
#endif
@ -205,7 +195,6 @@ EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);
@ -214,7 +203,6 @@ EXPORT_SYMBOL(screen_info);
#endif
#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__delay);
EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(irq_desc);
EXPORT_SYMBOL(tb_ticks_per_jiffy);
@ -222,10 +210,6 @@ EXPORT_SYMBOL(console_drivers);
EXPORT_SYMBOL(cacheable_memcpy);
#endif
EXPORT_SYMBOL(__up);
EXPORT_SYMBOL(__down);
EXPORT_SYMBOL(__down_interruptible);
#ifdef CONFIG_8xx
EXPORT_SYMBOL(cpm_install_handler);
EXPORT_SYMBOL(cpm_free_handler);

View File

@ -311,8 +311,6 @@ void smp_release_cpus(void)
DBG(" <- smp_release_cpus()\n");
}
#else
#define smp_release_cpus()
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
@ -473,10 +471,12 @@ void __init setup_system(void)
check_smt_enabled();
smp_setup_cpu_maps();
#ifdef CONFIG_SMP
/* Release secondary cpus out of their spinloops at 0x60 now that
* we can map physical -> logical CPU ids
*/
smp_release_cpus();
#endif
printk("Starting Linux PPC64 %s\n", system_utsname.version);

View File

@ -176,7 +176,6 @@ struct timex32 {
};
extern int do_adjtimex(struct timex *);
extern void ppc_adjtimex(void);
asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
{
@ -209,9 +208,6 @@ asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
ret = do_adjtimex(&txc);
/* adjust the conversion of TB to time of day to track adjtimex */
ppc_adjtimex();
if(put_user(txc.modes, &utp->modes) ||
__put_user(txc.offset, &utp->offset) ||
__put_user(txc.freq, &utp->freq) ||

View File

@ -50,6 +50,7 @@
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <asm/io.h>
#include <asm/processor.h>
@ -99,7 +100,15 @@ EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
u64 tb_to_xs;
unsigned tb_to_us;
unsigned long processor_freq;
#define TICKLEN_SCALE (SHIFT_SCALE - 10)
u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs; /* 0.64 fraction */
/* If last_tick_len corresponds to about 1/HZ seconds, then
last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);
@ -113,10 +122,6 @@ extern unsigned long wall_jiffies;
extern struct timezone sys_tz;
static long timezone_offset;
void ppc_adjtimex(void);
static unsigned adjusting_time = 0;
unsigned long ppc_proc_freq;
unsigned long ppc_tb_freq;
@ -178,8 +183,7 @@ static __inline__ void timer_check_rtc(void)
*/
if (ppc_md.set_rtc_time && ntp_synced() &&
xtime.tv_sec - last_rtc_update >= 659 &&
abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
jiffies - wall_jiffies == 1) {
abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) {
struct rtc_time tm;
to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
tm.tm_year -= 1900;
@ -226,15 +230,14 @@ void do_gettimeofday(struct timeval *tv)
if (__USE_RTC()) {
/* do this the old way */
unsigned long flags, seq;
unsigned int sec, nsec, usec, lost;
unsigned int sec, nsec, usec;
do {
seq = read_seqbegin_irqsave(&xtime_lock, flags);
sec = xtime.tv_sec;
nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
lost = jiffies - wall_jiffies;
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
usec = nsec / 1000 + lost * (1000000 / HZ);
usec = nsec / 1000;
while (usec >= 1000000) {
usec -= 1000000;
++sec;
@ -248,23 +251,6 @@ void do_gettimeofday(struct timeval *tv)
EXPORT_SYMBOL(do_gettimeofday);
/* Synchronize xtime with do_gettimeofday */
static inline void timer_sync_xtime(unsigned long cur_tb)
{
#ifdef CONFIG_PPC64
/* why do we do this? */
struct timeval my_tv;
__do_gettimeofday(&my_tv, cur_tb);
if (xtime.tv_sec <= my_tv.tv_sec) {
xtime.tv_sec = my_tv.tv_sec;
xtime.tv_nsec = my_tv.tv_usec * 1000;
}
#endif
}
/*
* There are two copies of tb_to_xs and stamp_xsec so that no
* lock is needed to access and use these values in
@ -323,15 +309,30 @@ static __inline__ void timer_recalc_offset(u64 cur_tb)
{
unsigned long offset;
u64 new_stamp_xsec;
u64 tlen, t2x;
if (__USE_RTC())
return;
tlen = current_tick_length();
offset = cur_tb - do_gtod.varp->tb_orig_stamp;
if ((offset & 0x80000000u) == 0)
return;
new_stamp_xsec = do_gtod.varp->stamp_xsec
+ mulhdu(offset, do_gtod.varp->tb_to_xs);
update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs);
if (tlen == last_tick_len && offset < 0x80000000u) {
/* check that we're still in sync; if not, resync */
struct timeval tv;
__do_gettimeofday(&tv, cur_tb);
if (tv.tv_sec <= xtime.tv_sec &&
(tv.tv_sec < xtime.tv_sec ||
tv.tv_usec * 1000 <= xtime.tv_nsec))
return;
}
if (tlen != last_tick_len) {
t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
last_tick_len = tlen;
} else
t2x = do_gtod.varp->tb_to_xs;
new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
do_div(new_stamp_xsec, 1000000000);
new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;
update_gtod(cur_tb, new_stamp_xsec, t2x);
}
#ifdef CONFIG_SMP
@ -462,13 +463,10 @@ void timer_interrupt(struct pt_regs * regs)
write_seqlock(&xtime_lock);
tb_last_jiffy += tb_ticks_per_jiffy;
tb_last_stamp = per_cpu(last_jiffy, cpu);
timer_recalc_offset(tb_last_jiffy);
do_timer(regs);
timer_sync_xtime(tb_last_jiffy);
timer_recalc_offset(tb_last_jiffy);
timer_check_rtc();
write_sequnlock(&xtime_lock);
if (adjusting_time && (time_adjust == 0))
ppc_adjtimex();
}
next_dec = tb_ticks_per_jiffy - ticks;
@ -492,16 +490,18 @@ void timer_interrupt(struct pt_regs * regs)
void wakeup_decrementer(void)
{
int i;
unsigned long ticks;
set_dec(tb_ticks_per_jiffy);
/*
* We don't expect this to be called on a machine with a 601,
* so using get_tbl is fine.
* The timebase gets saved on sleep and restored on wakeup,
* so all we need to do is to reset the decrementer.
*/
tb_last_stamp = tb_last_jiffy = get_tb();
for_each_cpu(i)
per_cpu(last_jiffy, i) = tb_last_stamp;
ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
if (ticks < tb_ticks_per_jiffy)
ticks = tb_ticks_per_jiffy - ticks;
else
ticks = 1;
set_dec(ticks);
}
#ifdef CONFIG_SMP
@ -541,8 +541,8 @@ int do_settimeofday(struct timespec *tv)
time_t wtm_sec, new_sec = tv->tv_sec;
long wtm_nsec, new_nsec = tv->tv_nsec;
unsigned long flags;
long int tb_delta;
u64 new_xsec, tb_delta_xs;
u64 new_xsec;
unsigned long tb_delta;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
@ -563,9 +563,19 @@ int do_settimeofday(struct timespec *tv)
first_settimeofday = 0;
}
#endif
/*
* Subtract off the number of nanoseconds since the
* beginning of the last tick.
* Note that since we don't increment jiffies_64 anywhere other
* than in do_timer (since we don't have a lost tick problem),
* wall_jiffies will always be the same as jiffies,
* and therefore the (jiffies - wall_jiffies) computation
* has been removed.
*/
tb_delta = tb_ticks_since(tb_last_stamp);
tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);
tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
new_nsec -= SCALE_XSEC(tb_delta, 1000000000);
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
@ -580,12 +590,12 @@ int do_settimeofday(struct timespec *tv)
ntp_clear();
new_xsec = 0;
if (new_nsec != 0) {
new_xsec = (u64)new_nsec * XSEC_PER_SEC;
new_xsec = xtime.tv_nsec;
if (new_xsec != 0) {
new_xsec *= XSEC_PER_SEC;
do_div(new_xsec, NSEC_PER_SEC);
}
new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs;
new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
@ -671,7 +681,7 @@ void __init time_init(void)
unsigned long flags;
unsigned long tm = 0;
struct div_result res;
u64 scale;
u64 scale, x;
unsigned shift;
if (ppc_md.time_init != NULL)
@ -693,11 +703,36 @@ void __init time_init(void)
}
tb_ticks_per_jiffy = ppc_tb_freq / HZ;
tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
tb_ticks_per_sec = ppc_tb_freq;
tb_ticks_per_usec = ppc_tb_freq / 1000000;
tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
tb_to_xs = res.result_low;
/*
* Calculate the length of each tick in ns. It will not be
* exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
* We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
* rounded up.
*/
x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
do_div(x, ppc_tb_freq);
tick_nsec = x;
last_tick_len = x << TICKLEN_SCALE;
/*
* Compute ticklen_to_xs, which is a factor which gets multiplied
* by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
* It is computed as:
* ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
* where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
* so as to give the result as a 0.64 fixed-point fraction.
*/
div128_by_32(1ULL << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT), 0,
tb_ticks_per_jiffy, &res);
div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
ticklen_to_xs = res.result_low;
/* Compute tb_to_xs from tick_nsec */
tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
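As a consistency check on the comments above (with s = TICKLEN_SCALE, t = TICKLEN_SHIFT, noting that mulhdu(a, b) returns ab / 2^64 and that the double div128_by_32 makes ticklen_to_xs = 2^(148-s-t) / (tb_ticks_per_jiffy * 10^9) as a stored integer):

\[
\mathtt{tb\_to\_xs}
= \frac{\bigl(\mathtt{tick\_nsec}\cdot 2^{s+t}\bigr)\cdot \dfrac{2^{148-s-t}}{\mathtt{tb\_ticks\_per\_jiffy}\cdot 10^{9}}}{2^{64}}
= \frac{\mathtt{tick\_nsec}\cdot 2^{84}}{\mathtt{tb\_ticks\_per\_jiffy}\cdot 10^{9}}
= 2^{64}\cdot\frac{2^{20}}{\mathtt{tb\_ticks\_per\_sec}},
\]

using tick_nsec = 10^9 * tb_ticks_per_jiffy / ppc_tb_freq in the last step — i.e. exactly the 0.64 fixed-point encoding of 2^20 / tb_ticks_per_sec xsec per timebase tick, matching the removed div128_by_32(1024*1024, 0, tb_ticks_per_sec, ...) value whenever ppc_tb_freq divides evenly by HZ.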
/*
* Compute scale factor for sched_clock.
@ -724,6 +759,14 @@ void __init time_init(void)
tm = get_boot_time();
write_seqlock_irqsave(&xtime_lock, flags);
/* If platform provided a timezone (pmac), we correct the time */
if (timezone_offset) {
sys_tz.tz_minuteswest = -timezone_offset / 60;
sys_tz.tz_dsttime = 0;
tm -= timezone_offset;
}
xtime.tv_sec = tm;
xtime.tv_nsec = 0;
do_gtod.varp = &do_gtod.vars[0];
@ -738,18 +781,11 @@ void __init time_init(void)
vdso_data->tb_orig_stamp = tb_last_jiffy;
vdso_data->tb_update_count = 0;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
vdso_data->tb_to_xs = tb_to_xs;
time_freq = 0;
/* If platform provided a timezone (pmac), we correct the time */
if (timezone_offset) {
sys_tz.tz_minuteswest = -timezone_offset / 60;
sys_tz.tz_dsttime = 0;
xtime.tv_sec -= timezone_offset;
}
last_rtc_update = xtime.tv_sec;
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
@ -759,126 +795,6 @@ void __init time_init(void)
set_dec(tb_ticks_per_jiffy);
}
/*
* After adjtimex is called, adjust the conversion of tb ticks
* to microseconds to keep do_gettimeofday synchronized
* with ntpd.
*
* Use the time_adjust, time_freq and time_offset computed by adjtimex to
* adjust the frequency.
*/
/* #define DEBUG_PPC_ADJTIMEX 1 */
void ppc_adjtimex(void)
{
#ifdef CONFIG_PPC64
unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
new_tb_to_xs, new_xsec, new_stamp_xsec;
unsigned long tb_ticks_per_sec_delta;
long delta_freq, ltemp;
struct div_result divres;
unsigned long flags;
long singleshot_ppm = 0;
/*
* Compute parts per million frequency adjustment to
* accomplish the time adjustment implied by time_offset to be
* applied over the elapsed time indicated by time_constant.
* Use SHIFT_USEC to get it into the same units as
* time_freq.
*/
if ( time_offset < 0 ) {
ltemp = -time_offset;
ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
ltemp >>= SHIFT_KG + time_constant;
ltemp = -ltemp;
} else {
ltemp = time_offset;
ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
ltemp >>= SHIFT_KG + time_constant;
}
/* If there is a single shot time adjustment in progress */
if ( time_adjust ) {
#ifdef DEBUG_PPC_ADJTIMEX
printk("ppc_adjtimex: ");
if ( adjusting_time == 0 )
printk("starting ");
printk("single shot time_adjust = %ld\n", time_adjust);
#endif
adjusting_time = 1;
/*
* Compute parts per million frequency adjustment
* to match time_adjust
*/
singleshot_ppm = tickadj * HZ;
/*
* The adjustment should be tickadj*HZ to match the code in
* linux/kernel/timer.c, but experiments show that this is too
* large. 3/4 of tickadj*HZ seems about right
*/
singleshot_ppm -= singleshot_ppm / 4;
/* Use SHIFT_USEC to get it into the same units as time_freq */
singleshot_ppm <<= SHIFT_USEC;
if ( time_adjust < 0 )
singleshot_ppm = -singleshot_ppm;
}
else {
#ifdef DEBUG_PPC_ADJTIMEX
if ( adjusting_time )
printk("ppc_adjtimex: ending single shot time_adjust\n");
#endif
adjusting_time = 0;
}
/* Add up all of the frequency adjustments */
delta_freq = time_freq + ltemp + singleshot_ppm;
/*
* Compute a new value for tb_ticks_per_sec based on
* the frequency adjustment
*/
den = 1000000 * (1 << (SHIFT_USEC - 8));
if ( delta_freq < 0 ) {
tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
}
else {
tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
}
#ifdef DEBUG_PPC_ADJTIMEX
printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
#endif
/*
* Compute a new value of tb_to_xs (used to convert tb to
* microseconds) and a new value of stamp_xsec which is the
* time (in 1/2^20 second units) corresponding to
* tb_orig_stamp. This new value of stamp_xsec compensates
* for the change in frequency (implied by the new tb_to_xs)
* which guarantees that the current time remains the same.
*/
write_seqlock_irqsave( &xtime_lock, flags );
tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
new_tb_to_xs = divres.result_low;
new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);
write_sequnlock_irqrestore( &xtime_lock, flags );
#endif /* CONFIG_PPC64 */
}
#define FEBRUARY 2
#define STARTOFTIME 1970

View File

@ -1646,10 +1646,10 @@ static void intrepid_shutdown(struct macio_chip *macio, int sleep_mode)
KL0_SCC_CELL_ENABLE);
MACIO_BIC(KEYLARGO_FCR1,
/*KL1_USB2_CELL_ENABLE |*/
KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT |
KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE |
KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE);
KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE |
KL1_EIDE0_ENABLE);
if (pmac_mb.board_flags & PMAC_MB_MOBILE)
MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N);
@ -2183,7 +2183,7 @@ static struct pmac_mb_def pmac_mb_defs[] = {
},
{ "PowerMac10,1", "Mac mini",
PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER,
PMAC_MB_MAY_SLEEP,
},
{ "iMac,1", "iMac (first generation)",
PMAC_TYPE_ORIG_IMAC, paddington_features,
@ -2295,11 +2295,11 @@ static struct pmac_mb_def pmac_mb_defs[] = {
},
{ "PowerBook5,8", "PowerBook G4 15\"",
PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE,
},
{ "PowerBook5,9", "PowerBook G4 17\"",
PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,
PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE,
PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE,
},
{ "PowerBook6,1", "PowerBook G4 12\"",
PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features,

View File

@ -292,7 +292,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
if (start_cpu == RTAS_UNKNOWN_SERVICE)
return 1;
status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu);
status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, pcpu);
if (status != 0) {
printk(KERN_ERR "start-cpu failed: %i\n", status);
return 0;
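For context on the one-token change above (inferred from the diff; the surrounding code is not shown): the last rtas_call() argument is handed to the started cpu as its identifier, and the secondary spin loop compares it against the physical cpu number, so the logical number (lcpu) was the wrong value to pass — pcpu is now used for both the start-cpu target and that argument.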

View File

@ -93,15 +93,8 @@ EXPORT_SYMBOL(test_and_change_bit);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(strcasecmp);
EXPORT_SYMBOL(__div64_32);
@ -253,7 +246,6 @@ EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(cacheable_memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);

View File

@ -146,19 +146,6 @@ xmon_map_scc(void)
static int scc_initialized = 0;
void xmon_init_scc(void);
extern void cuda_poll(void);
static inline void do_poll_adb(void)
{
#ifdef CONFIG_ADB_PMU
if (sys_ctrler == SYS_CTRLER_PMU)
pmu_poll_adb();
#endif /* CONFIG_ADB_PMU */
#ifdef CONFIG_ADB_CUDA
if (sys_ctrler == SYS_CTRLER_CUDA)
cuda_poll();
#endif /* CONFIG_ADB_CUDA */
}
int
xmon_write(void *handle, void *ptr, int nb)
@ -189,7 +176,7 @@ xmon_write(void *handle, void *ptr, int nb)
ct = 0;
for (i = 0; i < nb; ++i) {
while ((*sccc & TXRDY) == 0)
do_poll_adb();
;
c = p[i];
if (c == '\n' && !ct) {
c = '\r';

View File

@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.16-rc3
# Mon Feb 13 22:31:24 2006
# Linux kernel version: 2.6.16-rc3-git9
# Sat Feb 18 00:27:03 2006
#
CONFIG_X86_64=y
CONFIG_64BIT=y
@ -1317,7 +1317,7 @@ CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_INFO is not set
CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set
# CONFIG_FRAME_POINTER is not set

View File

@ -58,6 +58,7 @@
#include <linux/suspend.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <asm/uaccess.h>
@ -380,6 +381,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
rq->ref_count++;
rq->flags |= REQ_NOMERGE;
@ -1495,40 +1497,42 @@ static int pkt_set_write_settings(struct pktcdvd_device *pd)
}
/*
* 0 -- we can write to this track, 1 -- we can't
* 1 -- we can write to this track, 0 -- we can't
*/
static int pkt_good_track(track_information *ti)
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
/*
* only good for CD-RW at the moment, not DVD-RW
*/
switch (pd->mmc3_profile) {
case 0x1a: /* DVD+RW */
case 0x12: /* DVD-RAM */
/* The track is always writable on DVD+RW/DVD-RAM */
return 1;
default:
break;
}
/*
* FIXME: only for FP
*/
if (ti->fp == 0)
if (!ti->packet || !ti->fp)
return 0;
/*
* "good" settings as per Mt Fuji.
*/
if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1)
return 0;
if (ti->rt == 0 && ti->blank == 0)
return 1;
if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1)
return 0;
if (ti->rt == 0 && ti->blank == 1)
return 1;
if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1)
return 0;
if (ti->rt == 1 && ti->blank == 0)
return 1;
printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
return 1;
return 0;
}
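Condensed, the rewritten predicate reads: DVD+RW and DVD-RAM tracks are always writable; anything else must be a fixed-packet track (ti->packet && ti->fp), after which every rt/blank combination except rt == 1, blank == 1 is accepted as a good state per Mt Fuji, and that last combination falls through to the "bad state" message and returns not-writable.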
/*
* 0 -- we can write to this disc, 1 -- we can't
* 1 -- we can write to this disc, 0 -- we can't
*/
static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
switch (pd->mmc3_profile) {
case 0x0a: /* CD-RW */
@ -1537,10 +1541,10 @@ static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
case 0x1a: /* DVD+RW */
case 0x13: /* DVD-RW */
case 0x12: /* DVD-RAM */
return 0;
return 1;
default:
VPRINTK("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile);
return 1;
return 0;
}
/*
@ -1549,25 +1553,25 @@ static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
*/
if (di->disc_type == 0xff) {
printk("pktcdvd: Unknown disc. No track?\n");
return 1;
return 0;
}
if (di->disc_type != 0x20 && di->disc_type != 0) {
printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
return 1;
return 0;
}
if (di->erasable == 0) {
printk("pktcdvd: Disc not erasable\n");
return 1;
return 0;
}
if (di->border_status == PACKET_SESSION_RESERVED) {
printk("pktcdvd: Can't write to last track (reserved)\n");
return 1;
return 0;
}
return 0;
return 1;
}
static int pkt_probe_settings(struct pktcdvd_device *pd)
@ -1592,23 +1596,9 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
return ret;
}
if (pkt_good_disc(pd, &di))
return -ENXIO;
if (!pkt_writable_disc(pd, &di))
return -EROFS;
switch (pd->mmc3_profile) {
case 0x1a: /* DVD+RW */
printk("pktcdvd: inserted media is DVD+RW\n");
break;
case 0x13: /* DVD-RW */
printk("pktcdvd: inserted media is DVD-RW\n");
break;
case 0x12: /* DVD-RAM */
printk("pktcdvd: inserted media is DVD-RAM\n");
break;
default:
printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
break;
}
pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
@ -1617,9 +1607,9 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
return ret;
}
if (pkt_good_track(&ti)) {
if (!pkt_writable_track(pd, &ti)) {
printk("pktcdvd: can't write to this track\n");
return -ENXIO;
return -EROFS;
}
/*
@ -1633,7 +1623,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
}
if (pd->settings.size > PACKET_MAX_SECTORS) {
printk("pktcdvd: packet size is too big\n");
return -ENXIO;
return -EROFS;
}
pd->settings.fp = ti.fp;
pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
@ -1675,7 +1665,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd)
break;
default:
printk("pktcdvd: unknown data mode\n");
return 1;
return -EROFS;
}
return 0;
}
@ -1886,7 +1876,7 @@ static int pkt_open_write(struct pktcdvd_device *pd)
if ((ret = pkt_probe_settings(pd))) {
VPRINTK("pktcdvd: %s failed probe\n", pd->name);
return -EROFS;
return ret;
}
if ((ret = pkt_set_write_settings(pd))) {

View File

@ -15,22 +15,23 @@ config AGP
due to kernel allocation issues), you could use PCI accesses
and have up to a couple gigs of texture space.
Note that this is the only means to have XFree4/GLX use
Note that this is the only means to have X/GLX use
write-combining with MTRR support on the AGP bus. Without it, OpenGL
direct rendering will be a lot slower but still faster than PIO.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say N.
To compile this driver as a module, choose M here: the
module will be called agpgart.
You should say Y here if you want to use GLX or DRI.
If unsure, say N.
config AGP_ALI
tristate "ALI chipset support"
depends on AGP && X86_32
---help---
This option gives you AGP support for the GLX component of
XFree86 4.x on the following ALi chipsets. The supported chipsets
X on the following ALi chipsets. The supported chipsets
include M1541, M1621, M1631, M1632, M1641,M1647,and M1651.
For the ALi-chipset question, ALi suggests you refer to
<http://www.ali.com.tw/eng/support/index.shtml>.
@ -40,28 +41,19 @@ config AGP_ALI
timing issues, this chipset cannot do AGP 2x with the G200.
This is a hardware limitation. AGP 1x seems to be fine, though.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say N.
config AGP_ATI
tristate "ATI chipset support"
depends on AGP && X86_32
---help---
This option gives you AGP support for the GLX component of
XFree86 4.x on the ATI RadeonIGP family of chipsets.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say N.
This option gives you AGP support for the GLX component of
X on the ATI RadeonIGP family of chipsets.
config AGP_AMD
tristate "AMD Irongate, 761, and 762 chipset support"
depends on AGP && X86_32
help
This option gives you AGP support for the GLX component of
XFree86 4.x on AMD Irongate, 761, and 762 chipsets.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say N.
X on AMD Irongate, 761, and 762 chipsets.
config AGP_AMD64
tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU
@ -69,45 +61,38 @@ config AGP_AMD64
default y if GART_IOMMU
help
This option gives you AGP support for the GLX component of
XFree86 4.x using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs.
You still need an external AGP bridge like the AMD 8151, VIA
K8T400M, SiS755. It may also support other AGP bridges when loaded
with agp_try_unsupported=1.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say Y
config AGP_INTEL
tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support"
depends on AGP && X86
help
This option gives you AGP support for the GLX component of XFree86 4.x
This option gives you AGP support for the GLX component of X
on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875,
E7205 and E7505 chipsets and full support for the 810, 815, 830M, 845G,
852GM, 855GM, 865G and I915 integrated graphics chipsets.
E7205 and E7505 chipsets and full support for the 810, 815, 830M,
845G, 852GM, 855GM, 865G and I915 integrated graphics chipsets.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI, or if you have any Intel integrated graphics
chipsets. If unsure, say Y.
config AGP_NVIDIA
tristate "NVIDIA nForce/nForce2 chipset support"
depends on AGP && X86_32
help
This option gives you AGP support for the GLX component of
XFree86 4.x on the following NVIDIA chipsets. The supported chipsets
include nForce and nForce2
X on NVIDIA chipsets including nForce and nForce2
config AGP_SIS
tristate "SiS chipset support"
depends on AGP && X86_32
help
This option gives you AGP support for the GLX component of
XFree86 4.x on Silicon Integrated Systems [SiS] chipsets.
X on Silicon Integrated Systems [SiS] chipsets.
Note that 5591/5592 AGP chipsets are NOT supported.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say N.
config AGP_SWORKS
tristate "Serverworks LE/HE chipset support"
@ -121,10 +106,7 @@ config AGP_VIA
depends on AGP && X86_32
help
This option gives you AGP support for the GLX component of
XFree86 4.x on VIA MVP3/Apollo Pro chipsets.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say N.
X on VIA MVP3/Apollo Pro chipsets.
config AGP_I460
tristate "Intel 460GX chipset support"
@ -159,9 +141,6 @@ config AGP_EFFICEON
This option gives you AGP support for the Transmeta Efficeon
series processors with integrated northbridges.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say Y.
config AGP_SGI_TIOCA
tristate "SGI TIO chipset AGP support"
depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC)

View File

@ -516,8 +516,10 @@ static int __devinit nforce3_agp_init(struct pci_dev *pdev)
pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &apbase);
/* if x86-64 aperture base is beyond 4G, exit here */
if ( (apbase & 0x7fff) >> (32 - 25) )
return -ENODEV;
if ( (apbase & 0x7fff) >> (32 - 25) ) {
printk(KERN_INFO PFX "aperture base > 4G\n");
return -ENODEV;
}
apbase = (apbase & 0x7fff) << 25;

View File

@ -468,9 +468,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
switch (pdev->device) {
case 0x0006:
/* ServerWorks CNB20HE
Fail silently.*/
printk (KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n");
printk (KERN_ERR PFX "ServerWorks CNB20HE is unsupported due to lack of documentation.\n");
return -ENODEV;
case PCI_DEVICE_ID_SERVERWORKS_HE:

View File

@ -202,10 +202,15 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
void i915_driver_irq_uninstall(drm_device_t * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u16 temp;
if (!dev_priv)
return;
I915_WRITE16(I915REG_HWSTAM, 0xffff);
I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
temp = I915_READ16(I915REG_INT_IDENTITY_R);
I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
}
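The two added lines implement a standard write-one-to-clear acknowledge on teardown: reading the identity register returns the currently pending interrupt bits, and writing the same value back clears them so none survive past the uninstall. A condensed restatement of the hunk (register semantics inferred from the diff):
	u16 pending = I915_READ16(I915REG_INT_IDENTITY_R);  /* latch pending bits */
	I915_WRITE16(I915REG_INT_IDENTITY_R, pending);      /* write 1s back to clear */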

View File

@ -161,6 +161,7 @@ void r300_init_reg_flags(void)
ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
ADD_RANGE(R300_GB_ENABLE, 1);
ADD_RANGE(R300_GB_MSPOS0, 5);
ADD_RANGE(R300_TX_CNTL, 1);
ADD_RANGE(R300_TX_ENABLE, 1);
ADD_RANGE(0x4200, 4);
ADD_RANGE(0x4214, 1);
@ -489,6 +490,50 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
return 0;
}
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
u32 *cmd = (u32 *) cmdbuf->buf;
int count, ret;
RING_LOCALS;
count=(cmd[0]>>16) & 0x3fff;
if (cmd[0] & 0x8000) {
u32 offset;
if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[2] << 10;
ret = r300_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
return DRM_ERR(EINVAL);
}
}
if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
(cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[3] << 10;
ret = r300_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
return DRM_ERR(EINVAL);
}
}
}
BEGIN_RING(count+2);
OUT_RING(cmd[0]);
OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
ADVANCE_RING();
cmdbuf->buf += (count+2)*4;
cmdbuf->bufsz -= (count+2)*4;
return 0;
}
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
@ -527,6 +572,9 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
case RADEON_CNTL_BITBLT_MULTI:
return r300_emit_bitblt_multi(dev_priv, cmdbuf);
case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */

View File

@ -451,6 +451,9 @@ I am fairly certain that they are correct unless stated otherwise in comments.
/* END */
/* gap */
/* Zero to flush caches. */
#define R300_TX_CNTL 0x4100
/* The upper enable bits are guessed, based on fglrx reported limits. */
#define R300_TX_ENABLE 0x4104
# define R300_TX_ENABLE_0 (1 << 0)

View File

@ -90,9 +90,10 @@
* 1.19- Add support for gart table in FB memory and PCIE r300
* 1.20- Add support for r300 texrect
* 1.21- Add support for card type getparam
* 1.22- Add support for texture cache flushes (R300_TX_CNTL)
*/
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 21
#define DRIVER_MINOR 22
#define DRIVER_PATCHLEVEL 0
/*

View File

@ -243,7 +243,7 @@ static struct sysrq_key_op sysrq_term_op = {
static void moom_callback(void *ignored)
{
out_of_memory(GFP_KERNEL, 0);
out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL], GFP_KERNEL, 0);
}
static DECLARE_WORK(moom_work, moom_callback, NULL);

View File

@ -348,10 +348,10 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t
break;
case 32:
E_KEY[4] = le32_to_cpu(in_key[4]);
E_KEY[5] = le32_to_cpu(in_key[5]);
E_KEY[6] = le32_to_cpu(in_key[6]);
t = E_KEY[7] = le32_to_cpu(in_key[7]);
E_KEY[4] = le32_to_cpu(key[4]);
E_KEY[5] = le32_to_cpu(key[5]);
E_KEY[6] = le32_to_cpu(key[6]);
t = E_KEY[7] = le32_to_cpu(key[7]);
for (i = 0; i < 7; ++i)
loop8 (i);
break;

View File

@ -1053,7 +1053,7 @@ static int fc_do_els(fc_channel *fc, unsigned int alpa, void *data, int len)
int i;
fcmd = &_fcmd;
memset(fcmd, 0, sizeof(fcmd));
memset(fcmd, 0, sizeof(fcp_cmnd));
FCD(("PLOGI SID %d DID %d\n", fc->sid, alpa))
fch = &fcmd->fch;
FILL_FCHDR_RCTL_DID(fch, R_CTL_ELS_REQ, alpa);
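The memset change above replaces a classic sizeof-on-a-pointer bug with the structure size; a minimal sketch of the difference, assuming fcp_cmnd is the struct typedef that the fix's sizeof implies:
	fcp_cmnd _fcmd, *fcmd = &_fcmd;
	memset(fcmd, 0, sizeof(fcmd));      /* bug: clears only 4 or 8 bytes (size of the pointer) */
	memset(fcmd, 0, sizeof(fcp_cmnd));  /* fix: clears the whole structure */
	memset(fcmd, 0, sizeof(*fcmd));     /* equivalent spelling that tracks the variable's type */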

View File

@ -137,15 +137,15 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)"
/*
* SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on
* if your sbp2 device is not properly handling the SCSI inquiry command.
* This hack makes the inquiry look more like a typical MS Windows
* inquiry.
* This hack makes the inquiry look more like a typical MS Windows inquiry
* by enforcing 36 byte inquiry and avoiding access to mode_sense page 8.
*
* If force_inquiry_hack=1 is required for your device to work,
* please submit the logged sbp2_firmware_revision value of this device to
* the linux1394-devel mailing list.
*/
static int force_inquiry_hack;
module_param(force_inquiry_hack, int, 0444);
module_param(force_inquiry_hack, int, 0644);
MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)");
/*
@ -264,18 +264,17 @@ static struct hpsb_protocol_driver sbp2_driver = {
},
};
/* List of device firmware's that require a forced 36 byte inquiry. */
/*
* List of device firmwares that require the inquiry hack.
* Yields a few false positives but did not break other devices so far.
*/
static u32 sbp2_broken_inquiry_list[] = {
0x00002800, /* Stefan Richter <richtest@bauwesen.tu-cottbus.de> */
0x00002800, /* Stefan Richter <stefanr@s5r6.in-berlin.de> */
/* DViCO Momobay CX-1 */
0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */
/* QPS Fire DVDBurner */
};
#define NUM_BROKEN_INQUIRY_DEVS \
(sizeof(sbp2_broken_inquiry_list)/sizeof(*sbp2_broken_inquiry_list))
/**************************************
* General utility functions
**************************************/
@ -643,9 +642,15 @@ static int sbp2_remove(struct device *dev)
if (!scsi_id)
return 0;
/* Trigger shutdown functions in scsi's highlevel. */
if (scsi_id->scsi_host)
if (scsi_id->scsi_host) {
/* Get rid of enqueued commands if there is no chance to
* send them. */
if (!sbp2util_node_is_available(scsi_id))
sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT);
/* scsi_remove_device() will trigger shutdown functions of SCSI
* highlevel drivers which would deadlock if blocked. */
scsi_unblock_requests(scsi_id->scsi_host);
}
sdev = scsi_id->sdev;
if (sdev) {
scsi_id->sdev = NULL;
@ -742,11 +747,6 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
hi->host = ud->ne->host;
INIT_LIST_HEAD(&hi->scsi_ids);
/* Register our sbp2 status address space... */
hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host, &sbp2_ops,
SBP2_STATUS_FIFO_ADDRESS,
SBP2_STATUS_FIFO_ADDRESS +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(SBP2_MAX_UDS_PER_NODE+1));
#ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
/* Handle data movement if physical dma is not
* enabled/supported on host controller */
@ -759,6 +759,18 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
list_add_tail(&scsi_id->scsi_list, &hi->scsi_ids);
/* Register the status FIFO address range. We could use the same FIFO
* for targets at different nodes. However we need different FIFOs per
* target in order to support multi-unit devices. */
scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace(
&sbp2_highlevel, ud->ne->host, &sbp2_ops,
sizeof(struct sbp2_status_block), sizeof(quadlet_t),
~0ULL, ~0ULL);
if (!scsi_id->status_fifo_addr) {
SBP2_ERR("failed to allocate status FIFO address range");
goto failed_alloc;
}
/* Register our host with the SCSI stack. */
scsi_host = scsi_host_alloc(&scsi_driver_template,
sizeof(unsigned long));
@ -997,6 +1009,10 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
SBP2_DMA_FREE("single query logins data");
}
if (scsi_id->status_fifo_addr)
hpsb_unregister_addrspace(&sbp2_highlevel, hi->host,
scsi_id->status_fifo_addr);
scsi_id->ud->device.driver_data = NULL;
SBP2_DEBUG("SBP-2 device removed, SCSI ID = %d", scsi_id->ud->id);
@ -1075,11 +1091,10 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
ORB_SET_QUERY_LOGINS_RESP_LENGTH(sizeof(struct sbp2_query_logins_response));
SBP2_DEBUG("sbp2_query_logins: reserved_resp_length initialized");
scsi_id->query_logins_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
scsi_id->query_logins_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
SBP2_STATUS_FIFO_ADDRESS_HI);
SBP2_DEBUG("sbp2_query_logins: status FIFO initialized");
scsi_id->query_logins_orb->status_fifo_hi =
ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
scsi_id->query_logins_orb->status_fifo_lo =
ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb));
@ -1184,11 +1199,10 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
SBP2_DEBUG("sbp2_login_device: passwd_resp_lengths initialized");
scsi_id->login_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
scsi_id->login_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
SBP2_STATUS_FIFO_ADDRESS_HI);
SBP2_DEBUG("sbp2_login_device: status FIFO initialized");
scsi_id->login_orb->status_fifo_hi =
ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
scsi_id->login_orb->status_fifo_lo =
ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
/*
* Byte swap ORB if necessary
@ -1301,10 +1315,10 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
scsi_id->logout_orb->reserved5 = 0x0;
scsi_id->logout_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
scsi_id->logout_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) |
SBP2_STATUS_FIFO_ADDRESS_HI);
scsi_id->logout_orb->status_fifo_hi =
ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
scsi_id->logout_orb->status_fifo_lo =
ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
/*
* Byte swap ORB if necessary
@ -1366,10 +1380,10 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1);
scsi_id->reconnect_orb->reserved5 = 0x0;
scsi_id->reconnect_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO +
SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id);
scsi_id->reconnect_orb->status_FIFO_hi =
(ORB_SET_NODE_ID(hi->host->node_id) | SBP2_STATUS_FIFO_ADDRESS_HI);
scsi_id->reconnect_orb->status_fifo_hi =
ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
scsi_id->reconnect_orb->status_fifo_lo =
ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
/*
* Byte swap ORB if necessary
@ -1560,7 +1574,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
/* Check for a blacklisted set of devices that require us to force
* a 36 byte host inquiry. This can be overridden as a module param
* (to force all hosts). */
for (i = 0; i < NUM_BROKEN_INQUIRY_DEVS; i++) {
for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) {
if ((firmware_revision & 0xffff00) ==
sbp2_broken_inquiry_list[i]) {
SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround",
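The loop bound above also drops the hand-rolled NUM_BROKEN_INQUIRY_DEVS macro in favor of the kernel's ARRAY_SIZE() helper; for reference, the idiom as defined in include/linux/kernel.h of this era:
	#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
Centralizing it avoids the element count silently going stale if the array declaration changes.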
@ -2006,18 +2020,6 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
return -EIO;
}
/*
* The scsi stack sends down a request_bufflen which does not match the
* length field in the scsi cdb. This causes some sbp2 devices to
* reject this inquiry command. Fix the request_bufflen.
*/
if (*cmd == INQUIRY) {
if (force_inquiry_hack || scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK)
request_bufflen = cmd[4] = 0x24;
else
request_bufflen = cmd[4];
}
/*
* Now actually fill in the command orb and sbp2 s/g list
*/
@ -2082,9 +2084,7 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
SBP2_DEBUG("sbp2_check_sbp2_response");
switch (SCpnt->cmnd[0]) {
case INQUIRY:
if (SCpnt->cmnd[0] == INQUIRY && (SCpnt->cmnd[1] & 3) == 0) {
/*
* Make sure data length is ok. Minimum length is 36 bytes
*/
@ -2097,13 +2097,7 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
*/
scsi_buf[2] |= 2;
scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2;
break;
default:
break;
}
return;
}
/*
@ -2114,7 +2108,6 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
{
struct sbp2scsi_host_info *hi;
struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp;
u32 id;
struct scsi_cmnd *SCpnt = NULL;
u32 scsi_status = SBP2_SCSI_STATUS_GOOD;
struct sbp2_command_info *command;
@ -2137,12 +2130,12 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
}
/*
* Find our scsi_id structure by looking at the status fifo address written to by
* the sbp2 device.
* Find our scsi_id structure by looking at the status fifo address
* written to by the sbp2 device.
*/
id = SBP2_STATUS_FIFO_OFFSET_TO_ENTRY((u32)(addr - SBP2_STATUS_FIFO_ADDRESS));
list_for_each_entry(scsi_id_tmp, &hi->scsi_ids, scsi_list) {
if (scsi_id_tmp->ne->nodeid == nodeid && scsi_id_tmp->ud->id == id) {
if (scsi_id_tmp->ne->nodeid == nodeid &&
scsi_id_tmp->status_fifo_addr == addr) {
scsi_id = scsi_id_tmp;
break;
}
@ -2483,7 +2476,16 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
static int sbp2scsi_slave_alloc(struct scsi_device *sdev)
{
((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev;
struct scsi_id_instance_data *scsi_id =
(struct scsi_id_instance_data *)sdev->host->hostdata[0];
scsi_id->sdev = sdev;
if (force_inquiry_hack ||
scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) {
sdev->inquiry_len = 36;
sdev->skip_ms_page_8 = 1;
}
return 0;
}

View File

@ -33,15 +33,17 @@
#define ORB_DIRECTION_NO_DATA_TRANSFER 0x2
#define ORB_SET_NULL_PTR(value) ((value & 0x1) << 31)
#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31)
#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29) /* unused ? */
#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31)
#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29) /* unused ? */
#define ORB_SET_NODE_ID(value) ((value & 0xffff) << 16)
#define ORB_SET_DATA_SIZE(value) (value & 0xffff)
#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16)
#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19)
#define ORB_SET_MAX_PAYLOAD(value) ((value & 0xf) << 20)
#define ORB_SET_SPEED(value) ((value & 0x7) << 24)
#define ORB_SET_DIRECTION(value) ((value & 0x1) << 27)
#define ORB_SET_STATUS_FIFO_HI(value, id) (value >> 32 | ORB_SET_NODE_ID(id))
#define ORB_SET_STATUS_FIFO_LO(value) (value & 0xffffffff)
#define ORB_SET_DATA_SIZE(value) (value & 0xffff)
#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16)
#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19)
#define ORB_SET_MAX_PAYLOAD(value) ((value & 0xf) << 20)
#define ORB_SET_SPEED(value) ((value & 0x7) << 24)
#define ORB_SET_DIRECTION(value) ((value & 0x1) << 27)
struct sbp2_command_orb {
volatile u32 next_ORB_hi;
@ -76,8 +78,8 @@ struct sbp2_login_orb {
u32 login_response_lo;
u32 lun_misc;
u32 passwd_resp_lengths;
u32 status_FIFO_hi;
u32 status_FIFO_lo;
u32 status_fifo_hi;
u32 status_fifo_lo;
};
#define RESPONSE_GET_LOGIN_ID(value) (value & 0xffff)
@ -102,8 +104,8 @@ struct sbp2_query_logins_orb {
u32 query_response_lo;
u32 lun_misc;
u32 reserved_resp_length;
u32 status_FIFO_hi;
u32 status_FIFO_lo;
u32 status_fifo_hi;
u32 status_fifo_lo;
};
#define RESPONSE_GET_MAX_LOGINS(value) (value & 0xffff)
@ -123,8 +125,8 @@ struct sbp2_reconnect_orb {
u32 reserved4;
u32 login_ID_misc;
u32 reserved5;
u32 status_FIFO_hi;
u32 status_FIFO_lo;
u32 status_fifo_hi;
u32 status_fifo_lo;
};
struct sbp2_logout_orb {
@ -134,8 +136,8 @@ struct sbp2_logout_orb {
u32 reserved4;
u32 login_ID_misc;
u32 reserved5;
u32 status_FIFO_hi;
u32 status_FIFO_lo;
u32 status_fifo_hi;
u32 status_fifo_lo;
};
#define PAGE_TABLE_SET_SEGMENT_BASE_HI(value) (value & 0xffff)
@ -195,30 +197,6 @@ struct sbp2_status_block {
* Miscellaneous SBP2 related config rom defines
*/
/* The status fifo address definition below is used as a base for each
* node, with a chunk separately assigned to each unit directory in the
* node. For example, 0xfffe00000000ULL is used for the first sbp2 device
* detected on node 0, 0xfffe00000020ULL for the next sbp2 device on node
* 0, and so on.
*
* Note: We could use a single status fifo address for all sbp2 devices,
* and figure out which sbp2 device the status belongs to by looking at
* the source node id of the status write... but, using separate addresses
* for each sbp2 unit directory allows for better code and the ability to
* support multiple luns within a single 1394 node.
*
* Also note that we choose the address range below as it is a region
* specified for write posting, where the ohci controller will
* automatically send an ack_complete when the status is written by the
* sbp2 device... saving a split transaction. =)
*/
#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL
#define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe
#define SBP2_STATUS_FIFO_ADDRESS_LO 0x0
#define SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(entry) ((entry) << 5)
#define SBP2_STATUS_FIFO_OFFSET_TO_ENTRY(offset) ((offset) >> 5)
#define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1
#define SBP2_CSR_OFFSET_KEY 0x54
#define SBP2_UNIT_SPEC_ID_KEY 0x12
@ -258,7 +236,6 @@ struct sbp2_status_block {
*/
#define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000
#define SBP2_MAX_UDS_PER_NODE 16 /* Maximum scsi devices per node */
#define SBP2_MAX_SECTORS 255 /* Max sectors supported */
#define SBP2_MAX_CMDS 8 /* This should be safe */
@ -337,6 +314,11 @@ struct scsi_id_instance_data {
u32 sbp2_lun;
u32 sbp2_firmware_revision;
/*
* Address for the device to write status blocks to
*/
u64 status_fifo_addr;
/*
* Variable used for logins, reconnects, logouts, query logins
*/
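To make the new per-target status FIFO plumbing concrete, here is a hypothetical worked example of the two macros above; the address and node ID values are invented:
	u64 addr = 0xfffff0010000ULL;  /* as returned by hpsb_allocate_and_register_addrspace() */
	u16 node = 0xffc0;             /* local host node ID */
	u32 hi = ORB_SET_STATUS_FIFO_HI(addr, node);  /* 0xffc0ffff: node ID | upper 16 address bits */
	u32 lo = ORB_SET_STATUS_FIFO_LO(addr);        /* 0xf0010000: lower 32 address bits */
Because each target now owns its own FIFO address, sbp2_handle_status_write() can match a status write by (nodeid, addr) instead of decoding a per-node entry offset.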

View File

@ -1019,8 +1019,8 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
INVALIDATE_CACHED_RANGE(map, from, size)
#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
UDELAY(map, chip, adr, usec)
#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \
UDELAY(map, chip, cmd_adr, usec)
/*
* Extra notes:
@ -1052,7 +1052,7 @@ do { \
spin_lock(chip->mutex); \
} while (0)
#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \
do { \
spin_unlock(chip->mutex); \
INVALIDATE_CACHED_RANGE(map, adr, len); \
@ -1284,7 +1284,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
map_write(map, datum, adr);
chip->state = mode;
INVALIDATE_CACHE_UDELAY(map, chip,
INVALIDATE_CACHE_UDELAY(map, chip, adr,
adr, map_bankwidth(map),
chip->word_write_time);
@ -1572,8 +1572,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xd0), cmd_adr);
chip->state = FL_WRITING;
INVALIDATE_CACHE_UDELAY(map, chip,
cmd_adr, len,
INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr,
adr, len,
chip->buffer_write_time);
timeo = jiffies + (HZ/2);
@ -1744,7 +1744,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->state = FL_ERASING;
chip->erase_suspended = 0;
INVALIDATE_CACHE_UDELAY(map, chip,
INVALIDATE_CACHE_UDELAY(map, chip, adr,
adr, len,
chip->erase_time*1000/2);

View File

@ -108,6 +108,7 @@ static void irda_usb_close(struct irda_usb_cb *self);
static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs);
static void write_bulk_callback(struct urb *urb, struct pt_regs *regs);
static void irda_usb_receive(struct urb *urb, struct pt_regs *regs);
static void irda_usb_rx_defer_expired(unsigned long data);
static int irda_usb_net_open(struct net_device *dev);
static int irda_usb_net_close(struct net_device *dev);
static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@ -677,6 +678,12 @@ static void irda_usb_net_timeout(struct net_device *netdev)
* on the interrupt pipe and hang the Rx URB only when an interrupt is
* received.
* Jean II
*
* Note : don't read the above as what we are currently doing, but as
* something we could do with KC dongle. Also don't forget that the
* interrupt pipe is not part of the original standard, so this would
* need to be optional...
* Jean II
*/
/*------------------------------------------------------------------*/
@ -704,10 +711,8 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc
/* Reinitialize URB */
usb_fill_bulk_urb(urb, self->usbdev,
usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep),
skb->data, skb->truesize,
skb->data, IRDA_SKB_MAX_MTU,
irda_usb_receive, skb);
/* Note : unlink *must* be synchronous because of the code in
* irda_usb_net_close() -> free the skb - Jean II */
urb->status = 0;
/* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */
@ -734,6 +739,7 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
struct irda_skb_cb *cb;
struct sk_buff *newskb;
struct sk_buff *dataskb;
struct urb *next_urb;
int docopy;
IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length);
@ -755,20 +761,37 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs)
if (urb->status != 0) {
switch (urb->status) {
case -EILSEQ:
self->stats.rx_errors++;
self->stats.rx_crc_errors++;
break;
/* Also precursor to a hot-unplug on UHCI. */
/* Fallthrough... */
case -ECONNRESET: /* -104 */
IRDA_DEBUG(0, "%s(), Connection Reset (-104), transfer_flags 0x%04X \n", __FUNCTION__, urb->transfer_flags);
/* Random error, if I remember correctly */
/* uhci_cleanup_unlink() is going to kill the Rx
* URB just after we return. No problem, at this
* point the URB will be idle ;-) - Jean II */
break;
case -ESHUTDOWN: /* -108 */
/* That's usually a hot-unplug. Submit will fail... */
case -ETIMEDOUT: /* -110 */
/* Usually precursor to a hot-unplug on OHCI. */
default:
IRDA_DEBUG(0, "%s(), RX status %d,transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
self->stats.rx_errors++;
IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags);
break;
}
goto done;
/* If we received an error, we don't want to resubmit the
* Rx URB straight away but to give the USB layer a little
* bit of breathing room.
* We are in the USB thread context, therefore there is a
* danger of recursion (new URB we submit fails, we come
* back here).
* With recent USB stack (2.6.15+), I'm seeing that on
* hot unplug of the dongle...
* Lowest effective timer is 10ms...
* Jean II */
self->rx_defer_timer.function = &irda_usb_rx_defer_expired;
self->rx_defer_timer.data = (unsigned long) urb;
mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
return;
}
/* Check for empty frames */
@ -845,13 +868,45 @@ done:
* idle slot....
* Jean II */
/* Note : with this scheme, we could submit the idle URB before
* processing the Rx URB. Another time... Jean II */
* processing the Rx URB. I don't think it would buy us anything as
* we are running in the USB thread context. Jean II */
next_urb = self->idle_rx_urb;
/* Submit the idle URB to replace the URB we've just received */
irda_usb_submit(self, skb, self->idle_rx_urb);
/* Recycle Rx URB : Now, the idle URB is the present one */
urb->context = NULL;
self->idle_rx_urb = urb;
/* Submit the idle URB to replace the URB we've just received.
* Do it last to avoid race conditions... Jean II */
irda_usb_submit(self, skb, next_urb);
}
/*------------------------------------------------------------------*/
/*
* In case of errors, we want the USB layer to have time to recover.
* Now, it is time to resubmit our Rx URB...
*/
static void irda_usb_rx_defer_expired(unsigned long data)
{
struct urb *urb = (struct urb *) data;
struct sk_buff *skb = (struct sk_buff *) urb->context;
struct irda_usb_cb *self;
struct irda_skb_cb *cb;
struct urb *next_urb;
IRDA_DEBUG(2, "%s()\n", __FUNCTION__);
/* Find ourselves */
cb = (struct irda_skb_cb *) skb->cb;
IRDA_ASSERT(cb != NULL, return;);
self = (struct irda_usb_cb *) cb->context;
IRDA_ASSERT(self != NULL, return;);
/* Same stuff as when Rx is done, see above... */
next_urb = self->idle_rx_urb;
urb->context = NULL;
self->idle_rx_urb = urb;
irda_usb_submit(self, skb, next_urb);
}
/*------------------------------------------------------------------*/
@ -990,6 +1045,9 @@ static int irda_usb_net_close(struct net_device *netdev)
/* Stop network Tx queue */
netif_stop_queue(netdev);
/* Kill deferred Rx URB */
del_timer(&self->rx_defer_timer);
/* Deallocate all the Rx path buffers (URBs and skb) */
for (i = 0; i < IU_MAX_RX_URBS; i++) {
struct urb *urb = self->rx_urb[i];
@ -1365,6 +1423,7 @@ static int irda_usb_probe(struct usb_interface *intf,
self = net->priv;
self->netdev = net;
spin_lock_init(&self->lock);
init_timer(&self->rx_defer_timer);
/* Create all of the needed urbs */
for (i = 0; i < IU_MAX_RX_URBS; i++) {
@ -1498,6 +1557,9 @@ static void irda_usb_disconnect(struct usb_interface *intf)
* This will stop/deactivate the Tx path. - Jean II */
self->present = 0;
/* Kill deferred Rx URB */
del_timer(&self->rx_defer_timer);
/* We need to have irq enabled to unlink the URBs. That's OK,
* at this point the Tx path is gone - Jean II */
spin_unlock_irqrestore(&self->lock, flags);
@ -1507,11 +1569,11 @@ static void irda_usb_disconnect(struct usb_interface *intf)
/* Accept no more transmissions */
/*netif_device_detach(self->netdev);*/
netif_stop_queue(self->netdev);
/* Stop all the receive URBs */
/* Stop all the receive URBs. Must be synchronous. */
for (i = 0; i < IU_MAX_RX_URBS; i++)
usb_kill_urb(self->rx_urb[i]);
/* Cancel Tx and speed URB.
* Toggle flags to make sure it's synchronous. */
* Make sure it's synchronous to avoid races. */
usb_kill_urb(self->tx_urb);
usb_kill_urb(self->speed_urb);
}

View File

@ -136,8 +136,6 @@ struct irda_usb_cb {
__u16 bulk_out_mtu; /* Max Tx packet size in bytes */
__u8 bulk_int_ep; /* Interrupt Endpoint assignments */
wait_queue_head_t wait_q; /* for timeouts */
struct urb *rx_urb[IU_MAX_RX_URBS]; /* URBs used to receive data frames */
struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */
struct urb *tx_urb; /* URB used to send data frames */
@ -147,17 +145,18 @@ struct irda_usb_cb {
struct net_device_stats stats;
struct irlap_cb *irlap; /* The link layer we are bound to */
struct qos_info qos;
hashbin_t *tx_list; /* Queued transmit skb's */
char *speed_buff; /* Buffer for speed changes */
struct timeval stamp;
struct timeval now;
spinlock_t lock; /* For serializing operations */
spinlock_t lock; /* For serializing Tx operations */
__u16 xbofs; /* Current xbofs setting */
__s16 new_xbofs; /* xbofs we need to set */
__u32 speed; /* Current speed */
__s32 new_speed; /* speed we need to set */
struct timer_list rx_defer_timer; /* Wait for Rx error to clear */
};

View File

@ -287,6 +287,20 @@ enum RTL8169_register_content {
TxInterFrameGapShift = 24,
TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
/* Config1 register p.24 */
PMEnable = (1 << 0), /* Power Management Enable */
/* Config3 register p.25 */
MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
/* Config5 register p.27 */
BWF = (1 << 6), /* Accept Broadcast wakeup frame */
MWF = (1 << 5), /* Accept Multicast wakeup frame */
UWF = (1 << 4), /* Accept Unicast wakeup frame */
LanWake = (1 << 1), /* LanWake enable/disable */
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
/* TBICSR p.28 */
TBIReset = 0x80000000,
TBILoopback = 0x40000000,
@ -433,6 +447,7 @@ struct rtl8169_private {
unsigned int (*phy_reset_pending)(void __iomem *);
unsigned int (*link_ok)(void __iomem *);
struct work_struct task;
unsigned wol_enabled : 1;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@ -607,6 +622,80 @@ static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
*duplex = p->duplex;
}
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u8 options;
wol->wolopts = 0;
#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
wol->supported = WAKE_ANY;
spin_lock_irq(&tp->lock);
options = RTL_R8(Config1);
if (!(options & PMEnable))
goto out_unlock;
options = RTL_R8(Config3);
if (options & LinkUp)
wol->wolopts |= WAKE_PHY;
if (options & MagicPacket)
wol->wolopts |= WAKE_MAGIC;
options = RTL_R8(Config5);
if (options & UWF)
wol->wolopts |= WAKE_UCAST;
if (options & BWF)
wol->wolopts |= WAKE_BCAST;
if (options & MWF)
wol->wolopts |= WAKE_MCAST;
out_unlock:
spin_unlock_irq(&tp->lock);
}
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
int i;
static struct {
u32 opt;
u16 reg;
u8 mask;
} cfg[] = {
{ WAKE_ANY, Config1, PMEnable },
{ WAKE_PHY, Config3, LinkUp },
{ WAKE_MAGIC, Config3, MagicPacket },
{ WAKE_UCAST, Config5, UWF },
{ WAKE_BCAST, Config5, BWF },
{ WAKE_MCAST, Config5, MWF },
{ WAKE_ANY, Config5, LanWake }
};
spin_lock_irq(&tp->lock);
RTL_W8(Cfg9346, Cfg9346_Unlock);
for (i = 0; i < ARRAY_SIZE(cfg); i++) {
u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
if (wol->wolopts & cfg[i].opt)
options |= cfg[i].mask;
RTL_W8(cfg[i].reg, options);
}
RTL_W8(Cfg9346, Cfg9346_Lock);
tp->wol_enabled = (wol->wolopts) ? 1 : 0;
spin_unlock_irq(&tp->lock);
return 0;
}
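For orientation, a hedged sketch of what the new hook receives when userspace asks for magic-packet wake-up only (field values illustrative; this corresponds to ethtool's 'wol g'):
	struct ethtool_wolinfo wol = {
		.cmd     = ETHTOOL_SWOL,
		.wolopts = WAKE_MAGIC,
	};
With that input, the table walk above sets PMEnable, MagicPacket and LanWake, clears the remaining wake-frame bits, and records tp->wol_enabled = 1, which rtl8169_suspend() later hands to pci_enable_wake().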
static void rtl8169_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@ -1025,6 +1114,8 @@ static struct ethtool_ops rtl8169_ethtool_ops = {
.get_tso = ethtool_op_get_tso,
.set_tso = ethtool_op_set_tso,
.get_regs = rtl8169_get_regs,
.get_wol = rtl8169_get_wol,
.set_wol = rtl8169_set_wol,
.get_strings = rtl8169_get_strings,
.get_stats_count = rtl8169_get_stats_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
@ -1442,6 +1533,11 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out,
}
tp->chipset = i;
RTL_W8(Cfg9346, Cfg9346_Unlock);
RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
RTL_W8(Cfg9346, Cfg9346_Lock);
*ioaddr_out = ioaddr;
*dev_out = dev;
out:
@ -1612,49 +1708,6 @@ rtl8169_remove_one(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
unsigned long flags;
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
netif_stop_queue(dev);
spin_lock_irqsave(&tp->lock, flags);
/* Disable interrupts, stop Rx and Tx */
RTL_W16(IntrMask, 0);
RTL_W8(ChipCmd, 0);
/* Update the error counts. */
tp->stats.rx_missed_errors += RTL_R32(RxMissed);
RTL_W32(RxMissed, 0);
spin_unlock_irqrestore(&tp->lock, flags);
return 0;
}
static int rtl8169_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (!netif_running(dev))
return 0;
netif_device_attach(dev);
rtl8169_hw_start(dev);
return 0;
}
#endif /* CONFIG_PM */
static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
struct net_device *dev)
{
@ -2700,6 +2753,56 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
return &tp->stats;
}
#ifdef CONFIG_PM
static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
if (!netif_running(dev))
goto out;
netif_device_detach(dev);
netif_stop_queue(dev);
spin_lock_irq(&tp->lock);
rtl8169_asic_down(ioaddr);
tp->stats.rx_missed_errors += RTL_R32(RxMissed);
RTL_W32(RxMissed, 0);
spin_unlock_irq(&tp->lock);
pci_save_state(pdev);
pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
out:
return 0;
}
static int rtl8169_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
if (!netif_running(dev))
goto out;
netif_device_attach(dev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_enable_wake(pdev, PCI_D0, 0);
rtl8169_schedule_work(dev, rtl8169_reset_task);
out:
return 0;
}
#endif /* CONFIG_PM */
static struct pci_driver rtl8169_pci_driver = {
.name = MODULENAME,
.id_table = rtl8169_pci_tbl,

View File

@ -879,13 +879,12 @@ static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
int i;
xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
xm_read16(hw, port, XM_PHY_DATA);
*val = xm_read16(hw, port, XM_PHY_DATA);
/* Need to wait for external PHY */
for (i = 0; i < PHY_RETRIES; i++) {
udelay(1);
if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
goto ready;
udelay(1);
}
return -ETIMEDOUT;
@ -918,7 +917,12 @@ static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
ready:
xm_write16(hw, port, XM_PHY_DATA, val);
return 0;
for (i = 0; i < PHY_RETRIES; i++) {
if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static void genesis_init(struct skge_hw *hw)
@ -1168,13 +1172,17 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
u32 r;
const u8 zero[6] = { 0 };
/* Clear MIB counters */
xm_write16(hw, port, XM_STAT_CMD,
XM_SC_CLR_RXC | XM_SC_CLR_TXC);
/* Clear two times according to Errata #3 */
xm_write16(hw, port, XM_STAT_CMD,
XM_SC_CLR_RXC | XM_SC_CLR_TXC);
for (i = 0; i < 10; i++) {
skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
MFF_SET_MAC_RST);
if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
goto reset_ok;
udelay(1);
}
printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);
reset_ok:
/* Unreset the XMAC. */
skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
@ -1191,7 +1199,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
r |= GP_DIR_2|GP_IO_2;
skge_write32(hw, B2_GP_IO, r);
skge_read32(hw, B2_GP_IO);
/* Enable GMII interface */
xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
@ -1205,6 +1213,13 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
for (i = 1; i < 16; i++)
xm_outaddr(hw, port, XM_EXM(i), zero);
/* Clear MIB counters */
xm_write16(hw, port, XM_STAT_CMD,
XM_SC_CLR_RXC | XM_SC_CLR_TXC);
/* Clear two times according to Errata #3 */
xm_write16(hw, port, XM_STAT_CMD,
XM_SC_CLR_RXC | XM_SC_CLR_TXC);
/* configure Rx High Water Mark (XM_RX_HI_WM) */
xm_write16(hw, port, XM_RX_HI_WM, 1450);
@ -2170,8 +2185,10 @@ static int skge_up(struct net_device *dev)
skge->tx_avail = skge->tx_ring.count - 1;
/* Enable IRQ from port */
spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= portirqmask[port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
spin_unlock_irq(&hw->hw_lock);
/* Initialize MAC */
spin_lock_bh(&hw->phy_lock);
@ -2229,8 +2246,10 @@ static int skge_down(struct net_device *dev)
else
yukon_stop(skge);
spin_lock_irq(&hw->hw_lock);
hw->intr_mask &= ~portirqmask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
spin_unlock_irq(&hw->hw_lock);
/* Stop transmitter */
skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
@ -2678,8 +2697,7 @@ static int skge_poll(struct net_device *dev, int *budget)
/* restart receiver */
wmb();
skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
CSR_START | CSR_IRQ_CL_F);
skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
*budget -= work_done;
dev->quota -= work_done;
@ -2687,10 +2705,11 @@ static int skge_poll(struct net_device *dev, int *budget)
if (work_done >= to_do)
return 1; /* not done */
netif_rx_complete(dev);
hw->intr_mask |= portirqmask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
skge_read32(hw, B0_IMSK);
spin_lock_irq(&hw->hw_lock);
__netif_rx_complete(dev);
hw->intr_mask |= portirqmask[skge->port];
skge_write32(hw, B0_IMSK, hw->intr_mask);
spin_unlock_irq(&hw->hw_lock);
return 0;
}
@ -2850,18 +2869,10 @@ static void skge_extirq(unsigned long data)
}
spin_unlock(&hw->phy_lock);
local_irq_disable();
spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= IS_EXT_REG;
skge_write32(hw, B0_IMSK, hw->intr_mask);
local_irq_enable();
}
static inline void skge_wakeup(struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
prefetch(skge->rx_ring.to_clean);
netif_rx_schedule(dev);
spin_unlock_irq(&hw->hw_lock);
}
static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@ -2872,15 +2883,17 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
if (status == 0 || status == ~0) /* hotplug or shared irq */
return IRQ_NONE;
status &= hw->intr_mask;
spin_lock(&hw->hw_lock);
if (status & IS_R1_F) {
skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
hw->intr_mask &= ~IS_R1_F;
skge_wakeup(hw->dev[0]);
netif_rx_schedule(hw->dev[0]);
}
if (status & IS_R2_F) {
skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
hw->intr_mask &= ~IS_R2_F;
skge_wakeup(hw->dev[1]);
netif_rx_schedule(hw->dev[1]);
}
if (status & IS_XA1_F)
@ -2922,6 +2935,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
}
skge_write32(hw, B0_IMSK, hw->intr_mask);
spin_unlock(&hw->hw_lock);
return IRQ_HANDLED;
}
@ -3290,6 +3304,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
hw->pdev = pdev;
spin_lock_init(&hw->phy_lock);
spin_lock_init(&hw->hw_lock);
tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
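The new hw_lock closes a race on the shared interrupt mask: skge_intr() in hard-IRQ context, skge_poll() in softirq context, and the up/down paths all read-modify-write hw->intr_mask before rewriting B0_IMSK. Condensed from the hunks above, the pattern in process context:
	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask |= portirqmask[port];   /* or &= ~... on the down path */
	skge_write32(hw, B0_IMSK, hw->intr_mask);
	spin_unlock_irq(&hw->hw_lock);
skge_intr() takes the same lock with plain spin_lock(), since interrupts are already disabled there.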

View File

@ -2402,6 +2402,7 @@ struct skge_hw {
struct tasklet_struct ext_tasklet;
spinlock_t phy_lock;
spinlock_t hw_lock;
};
enum {

View File

@ -195,11 +195,11 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
pr_debug("sky2_set_power_state %d\n", state);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_PMC, &power_control);
power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC);
vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) &&
(power_control & PCI_PM_CAP_PME_D3cold);
pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_CTRL, &power_control);
power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL);
power_control |= PCI_PM_CTRL_PME_STATUS;
power_control &= ~(PCI_PM_CTRL_STATE_MASK);
@ -223,7 +223,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
/* Turn off phy power saving */
pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
/* looks like this XL is back asswards .. */
@ -232,18 +232,28 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
if (hw->ports > 1)
reg1 |= PCI_Y2_PHY2_COMA;
}
pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
sky2_pci_write32(hw, PCI_DEV_REG3, 0);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
reg1 &= P_ASPM_CONTROL_MSK;
sky2_pci_write32(hw, PCI_DEV_REG4, reg1);
sky2_pci_write32(hw, PCI_DEV_REG5, 0);
}
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
break;
case PCI_D3hot:
case PCI_D3cold:
/* Turn on phy power saving */
pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
else
reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);
pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1);
sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
sky2_write8(hw, B2_Y2_CLK_GATE, 0);
@ -265,7 +275,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
ret = -1;
}
pci_write_config_byte(hw->pdev, hw->pm_cap + PCI_PM_CTRL, power_control);
sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
return ret;
}
@ -463,16 +473,31 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
}
gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
/* apply fixes in PHY AFE */
gm_phy_write(hw, port, 22, 255);
/* increase differential signal amplitude in 10BASE-T */
gm_phy_write(hw, port, 24, 0xaa99);
gm_phy_write(hw, port, 23, 0x2011);
/* fix for IEEE A/B Symmetry failure in 1000BASE-T */
gm_phy_write(hw, port, 24, 0xa204);
gm_phy_write(hw, port, 23, 0x2002);
/* set page register to 0 */
gm_phy_write(hw, port, 22, 0);
} else {
gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
/* turn on 100 Mbps LED (LED_LINK100) */
ledover |= PHY_M_LED_MO_100(MO_LED_ON);
}
if (ledover)
gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
/* turn on 100 Mbps LED (LED_LINK100) */
ledover |= PHY_M_LED_MO_100(MO_LED_ON);
}
if (ledover)
gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
/* Enable phy interrupt on auto-negotiation complete (or link up) */
if (sky2->autoneg == AUTONEG_ENABLE)
gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
@ -953,6 +978,12 @@ static int sky2_rx_start(struct sky2_port *sky2)
sky2->rx_put = sky2->rx_next = 0;
sky2_qset(hw, rxq);
if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) {
/* MAC Rx RAM Read is controlled by hardware */
sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS);
}
sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
rx_set_checksum(sky2);
@ -1035,9 +1066,10 @@ static int sky2_up(struct net_device *dev)
RB_RST_SET);
sky2_qset(hw, txqaddr[port]);
if (hw->chip_id == CHIP_ID_YUKON_EC_U)
sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
/* Set almost empty threshold */
if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == 1)
sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0);
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
TX_RING_SIZE - 1);
@ -1047,8 +1079,10 @@ static int sky2_up(struct net_device *dev)
goto err_out;
/* Enable interrupts from phy/mac for port */
spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
sky2_write32(hw, B0_IMSK, hw->intr_mask);
spin_unlock_irq(&hw->hw_lock);
return 0;
err_out:
@ -1348,10 +1382,10 @@ static int sky2_down(struct net_device *dev)
netif_stop_queue(dev);
/* Disable port IRQ */
local_irq_disable();
spin_lock_irq(&hw->hw_lock);
hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
sky2_write32(hw, B0_IMSK, hw->intr_mask);
local_irq_enable();
spin_unlock_irq(&hw->hw_lock);
flush_scheduled_work();
@ -1633,10 +1667,10 @@ static void sky2_phy_task(void *arg)
out:
up(&sky2->phy_sema);
local_irq_disable();
spin_lock_irq(&hw->hw_lock);
hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
sky2_write32(hw, B0_IMSK, hw->intr_mask);
local_irq_enable();
spin_unlock_irq(&hw->hw_lock);
}
@ -1863,6 +1897,17 @@ static int sky2_poll(struct net_device *dev0, int *budget)
sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
/*
* Kick the STAT_LEV_TIMER_CTRL timer.
* This fixes my hangs on Yukon-EC (0xb6) rev 1.
* The if clause is there to start the timer only if it has been
* configured correctly and not been disabled via ethtool.
*/
if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) {
sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP);
sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
}
hwidx = sky2_read16(hw, STAT_PUT_IDX);
BUG_ON(hwidx >= STATUS_RING_SIZE);
rmb();
@ -1945,16 +1990,19 @@ exit_loop:
sky2_tx_check(hw, 0, tx_done[0]);
sky2_tx_check(hw, 1, tx_done[1]);
if (likely(work_done < to_do)) {
/* need to restart TX timer */
if (is_ec_a1(hw)) {
sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
}
if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) {
sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
}
if (likely(work_done < to_do)) {
spin_lock_irq(&hw->hw_lock);
__netif_rx_complete(dev0);
netif_rx_complete(dev0);
hw->intr_mask |= Y2_IS_STAT_BMU;
sky2_write32(hw, B0_IMSK, hw->intr_mask);
spin_unlock_irq(&hw->hw_lock);
return 0;
} else {
*budget -= work_done;
@ -2017,13 +2065,13 @@ static void sky2_hw_intr(struct sky2_hw *hw)
if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
u16 pci_err;
pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
pci_err = sky2_pci_read16(hw, PCI_STATUS);
if (net_ratelimit())
printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
pci_name(hw->pdev), pci_err);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_write_config_word(hw->pdev, PCI_STATUS,
sky2_pci_write16(hw, PCI_STATUS,
pci_err | PCI_STATUS_ERROR_BITS);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}
@ -2032,7 +2080,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
/* PCI-Express uncorrectable Error occurred */
u32 pex_err;
pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
if (net_ratelimit())
printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
@ -2040,7 +2088,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
/* clear the interrupt */
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
0xffffffffUL);
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
@ -2086,6 +2134,7 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
sky2_write32(hw, B0_IMSK, hw->intr_mask);
schedule_work(&sky2->phy_task);
}
@ -2099,6 +2148,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
if (status == 0 || status == ~0)
return IRQ_NONE;
spin_lock(&hw->hw_lock);
if (status & Y2_IS_HW_ERR)
sky2_hw_intr(hw);
@ -2127,7 +2177,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
sky2_write32(hw, B0_Y2_SP_ICR, 2);
sky2_read32(hw, B0_IMSK);
spin_unlock(&hw->hw_lock);
return IRQ_HANDLED;
}
@ -2170,7 +2220,7 @@ static int sky2_reset(struct sky2_hw *hw)
{
u16 status;
u8 t8, pmd_type;
int i, err;
int i;
sky2_write8(hw, B0_CTST, CS_RST_CLR);
@ -2192,25 +2242,18 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, B0_CTST, CS_RST_CLR);
/* clear PCI errors, if any */
err = pci_read_config_word(hw->pdev, PCI_STATUS, &status);
if (err)
goto pci_err;
status = sky2_pci_read16(hw, PCI_STATUS);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
err = pci_write_config_word(hw->pdev, PCI_STATUS,
status | PCI_STATUS_ERROR_BITS);
if (err)
goto pci_err;
sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS);
sky2_write8(hw, B0_CTST, CS_MRST_CLR);
/* clear any PEX errors */
if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) {
err = pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT,
0xffffffffUL);
if (err)
goto pci_err;
}
if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
pmd_type = sky2_read8(hw, B2_PMD_TYP);
hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
@ -2309,8 +2352,7 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7));
}
/* enable status unit */
@ -2321,14 +2363,6 @@ static int sky2_reset(struct sky2_hw *hw)
sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
return 0;
pci_err:
/* This is to catch a BIOS bug workaround where
* mmconfig table doesn't have other buses.
*/
printk(KERN_ERR PFX "%s: can't access PCI config space\n",
pci_name(hw->pdev));
return err;
}
static u32 sky2_supported_modes(const struct sky2_hw *hw)
@ -2852,11 +2886,11 @@ static int sky2_set_coalesce(struct net_device *dev,
(ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax))
return -EINVAL;
if (ecmd->tx_max_coalesced_frames > 0xffff)
if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1)
return -EINVAL;
if (ecmd->rx_max_coalesced_frames > 0xff)
if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING)
return -EINVAL;
if (ecmd->rx_max_coalesced_frames_irq > 0xff)
if (ecmd->rx_max_coalesced_frames_irq > RX_MAX_PENDING)
return -EINVAL;
if (ecmd->tx_coalesce_usecs == 0)
@ -3198,17 +3232,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
}
}
#ifdef __BIG_ENDIAN
/* byte swap descriptors in hardware */
{
u32 reg;
pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
reg |= PCI_REV_DESC;
pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
}
#endif
err = -ENOMEM;
hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw) {
@ -3226,6 +3249,18 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_hw;
}
hw->pm_cap = pm_cap;
spin_lock_init(&hw->hw_lock);
#ifdef __BIG_ENDIAN
/* byte swap descriptors in hardware */
{
u32 reg;
reg = sky2_pci_read32(hw, PCI_DEV_REG2);
reg |= PCI_REV_DESC;
sky2_pci_write32(hw, PCI_DEV_REG2, reg);
}
#endif
/* ring for status responses */
hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,

View File

@ -5,14 +5,22 @@
#define _SKY2_H
/* PCI config registers */
#define PCI_DEV_REG1 0x40
#define PCI_DEV_REG2 0x44
#define PCI_DEV_STATUS 0x7c
#define PCI_OS_PCI_X (1<<26)
enum {
PCI_DEV_REG1 = 0x40,
PCI_DEV_REG2 = 0x44,
PCI_DEV_STATUS = 0x7c,
PCI_DEV_REG3 = 0x80,
PCI_DEV_REG4 = 0x84,
PCI_DEV_REG5 = 0x88,
};
#define PEX_LNK_STAT 0xf2
#define PEX_UNC_ERR_STAT 0x104
#define PEX_DEV_CTRL 0xe8
enum {
PEX_DEV_CAP = 0xe4,
PEX_DEV_CTRL = 0xe8,
PEX_DEV_STA = 0xea,
PEX_LNK_STAT = 0xf2,
PEX_UNC_ERR_STAT= 0x104,
};
/* Yukon-2 */
enum pci_dev_reg_1 {
@ -37,6 +45,25 @@ enum pci_dev_reg_2 {
PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
};
/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
enum pci_dev_reg_4 {
/* (Link Training & Status State Machine) */
P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */
/* (Active State Power Management) */
P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */
P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */
P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */
P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */
P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */
P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */
P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */
P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */
P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */
P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN
| P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY,
};
#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
PCI_STATUS_SIG_SYSTEM_ERROR | \
@ -507,6 +534,16 @@ enum {
};
#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
/* Q_F 32 bit Flag Register */
enum {
F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */
F_EMPTY = 1<<27, /* Tx FIFO: empty flag */
F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */
F_WM_REACHED = 1<<25, /* Watermark reached */
F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */
F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */
F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */
};
/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
enum {
@ -909,10 +946,12 @@ enum {
PHY_BCOM_ID1_C0 = 0x6044,
PHY_BCOM_ID1_C5 = 0x6047,
PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */
PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */
PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */
PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */
PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) */
};
/* Advertisement register bits */
@ -1837,8 +1876,9 @@ struct sky2_port {
struct sky2_hw {
void __iomem *regs;
struct pci_dev *pdev;
u32 intr_mask;
struct net_device *dev[2];
spinlock_t hw_lock;
u32 intr_mask;
int pm_cap;
int msi;
@ -1912,4 +1952,25 @@ static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
}
/* PCI config space access */
static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg)
{
return sky2_read32(hw, Y2_CFG_SPC + reg);
}
static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg)
{
return sky2_read16(hw, Y2_CFG_SPC + reg);
}
static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val)
{
sky2_write32(hw, Y2_CFG_SPC + reg, val);
}
static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val)
{
sky2_write16(hw, Y2_CFG_SPC + reg, val);
}
#endif
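These helpers formalize the conversion visible throughout the diff: PCI config registers are reached through the chip's memory-mapped window at Y2_CFG_SPC rather than through the PCI core, whose config cycles the removed pci_err path in sky2_reset() had to treat as fallible. A before/after sketch using a line from sky2_set_power_state():
	/* before: config cycle via the PCI core, return value ignored or checked */
	pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg1);
	/* after: same register through the mapped config window, cannot fail */
	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);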

View File

@ -4636,9 +4636,9 @@ static void ipw_rx_notification(struct ipw_priv *priv,
}
default:
IPW_ERROR("Unknown notification: "
"subtype=%d,flags=0x%2x,size=%d\n",
notif->subtype, notif->flags, notif->size);
IPW_DEBUG_NOTIF("Unknown notification: "
"subtype=%d,flags=0x%2x,size=%d\n",
notif->subtype, notif->flags, notif->size);
}
}

View File

@ -55,21 +55,13 @@ config DASD_DIAG
Disks under VM. If you are not running under VM or unsure what it is,
say "N".
config DASD_EER
tristate "Extended error reporting (EER)"
depends on DASD
help
This driver provides a character device interface to the
DASD extended error reporting. This is only needed if you want to
use applications written for the EER facility.
config DASD_CMB
tristate "Compatibility interface for DASD channel measurement blocks"
depends on DASD
help
This driver provides an additional interface to the channel
measurement facility, which is normally accessed through sysfs, with
a set of ioctl functions specific to the dasd driver.
This driver provides an additional interface to the channel measurement
facility, which is normally accessed through sysfs, with a set of
ioctl functions specific to the dasd driver.
This is only needed if you want to use applications written for
linux-2.4 dasd channel measurement facility interface.

View File

@ -5,7 +5,6 @@
dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o
dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o
dasd_diag_mod-objs := dasd_diag.o
dasd_eer_mod-objs := dasd_eer.o
dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \
dasd_genhd.o dasd_erp.o
@ -14,6 +13,5 @@ obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o
obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o
obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o
obj-$(CONFIG_DASD_CMB) += dasd_cmb.o
obj-$(CONFIG_DASD_EER) += dasd_eer.o
obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o
obj-$(CONFIG_DCSSBLK) += dcssblk.o

View File

@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/notifier.h>
#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
@ -58,7 +57,6 @@ static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
static void dasd_flush_ccw_queue(struct dasd_device *, int);
static void dasd_tasklet(struct dasd_device *);
static void do_kick_device(void *data);
static void dasd_disable_eer(struct dasd_device *device);
/*
* SECTION: Operations on the device structure.
@ -153,10 +151,13 @@ dasd_state_new_to_known(struct dasd_device *device)
static inline void
dasd_state_known_to_new(struct dasd_device * device)
{
/* disable extended error reporting for this device */
dasd_disable_eer(device);
/* Forget the discipline information. */
if (device->discipline)
module_put(device->discipline->owner);
device->discipline = NULL;
if (device->base_discipline)
module_put(device->base_discipline->owner);
device->base_discipline = NULL;
device->state = DASD_STATE_NEW;
dasd_free_queue(device);
@ -871,9 +872,6 @@ dasd_handle_state_change_pending(struct dasd_device *device)
struct dasd_ccw_req *cqr;
struct list_head *l, *n;
/* first of all call extended error reporting */
dasd_write_eer_trigger(DASD_EER_STATECHANGE, device, NULL);
device->stopped &= ~DASD_STOPPED_PENDING;
/* restart all 'running' IO on queue */
@ -1093,19 +1091,6 @@ restart:
}
goto restart;
}
/* first of all call extended error reporting */
if (device->eer && cqr->status == DASD_CQR_FAILED) {
dasd_write_eer_trigger(DASD_EER_FATALERROR,
device, cqr);
/* restart request */
cqr->status = DASD_CQR_QUEUED;
cqr->retries = 255;
device->stopped |= DASD_STOPPED_QUIESCE;
goto restart;
}
/* Process finished ERP request. */
if (cqr->refers) {
__dasd_process_erp(device, cqr);
@ -1243,8 +1228,7 @@ __dasd_start_head(struct dasd_device * device)
cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
/* check FAILFAST */
if (device->stopped & ~DASD_STOPPED_PENDING &&
test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
(!device->eer)) {
test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) {
cqr->status = DASD_CQR_FAILED;
dasd_schedule_bh(device);
}
@ -1880,9 +1864,10 @@ dasd_generic_remove (struct ccw_device *cdev)
*/
int
dasd_generic_set_online (struct ccw_device *cdev,
struct dasd_discipline *discipline)
struct dasd_discipline *base_discipline)
{
struct dasd_discipline *discipline;
struct dasd_device *device;
int rc;
@ -1890,6 +1875,7 @@ dasd_generic_set_online (struct ccw_device *cdev,
if (IS_ERR(device))
return PTR_ERR(device);
discipline = base_discipline;
if (device->features & DASD_FEATURE_USEDIAG) {
if (!dasd_diag_discipline_pointer) {
printk (KERN_WARNING
@ -1901,6 +1887,16 @@ dasd_generic_set_online (struct ccw_device *cdev,
}
discipline = dasd_diag_discipline_pointer;
}
if (!try_module_get(base_discipline->owner)) {
dasd_delete_device(device);
return -EINVAL;
}
if (!try_module_get(discipline->owner)) {
module_put(base_discipline->owner);
dasd_delete_device(device);
return -EINVAL;
}
device->base_discipline = base_discipline;
device->discipline = discipline;
rc = discipline->check_device(device);
@ -1909,6 +1905,8 @@ dasd_generic_set_online (struct ccw_device *cdev,
"dasd_generic couldn't online device %s "
"with discipline %s rc=%i\n",
cdev->dev.bus_id, discipline->name, rc);
module_put(discipline->owner);
module_put(base_discipline->owner);
dasd_delete_device(device);
return rc;
}
@ -1986,9 +1984,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event)
switch (event) {
case CIO_GONE:
case CIO_NO_PATH:
/* first of all call extended error reporting */
dasd_write_eer_trigger(DASD_EER_NOPATH, device, NULL);
if (device->state < DASD_STATE_BASIC)
break;
/* Device is active. We want to keep it. */
@ -2046,51 +2041,6 @@ dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver)
put_driver(drv);
}
/*
* notifications for extended error reports
*/
static struct notifier_block *dasd_eer_chain;
int
dasd_register_eer_notifier(struct notifier_block *nb)
{
return notifier_chain_register(&dasd_eer_chain, nb);
}
int
dasd_unregister_eer_notifier(struct notifier_block *nb)
{
return notifier_chain_unregister(&dasd_eer_chain, nb);
}
/*
* Notify the registered error reporting module of a problem
*/
void
dasd_write_eer_trigger(unsigned int id, struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
if (device->eer) {
struct dasd_eer_trigger temp;
temp.id = id;
temp.device = device;
temp.cqr = cqr;
notifier_call_chain(&dasd_eer_chain, DASD_EER_TRIGGER,
(void *)&temp);
}
}
/*
* Tell the registered error reporting module to disable error reporting for
* a given device and to cleanup any private data structures on that device.
*/
static void
dasd_disable_eer(struct dasd_device *device)
{
notifier_call_chain(&dasd_eer_chain, DASD_EER_DISABLE, (void *)device);
}
static int __init
dasd_init(void)
{
@ -2172,11 +2122,6 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_auto_online);
EXPORT_SYMBOL(dasd_register_eer_notifier);
EXPORT_SYMBOL(dasd_unregister_eer_notifier);
EXPORT_SYMBOL(dasd_write_eer_trigger);
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically

View File

@ -1108,9 +1108,6 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
case 0x0B:
DEV_MESSAGE(KERN_WARNING, device, "%s",
"FORMAT F - Volume is suspended duplex");
/* call extended error reporting (EER) */
dasd_write_eer_trigger(DASD_EER_PPRCSUSPEND, device,
erp->refers);
break;
case 0x0C:
DEV_MESSAGE(KERN_WARNING, device, "%s",

View File

@ -29,7 +29,6 @@
#define DASD_ECKD_CCW_PSF 0x27
#define DASD_ECKD_CCW_RSSD 0x3e
#define DASD_ECKD_CCW_LOCATE_RECORD 0x47
#define DASD_ECKD_CCW_SNSS 0x54
#define DASD_ECKD_CCW_DEFINE_EXTENT 0x63
#define DASD_ECKD_CCW_WRITE_MT 0x85
#define DASD_ECKD_CCW_READ_MT 0x86

File diff suppressed because it is too large

View File

@ -275,34 +275,6 @@ struct dasd_discipline {
extern struct dasd_discipline *dasd_diag_discipline_pointer;
/*
* Notification numbers for extended error reporting notifications:
* The DASD_EER_DISABLE notification is sent before a dasd_device (and it's
* eer pointer) is freed. The error reporting module needs to do all necessary
* cleanup steps.
* The DASD_EER_TRIGGER notification sends the actual error reports (triggers).
*/
#define DASD_EER_DISABLE 0
#define DASD_EER_TRIGGER 1
/* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */
#define DASD_EER_FATALERROR 1
#define DASD_EER_NOPATH 2
#define DASD_EER_STATECHANGE 3
#define DASD_EER_PPRCSUSPEND 4
/*
* The dasd_eer_trigger structure contains all data that we need to send
* along with an DASD_EER_TRIGGER notification.
*/
struct dasd_eer_trigger {
unsigned int id;
struct dasd_device *device;
struct dasd_ccw_req *cqr;
};
struct dasd_device {
/* Block device stuff. */
struct gendisk *gdp;
@ -316,11 +288,9 @@ struct dasd_device {
unsigned long flags; /* per device flags */
unsigned short features; /* copy of devmap-features (read-only!) */
/* extended error reporting stuff (eer) */
void *eer;
/* Device discipline stuff. */
struct dasd_discipline *discipline;
struct dasd_discipline *base_discipline;
char *private;
/* Device state and target state. */
@ -519,12 +489,6 @@ int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *);
int dasd_generic_set_offline (struct ccw_device *cdev);
int dasd_generic_notify(struct ccw_device *, int);
void dasd_generic_auto_online (struct ccw_driver *);
int dasd_register_eer_notifier(struct notifier_block *);
int dasd_unregister_eer_notifier(struct notifier_block *);
void dasd_write_eer_trigger(unsigned int , struct dasd_device *,
struct dasd_ccw_req *);
/* externals in dasd_devmap.c */
extern int dasd_max_devindex;

View File

@ -165,8 +165,13 @@ qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
q_no = q->q_no;
if(!q->is_input_q)
q_no += irq->no_input_qs;
again:
ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
rc = qdio_check_ccq(q, ccq);
if (rc == 1) {
QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
goto again;
}
if (rc < 0) {
QDIO_DBF_TEXT2(1,trace,"eqberr");
sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
@ -195,8 +200,13 @@ qdio_do_sqbs(struct qdio_q *q, unsigned char state,
q_no = q->q_no;
if(!q->is_input_q)
q_no += irq->no_input_qs;
again:
ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
rc = qdio_check_ccq(q, ccq);
if (rc == 1) {
QDIO_DBF_TEXT5(1,trace,"sqAGAIN");
goto again;
}
if (rc < 0) {
QDIO_DBF_TEXT3(1,trace,"sqberr");
sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no);
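Both helpers now reissue the buffer-state instruction whenever qdio_check_ccq() returns 1, meaning the CCQ reported that not all buffers were processed yet. The shape of that retry, as a standalone sketch with stand-ins for do_eqbs()/do_sqbs() and qdio_check_ccq():

	#include <stdio.h>

	static int issue_once(void)		/* stand-in for do_eqbs()/do_sqbs() */
	{
		static int calls;
		return (++calls < 3) ? 96 : 0;	/* illustrative "partially done" ccq */
	}

	static int check_ccq(int ccq)		/* stand-in for qdio_check_ccq() */
	{
		return (ccq == 96) ? 1 : 0;	/* 1 == try again, 0 == done */
	}

	int main(void)
	{
		int rc;

		do {				/* mirrors the "goto again" loops */
			rc = check_ccq(issue_once());
		} while (rc == 1);
		printf("completed, rc=%d\n", rc);
		return 0;
	}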
@ -1187,8 +1197,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
if (!no_used)
return 1;
if (!q->siga_sync)
if (!q->siga_sync && !irq->is_qebsm)
/* we'll check for more primed buffers in qeth_stop_polling */
return 0;
if (irq->is_qebsm) {

View File

@ -2068,14 +2068,12 @@ static int esp_reset(struct scsi_cmnd *SCptr)
{
struct esp *esp = (struct esp *) SCptr->device->host->hostdata;
spin_lock_irq(esp->ehost->host_lock);
(void) esp_do_resetbus(esp);
spin_unlock_irq(esp->ehost->host_lock);
wait_event(esp->reset_queue, (esp->resetting_bus == 0));
spin_lock_irq(esp->ehost->host_lock);
return SUCCESS;
}
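The unlock/lock pair around wait_event() is the point of this change: wait_event() may sleep, and sleeping while holding a spinlock with interrupts disabled is forbidden. A sketch of the general pattern, with illustrative names:

	/* General shape (sketch): never sleep while holding a spinlock. */
	spin_lock_irq(&lock);
	start_async_operation();	/* state change made under the lock */
	spin_unlock_irq(&lock);		/* drop the lock: the wait may sleep */
	wait_event(wq, operation_done);	/* schedules until the condition holds */
	spin_lock_irq(&lock);		/* retake it if the caller expects that */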

View File

@ -2514,7 +2514,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
assert(sg != NULL);
if (qc->flags & ATA_QCFLAG_SINGLE)
assert(qc->n_elem == 1);
assert(qc->n_elem <= 1);
VPRINTK("unmapping %u sg elements\n", qc->n_elem);
@ -2537,7 +2537,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
kunmap_atomic(addr, KM_IRQ0);
}
} else {
if (sg_dma_len(&sg[0]) > 0)
if (qc->n_elem)
dma_unmap_single(ap->host_set->dev,
sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
dir);
@ -2570,7 +2570,7 @@ static void ata_fill_sg(struct ata_queued_cmd *qc)
unsigned int idx;
assert(qc->__sg != NULL);
assert(qc->n_elem > 0);
assert(qc->n_elem > 0 || qc->pad_len > 0);
idx = 0;
ata_for_each_sg(sg, qc) {
@ -2715,6 +2715,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
int dir = qc->dma_dir;
struct scatterlist *sg = qc->__sg;
dma_addr_t dma_address;
int trim_sg = 0;
/* we must lengthen transfers to end on a 32-bit boundary */
qc->pad_len = sg->length & 3;
@ -2734,13 +2735,15 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
sg_dma_len(psg) = ATA_DMA_PAD_SZ;
/* trim sg */
sg->length -= qc->pad_len;
if (sg->length == 0)
trim_sg = 1;
DPRINTK("padding done, sg->length=%u pad_len=%u\n",
sg->length, qc->pad_len);
}
if (!sg->length) {
sg_dma_address(sg) = 0;
if (trim_sg) {
qc->n_elem--;
goto skip_map;
}
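The arithmetic is easiest to see with numbers: pad_len = sg->length & 3 is how far the transfer runs past a 32-bit boundary; the main entry is trimmed by that much, and the trimmed bytes travel through the 4-byte pad buffer instead. When the trim consumes the whole segment (length < 4), the new trim_sg path drops the main entry entirely, which is what the n_elem-- above handles. A standalone illustration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int length = 4093;		/* illustrative transfer size */
		unsigned int pad_len = length & 3;	/* 1 byte past a 32-bit boundary */

		printf("main sg: %u bytes, pad sg carries %u byte(s)\n",
		       length - pad_len, pad_len);
		return 0;
	}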
@ -2753,9 +2756,9 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
}
sg_dma_address(sg) = dma_address;
skip_map:
sg_dma_len(sg) = sg->length;
skip_map:
DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

View File

@ -277,7 +277,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
u8 *prd = pp->pkt + QS_CPB_BYTES;
assert(qc->__sg != NULL);
assert(qc->n_elem > 0);
assert(qc->n_elem > 0 || qc->pad_len > 0);
nelem = 0;
ata_for_each_sg(sg, qc) {

View File

@ -90,7 +90,7 @@ static int spi_suspend(struct device *dev, pm_message_t message)
int value;
struct spi_driver *drv = to_spi_driver(dev->driver);
if (!drv->suspend)
if (!drv || !drv->suspend)
return 0;
/* suspend will stop irqs and dma; no more i/o */
@ -105,7 +105,7 @@ static int spi_resume(struct device *dev)
int value;
struct spi_driver *drv = to_spi_driver(dev->driver);
if (!drv->resume)
if (!drv || !drv->resume)
return 0;
/* resume may restart the i/o queue */
@ -449,7 +449,6 @@ void spi_unregister_master(struct spi_master *master)
{
(void) device_for_each_child(master->cdev.dev, NULL, __unregister);
class_device_unregister(&master->cdev);
master->cdev.dev = NULL;
}
EXPORT_SYMBOL_GPL(spi_unregister_master);

View File

@ -1321,8 +1321,6 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo)
mdelay( 15);
}
#ifdef CONFIG_PPC_OF
static void radeon_pm_reset_pad_ctlr_strength(struct radeonfb_info *rinfo)
{
u32 tmp, tmp2;
@ -1836,6 +1834,8 @@ static void radeon_reinitialize_M10(struct radeonfb_info *rinfo)
radeon_pm_m10_enable_lvds_spread_spectrum(rinfo);
}
#ifdef CONFIG_PPC_OF
static void radeon_pm_m9p_reconfigure_mc(struct radeonfb_info *rinfo)
{
OUTREG(MC_CNTL, rinfo->save_regs[46]);
@ -2728,13 +2728,23 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk)
printk("radeonfb: Dynamic Clock Power Management disabled\n");
}
#if defined(CONFIG_PM)
/* Check if we can power manage on suspend/resume. We can do
* D2 on M6, M7 and M9, and we can resume from D3 cold on a few other
* "Mac" cards, but that's all. We need more info about what the
* BIOS does, though. Right now, all this PM stuff is pmac-only for that
* reason. --BenH
*/
#if defined(CONFIG_PM) && defined(CONFIG_PPC_PMAC)
/* Special case for Samsung P35 laptops
*/
if ((rinfo->pdev->vendor == PCI_VENDOR_ID_ATI) &&
(rinfo->pdev->device == PCI_CHIP_RV350_NP) &&
(rinfo->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG) &&
(rinfo->pdev->subsystem_device == 0xc00c)) {
rinfo->reinit_func = radeon_reinitialize_M10;
rinfo->pm_mode |= radeon_pm_off;
}
#if defined(CONFIG_PPC_PMAC)
if (_machine == _MACH_Pmac && rinfo->of_node) {
if (rinfo->is_mobility && rinfo->pm_reg &&
rinfo->family <= CHIP_FAMILY_RV250)
@ -2778,7 +2788,8 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk)
OUTREG(TV_DAC_CNTL, INREG(TV_DAC_CNTL) | 0x07000000);
#endif
}
#endif /* defined(CONFIG_PM) && defined(CONFIG_PPC_PMAC) */
#endif /* defined(CONFIG_PPC_PMAC) */
#endif /* defined(CONFIG_PM) */
}
void radeonfb_pm_exit(struct radeonfb_info *rinfo)

View File

@ -66,7 +66,7 @@ static match_table_t tokens = {
{Opt_afid, "afid=%u"},
{Opt_rfdno, "rfdno=%u"},
{Opt_wfdno, "wfdno=%u"},
{Opt_debug, "debug=%u"},
{Opt_debug, "debug=%x"},
{Opt_name, "name=%s"},
{Opt_remotename, "aname=%s"},
{Opt_unix, "proto=unix"},
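With the token switched from %u to %x, the debug bitmask is given in hexadecimal. A hedged sketch of parsing such an option with the match_token() machinery (simplified; assumes match_hex() from <linux/parser.h>):

	#include <linux/errno.h>
	#include <linux/parser.h>

	enum { Opt_debug, Opt_err };

	static match_table_t tokens = {
		{Opt_debug, "debug=%x"},
		{Opt_err, NULL}
	};

	/* Sketch: store the hex debug level in *level, or return -EINVAL. */
	static int parse_debug(char *opt, int *level)
	{
		substring_t args[MAX_OPT_ARGS];

		if (match_token(opt, tokens, args) != Opt_debug)
			return -EINVAL;
		return match_hex(&args[0], level);
	}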

View File

@ -1048,13 +1048,14 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
cifs_small_buf_release(iov[0].iov_base);
else if(resp_buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
} else /* return buffer to caller to free */ /* BB FIXME how do we tell caller if it is not a large buffer */ {
*buf = iov[0].iov_base;
} else if(resp_buf_type != CIFS_NO_BUFFER) {
/* return buffer to caller to free */
*buf = iov[0].iov_base;
if(resp_buf_type == CIFS_SMALL_BUFFER)
*pbuf_type = CIFS_SMALL_BUFFER;
else if(resp_buf_type == CIFS_LARGE_BUFFER)
*pbuf_type = CIFS_LARGE_BUFFER;
}
} /* else no valid buffer on return - leave as null */
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */

View File

@ -1795,10 +1795,10 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
conjunction with 52K kvec constraint on arch with 4K
page size */
if(cifs_sb->rsize < PAGE_CACHE_SIZE) {
cifs_sb->rsize = PAGE_CACHE_SIZE;
/* Windows ME does this */
cFYI(1,("Attempt to set readsize for mount to less than one page (4096)"));
if(cifs_sb->rsize < 2048) {
cifs_sb->rsize = 2048;
/* Windows ME may prefer this */
cFYI(1,("readsize set to minimum 2048"));
}
cifs_sb->mnt_uid = volume_info.linux_uid;
cifs_sb->mnt_gid = volume_info.linux_gid;

View File

@ -204,10 +204,6 @@ int proc_fill_super(struct super_block *s, void *data, int silent)
root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
if (!root_inode)
goto out_no_root;
/*
* Fixup the root inode's nlink value
*/
root_inode->i_nlink += nr_processes();
root_inode->i_uid = 0;
root_inode->i_gid = 0;
s->s_root = d_alloc_root(root_inode);

View File

@ -80,16 +80,16 @@ void __init proc_root_init(void)
proc_bus = proc_mkdir("bus", NULL);
}
static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
generic_fillattr(dentry->d_inode, stat);
stat->nlink = proc_root.nlink + nr_processes();
return 0;
}
static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
{
/*
* nr_threads is actually protected by the tasklist_lock;
* however, it's conventional to do reads, especially for
* reporting, without any locking whatsoever.
*/
if (dir->i_ino == PROC_ROOT_INO) /* check for safety... */
dir->i_nlink = proc_root.nlink + nr_threads;
if (!proc_lookup(dir, dentry, nd)) {
return NULL;
}
@ -134,6 +134,7 @@ static struct file_operations proc_root_operations = {
*/
static struct inode_operations proc_root_inode_operations = {
.lookup = proc_root_lookup,
.getattr = proc_root_getattr,
};
/*
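With the getattr hook in place, the root inode's link count is computed on demand (and from nr_processes() rather than nr_threads, matching the per-process directories) instead of being patched into the inode during lookup. A standalone userspace illustration, assuming a mounted /proc:

	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		struct stat st;

		if (stat("/proc", &st) != 0)
			return 1;
		/* nlink = fixed /proc entries + number of running processes */
		printf("/proc nlink = %lu\n", (unsigned long)st.st_nlink);
		return 0;
	}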

View File

@ -666,6 +666,16 @@ static int test_bdev_super(struct super_block *s, void *data)
return (void *)s->s_bdev == data;
}
static void bdev_uevent(struct block_device *bdev, enum kobject_action action)
{
if (bdev->bd_disk) {
if (bdev->bd_part)
kobject_uevent(&bdev->bd_part->kobj, action);
else
kobject_uevent(&bdev->bd_disk->kobj, action);
}
}
struct super_block *get_sb_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int))
@ -707,8 +717,10 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type,
up_write(&s->s_umount);
deactivate_super(s);
s = ERR_PTR(error);
} else
} else {
s->s_flags |= MS_ACTIVE;
bdev_uevent(bdev, KOBJ_MOUNT);
}
}
return s;
@ -724,6 +736,7 @@ void kill_block_super(struct super_block *sb)
{
struct block_device *bdev = sb->s_bdev;
bdev_uevent(bdev, KOBJ_UMOUNT);
generic_shutdown_super(sb);
sync_blockdev(bdev);
close_bdev_excl(bdev);

View File

@ -183,6 +183,7 @@ extern int at91_set_B_periph(unsigned pin, int use_pullup);
extern int at91_set_gpio_input(unsigned pin, int use_pullup);
extern int at91_set_gpio_output(unsigned pin, int value);
extern int at91_set_deglitch(unsigned pin, int is_on);
extern int at91_set_multi_drive(unsigned pin, int is_on);
/* callable at any time */
extern int at91_set_gpio_value(unsigned pin, int value);

View File

@ -19,8 +19,8 @@
#error "Do not include this directly, instead #include <asm/hardware.h>"
#endif
#define NAS100D_SDA_PIN 6
#define NAS100D_SCL_PIN 5
#define NAS100D_SDA_PIN 5
#define NAS100D_SCL_PIN 6
/*
* NAS100D PCI IRQs

View File

@ -239,7 +239,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
" bra 2f; \n"
" .fillinsn \n"
"1:"
M32R_UNLOCK" %2, @%1; \n"
M32R_UNLOCK" %0, @%1; \n"
" .fillinsn \n"
"2:"
: "=&r" (retval)

View File

@ -79,7 +79,7 @@ static __inline__ int irq_canonicalize(int irq)
extern void (*enable_irq)(unsigned int);
extern void (*disable_irq)(unsigned int);
#define enable_irq_nosync enable_irq
#define disable_irq_nosync disable_irq
struct pt_regs;

View File

@ -336,6 +336,7 @@ static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
: "d0", "a0", "a1", "d6");
}
#define __raw_writel raw_outl
#endif /* __KERNEL__ */

View File

@ -233,7 +233,7 @@ do { \
#define __get_user_check(x,ptr,size) \
({ \
long __gu_err = -EFAULT; \
const void __user * __gu_ptr = (ptr); \
const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
\
if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
__get_user_common((x), size, __gu_ptr); \
@ -258,7 +258,7 @@ do { \
: "=r" (__gu_err), "=r" (__gu_tmp) \
: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
\
(val) = (__typeof__(val)) __gu_tmp; \
(val) = (__typeof__(*(addr))) __gu_tmp; \
}
/*
@ -284,7 +284,7 @@ do { \
" .previous \n" \
: "=r" (__gu_err), "=&r" (__gu_tmp) \
: "0" (0), "r" (addr), "i" (-EFAULT)); \
(val) = __gu_tmp; \
(val) = (__typeof__(*(addr))) __gu_tmp; \
}
/*
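The point of casting through __typeof__(*(addr)) is that the fetched bits are first converted to the source operand's real type, so signedness and width are honored before the final assignment to val. A standalone illustration of the difference on a 64-bit build (GCC __typeof__):

	#include <stdio.h>

	int main(void)
	{
		int src = -1;
		int *addr = &src;
		unsigned long __gu_tmp = 0xffffffffUL;	/* raw bits, as the asm leaves them */

		long wrong = __gu_tmp;				/* 4294967295: no sign extension */
		long right = (__typeof__(*addr))__gu_tmp;	/* -1: converted via int first */

		printf("wrong=%ld right=%ld\n", wrong, right);
		return 0;
	}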

View File

@ -313,7 +313,7 @@
#define __NR_mknodat (__NR_Linux + 290)
#define __NR_fchownat (__NR_Linux + 291)
#define __NR_futimesat (__NR_Linux + 292)
#define __NR_newfstatat (__NR_Linux + 293)
#define __NR_fstatat (__NR_Linux + 293)
#define __NR_unlinkat (__NR_Linux + 294)
#define __NR_renameat (__NR_Linux + 295)
#define __NR_linkat (__NR_Linux + 296)
@ -593,7 +593,7 @@
#define __NR_mknodat (__NR_Linux + 249)
#define __NR_fchownat (__NR_Linux + 250)
#define __NR_futimesat (__NR_Linux + 251)
#define __NR_newfstatat (__NR_Linux + 252)
#define __NR_fstatat (__NR_Linux + 252)
#define __NR_unlinkat (__NR_Linux + 253)
#define __NR_renameat (__NR_Linux + 254)
#define __NR_linkat (__NR_Linux + 255)
@ -877,7 +877,7 @@
#define __NR_mknodat (__NR_Linux + 253)
#define __NR_fchownat (__NR_Linux + 254)
#define __NR_futimesat (__NR_Linux + 255)
#define __NR_newfstatat (__NR_Linux + 256)
#define __NR_fstatat (__NR_Linux + 256)
#define __NR_unlinkat (__NR_Linux + 257)
#define __NR_renameat (__NR_Linux + 258)
#define __NR_linkat (__NR_Linux + 259)

View File

@ -154,19 +154,6 @@ extern char cmd_line[COMMAND_LINE_SIZE];
extern void setup_pci_ptrs(void);
/*
* Power macintoshes have either a CUDA or a PMU controlling
* system reset, power, NVRAM, RTC.
*/
typedef enum sys_ctrler_kind {
SYS_CTRLER_UNKNOWN = 0,
SYS_CTRLER_CUDA = 1,
SYS_CTRLER_PMU = 2,
SYS_CTRLER_SMU = 3,
} sys_ctrler_t;
extern sys_ctrler_t sys_ctrler;
#ifdef CONFIG_SMP
struct smp_ops_t {
void (*message_pass)(int target, int msg);

View File

@ -204,8 +204,7 @@ typedef struct attrib_data_t {
*
* Here is how the ioctl-nr should be used:
* 0 - 31 DASD driver itself
* 32 - 229 still open
* 230 - 239 DASD extended error reporting
* 32 - 239 still open
* 240 - 255 reserved for EMC
*******************************************************************************/
@ -237,22 +236,12 @@ typedef struct attrib_data_t {
#define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t)
/* Get Attributes (cache operations) */
#define BIODASDGATTR _IOR(DASD_IOCTL_LETTER,5,attrib_data_t)
/* retrieve extended error-reporting value */
#define BIODASDEERGET _IOR(DASD_IOCTL_LETTER,6,int)
/* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */
#define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t)
/* Set Attributes (cache operations) */
#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t)
/* retrieve extended error-reporting value */
#define BIODASDEERSET _IOW(DASD_IOCTL_LETTER,3,int)
/* remove all records from the eer buffer */
#define DASD_EER_PURGE _IO(DASD_IOCTL_LETTER,230)
/* set the number of pages that are used for the internal eer buffer */
#define DASD_EER_SETBUFSIZE _IOW(DASD_IOCTL_LETTER,230,int)
#endif /* DASD_H */

View File

@ -427,7 +427,8 @@ extern int acpi_mp_config;
extern struct acpi_table_mcfg_config *pci_mmcfg_config;
extern int pci_mmcfg_config_num;
extern int sbf_port ;
extern int sbf_port;
extern unsigned long acpi_video_flags;
#else /* !CONFIG_ACPI */

View File

@ -41,8 +41,10 @@ enum kobject_action {
KOBJ_ADD = (__force kobject_action_t) 0x01, /* exclusive to core */
KOBJ_REMOVE = (__force kobject_action_t) 0x02, /* exclusive to core */
KOBJ_CHANGE = (__force kobject_action_t) 0x03, /* device state change */
KOBJ_OFFLINE = (__force kobject_action_t) 0x04, /* device offline */
KOBJ_ONLINE = (__force kobject_action_t) 0x05, /* device online */
KOBJ_MOUNT = (__force kobject_action_t) 0x04, /* mount event for block devices (broken) */
KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */
KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */
KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */
};
struct kobject {

View File

@ -556,6 +556,16 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
return 0;
}
static inline struct scatterlist *
ata_qc_first_sg(struct ata_queued_cmd *qc)
{
if (qc->n_elem)
return qc->__sg;
if (qc->pad_len)
return &qc->pad_sgent;
return NULL;
}
static inline struct scatterlist *
ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
@ -563,11 +573,13 @@ ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
return NULL;
if (++sg - qc->__sg < qc->n_elem)
return sg;
return qc->pad_len ? &qc->pad_sgent : NULL;
if (qc->pad_len)
return &qc->pad_sgent;
return NULL;
}
#define ata_for_each_sg(sg, qc) \
for (sg = qc->__sg; sg; sg = ata_qc_next_sg(sg, qc))
for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
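Starting the walk from ata_qc_first_sg() means a command whose data lives entirely in the pad buffer (n_elem == 0, pad_len > 0) is iterated like any other. A minimal in-kernel sketch of the intended use (not a real libata function):

	/* Sketch: sum the DMA length of every segment, pad entry included. */
	static unsigned int qc_total_dma_len(struct ata_queued_cmd *qc)
	{
		struct scatterlist *sg;
		unsigned int total = 0;

		ata_for_each_sg(sg, qc)
			total += sg_dma_len(sg);
		return total;
	}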
static inline unsigned int ata_tag_valid(unsigned int tag)
{

View File

@ -1051,7 +1051,11 @@ int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
void drop_pagecache(void);
void drop_slab(void);
#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */

View File

@ -49,7 +49,7 @@ struct mmc_command {
/*
* These are the command types.
*/
#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_TYPE)
#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK)
unsigned int retries; /* max number of retries */
unsigned int error; /* command error */

Some files were not shown because too many files have changed in this diff