dect / linux-2.6 (archived)

Merge branch 'x86/urgent' into x86/iommu

Ingo Molnar 2008-12-01 20:36:13 +01:00
commit f6d2e6f57b
163 changed files with 1210 additions and 899 deletions

View File

@@ -294,7 +294,9 @@ and is between 256 and 4096 characters. It is defined in the file
 	Possible values are:
 	isolate - enable device isolation (each device, as far
 		  as possible, will get its own protection
-		  domain)
+		  domain) [default]
+	share - put every device behind one IOMMU into the
+		  same protection domain
 	fullflush - enable flushing of IO/TLB entries when
 		  they are unmapped. Otherwise they are
 		  flushed before they will be reused, which

@@ -1193,8 +1195,8 @@ and is between 256 and 4096 characters. It is defined in the file
 	it is equivalent to "nosmp", which also disables
 	the IO APIC.
-max_addr=[KMG]	[KNL,BOOT,ia64] All physical memory greater than or
-		equal to this physical address is ignored.
+max_addr=nn[KMG]	[KNL,BOOT,ia64] All physical memory greater than
+		or equal to this physical address is ignored.
 max_luns=	[SCSI] Maximum number of LUNs to probe.
 		Should be between 1 and 2^32-1.

@@ -1294,6 +1296,9 @@ and is between 256 and 4096 characters. It is defined in the file
 mga=		[HW,DRM]
+min_addr=nn[KMG]	[KNL,BOOT,ia64] All physical memory below this
+		physical address is ignored.
 mminit_loglevel=
 	[KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
 	parameter allows control of the logging verbosity for
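For orientation only, a hedged sketch of how the boot parameters documented above might appear on a kernel command line. The option names ("isolate", "share", "fullflush", max_addr=nn[KMG], min_addr=nn[KMG]) come from the documentation text above; the concrete values and the pairing of options are purely illustrative.

    # x86 with an AMD IOMMU: isolation is now the default; "share" would put
    # every device behind one IOMMU into one protection domain, "fullflush"
    # flushes IO/TLB entries when they are unmapped
    amd_iommu=fullflush

    # ia64 physical memory limits using the nn[KMG] syntax documented above
    max_addr=4G min_addr=256M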

View File

@@ -96,7 +96,7 @@ Letting the PHY Abstraction Layer do Everything
 static void adjust_link(struct net_device *dev);
 Next, you need to know the device name of the PHY connected to this device.
-The name will look something like, "phy0:0", where the first number is the
+The name will look something like, "0:00", where the first number is the
 bus id, and the second is the PHY's address on that bus. Typically,
 the bus is responsible for making its ID unique.
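(Illustrative aside, not part of the patch: under the naming scheme described above, a PHY at address 1 on MDIO bus 0 would be referred to as "0:01".)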

View File

@@ -1809,7 +1809,7 @@ S: Maintained
 FTRACE
 P:	Steven Rostedt
-M:	srostedt@redhat.com
+M:	rostedt@goodmis.org
 S:	Maintained
 FUJITSU FR-V (FRV) PORT

@@ -3928,8 +3928,6 @@ M: bootc@bootc.net
 S:	Maintained
 SOFTWARE RAID (Multiple Disks) SUPPORT
-P:	Ingo Molnar
-M:	mingo@redhat.com
 P:	Neil Brown
 M:	neilb@suse.de
 L:	linux-raid@vger.kernel.org

View File

@ -1,7 +1,7 @@
VERSION = 2 VERSION = 2
PATCHLEVEL = 6 PATCHLEVEL = 6
SUBLEVEL = 28 SUBLEVEL = 28
EXTRAVERSION = -rc5 EXTRAVERSION = -rc6
NAME = Killer Bat of Doom NAME = Killer Bat of Doom
# *DOCUMENTATION* # *DOCUMENTATION*

View File

@@ -101,7 +101,7 @@ extern u16 _bfin_swrst; /* shadow for Software Reset Register (SWRST) */
 extern unsigned long _ramstart, _ramend, _rambase;
 extern unsigned long memory_start, memory_end, physical_mem_end;
 extern char _stext_l1[], _etext_l1[], _sdata_l1[], _edata_l1[], _sbss_l1[],
-    _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _ebss_b_l1[],
+    _ebss_l1[], _l1_lma_start[], _sdata_b_l1[], _sbss_b_l1[], _ebss_b_l1[],
     _stext_l2[], _etext_l2[], _sdata_l2[], _edata_l2[], _sbss_l2[],
     _ebss_l2[], _l2_lma_start[];

View File

@@ -15,7 +15,11 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-#define dma_mapping_error
+static inline
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+    return 0;
+}
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode.
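As an aside, a hedged usage sketch of the inline helper introduced above; the call sequence follows the generic DMA-mapping API of this kernel generation, and the device/buffer names are illustrative rather than taken from the patch:

    /* Illustrative only: map a buffer for streaming DMA and check the
     * returned handle with dma_mapping_error() before using it. */
    dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, handle))
        return -ENOMEM;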

View File

@@ -218,7 +218,7 @@ inline int check_gpio(unsigned gpio)
     if (gpio == GPIO_PB15 || gpio == GPIO_PC14 || gpio == GPIO_PC15
         || gpio == GPIO_PH14 || gpio == GPIO_PH15
         || gpio == GPIO_PJ14 || gpio == GPIO_PJ15
-        || gpio > MAX_BLACKFIN_GPIOS)
+        || gpio >= MAX_BLACKFIN_GPIOS)
         return -EINVAL;
     return 0;
 }

View File

@@ -188,10 +188,11 @@ static struct cplb_desc cplb_data[] = {
 static u16 __init lock_kernel_check(u32 start, u32 end)
 {
-    if ((end <= (u32) _end && end >= (u32)_stext) ||
-        (start <= (u32) _end && start >= (u32)_stext))
-        return IN_KERNEL;
-    return 0;
+    if (start >= (u32)_end || end <= (u32)_stext)
+        return 0;
+
+    /* This cplb block overlapped with kernel area. */
+    return IN_KERNEL;
 }
 static unsigned short __init

View File

@@ -351,9 +351,14 @@ int _access_ok(unsigned long addr, unsigned long size)
         return 1;
 #endif
 #if L1_DATA_B_LENGTH != 0
-    if (addr >= L1_DATA_B_START
+    if (addr >= L1_DATA_B_START + (_ebss_b_l1 - _sdata_b_l1)
         && addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH)
         return 1;
+#endif
+#if L2_LENGTH != 0
+    if (addr >= L2_START + (_ebss_l2 - _stext_l2)
+        && addr + size <= L2_START + L2_LENGTH)
+        return 1;
 #endif
     return 0;
 }

View File

@@ -119,23 +119,23 @@ void __init bfin_relocate_l1_mem(void)
     /* Copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
     dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
-    l1_data_a_length = _ebss_l1 - _sdata_l1;
+    l1_data_a_length = _sbss_l1 - _sdata_l1;
     if (l1_data_a_length > L1_DATA_A_LENGTH)
         panic("L1 Data SRAM Bank A Overflow\n");
-    /* Copy _sdata_l1 to _ebss_l1 to L1 data bank A SRAM */
+    /* Copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
     dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
-    l1_data_b_length = _ebss_b_l1 - _sdata_b_l1;
+    l1_data_b_length = _sbss_b_l1 - _sdata_b_l1;
     if (l1_data_b_length > L1_DATA_B_LENGTH)
         panic("L1 Data SRAM Bank B Overflow\n");
-    /* Copy _sdata_b_l1 to _ebss_b_l1 to L1 data bank B SRAM */
+    /* Copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
     dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
             l1_data_a_length, l1_data_b_length);
     if (L2_LENGTH != 0) {
-        l2_length = _ebss_l2 - _stext_l2;
+        l2_length = _sbss_l2 - _stext_l2;
         if (l2_length > L2_LENGTH)
             panic("L2 SRAM Overflow\n");

@@ -827,7 +827,7 @@ void __init setup_arch(char **cmdline_p)
         printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
                bfin_compiled_revid(), bfin_revid());
     }
-    if (bfin_revid() <= CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
+    if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
         printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
                CPU, bfin_revid());
 }

View File

@@ -59,7 +59,7 @@
 #endif
-#ifdef CONFIG_VERBOSE_DEBUG
+#ifdef CONFIG_DEBUG_VERBOSE
 #define verbose_printk(fmt, arg...) \
     printk(fmt, ##arg)
 #else

@@ -147,9 +147,12 @@ static void decode_address(char *buf, unsigned long address)
         char *name = p->comm;
         struct file *file = vma->vm_file;
-        if (file)
-            name = d_path(&file->f_path, _tmpbuf,
+        if (file) {
+            char *d_name = d_path(&file->f_path, _tmpbuf,
                       sizeof(_tmpbuf));
+            if (!IS_ERR(d_name))
+                name = d_name;
+        }
         /* FLAT does not have its text aligned to the start of
          * the map while FDPIC ELF does ...

@@ -571,7 +574,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
 #endif
         panic("Kernel exception");
     } else {
-#ifdef CONFIG_VERBOSE_DEBUG
+#ifdef CONFIG_DEBUG_VERBOSE
         unsigned long *stack;
         /* Dump the user space stack */
         stack = (unsigned long *)rdusp();

View File

@@ -25,9 +25,13 @@
  */
 .macro do_flush flushins:req optflushins optnopins label
+    R2 = -L1_CACHE_BYTES;
+    /* start = (start & -L1_CACHE_BYTES) */
+    R0 = R0 & R2;
     /* end = ((end - 1) & -L1_CACHE_BYTES) + L1_CACHE_BYTES; */
     R1 += -1;
-    R2 = -L1_CACHE_BYTES;
     R1 = R1 & R2;
     R1 += L1_CACHE_BYTES;

@@ -63,7 +67,7 @@ ENDPROC(_blackfin_icache_flush_range)
 /* Flush all cache lines assocoiated with this area of memory. */
 ENTRY(_blackfin_icache_dcache_flush_range)
-    do_flush IFLUSH, FLUSH
+    do_flush FLUSH, IFLUSH
 ENDPROC(_blackfin_icache_dcache_flush_range)
 /* Throw away all D-cached data in specified region without any obligation to

View File

@@ -72,13 +72,13 @@ unsigned int __bfin_cycles_mod;
 /**************************************************************************/
-static unsigned int bfin_getfreq(unsigned int cpu)
+static unsigned int bfin_getfreq_khz(unsigned int cpu)
 {
     /* The driver only support single cpu */
     if (cpu != 0)
         return -1;
-    return get_cclk();
+    return get_cclk() / 1000;
 }

@@ -96,7 +96,7 @@ static int bfin_target(struct cpufreq_policy *policy,
     cclk_hz = bfin_freq_table[index].frequency;
-    freqs.old = bfin_getfreq(0);
+    freqs.old = bfin_getfreq_khz(0);
     freqs.new = cclk_hz;
     freqs.cpu = 0;

@@ -137,8 +137,8 @@ static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
     if (policy->cpu != 0)
         return -EINVAL;
-    cclk = get_cclk();
-    sclk = get_sclk();
+    cclk = get_cclk() / 1000;
+    sclk = get_sclk() / 1000;
 #if ANOMALY_05000273 || (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_DCACHE))
     min_cclk = sclk * 2;

@@ -152,7 +152,7 @@ static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
         dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
         dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;
-        pr_debug("cpufreq: freq:%d csel:%d tscale:%d\n",
+        pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
              bfin_freq_table[index].frequency,
              dpm_state_table[index].csel,
              dpm_state_table[index].tscale);

@@ -173,7 +173,7 @@ static struct freq_attr *bfin_freq_attr[] = {
 static struct cpufreq_driver bfin_driver = {
     .verify = bfin_verify_speed,
     .target = bfin_target,
-    .get = bfin_getfreq,
+    .get = bfin_getfreq_khz,
     .init = __bfin_cpu_init,
     .name = "bfin cpufreq",
     .owner = THIS_MODULE,

View File

@@ -277,7 +277,7 @@ ENTRY(_bfin_return_from_exception)
     p5.h = hi(ILAT);
     r6 = [p5];
     r7 = 0x20;        /* Did I just cause anther HW error? */
-    r7 = r7 & r1;
+    r6 = r7 & r6;
     CC = R7 == R6;
     if CC JUMP _double_fault;
 #endif

View File

@@ -183,10 +183,10 @@ static void __init l2_sram_init(void)
         return;
     }
-    free_l2_sram_head.next->paddr = (void *)L2_START +
-        (_etext_l2 - _stext_l2) + (_edata_l2 - _sdata_l2);
-    free_l2_sram_head.next->size = L2_LENGTH -
-        (_etext_l2 - _stext_l2) + (_edata_l2 - _sdata_l2);
+    free_l2_sram_head.next->paddr =
+        (void *)L2_START + (_ebss_l2 - _stext_l2);
+    free_l2_sram_head.next->size =
+        L2_LENGTH - (_ebss_l2 - _stext_l2);
     free_l2_sram_head.next->pid = 0;
     free_l2_sram_head.next->next = NULL;

View File

@@ -226,7 +226,7 @@ extern long ia64_cmpxchg_called_with_bad_pointer (void);
 /************************************************/
 #define ia64_ssm			IA64_INTRINSIC_MACRO(ssm)
 #define ia64_rsm			IA64_INTRINSIC_MACRO(rsm)
-#define ia64_getreg			IA64_INTRINSIC_API(getreg)
+#define ia64_getreg			IA64_INTRINSIC_MACRO(getreg)
 #define ia64_setreg			IA64_INTRINSIC_API(setreg)
 #define ia64_set_rr			IA64_INTRINSIC_API(set_rr)
 #define ia64_get_rr			IA64_INTRINSIC_API(get_rr)

View File

@@ -78,6 +78,19 @@ extern unsigned long ia64_native_getreg_func(int regnum);
             ia64_native_rsm(mask);	\
     } while (0)
+/* returned ip value should be the one in the caller,
+ * not in __paravirt_getreg() */
+#define paravirt_getreg(reg)					\
+    ({							\
+        unsigned long res;				\
+        BUILD_BUG_ON(!__builtin_constant_p(reg));	\
+        if ((reg) == _IA64_REG_IP)			\
+            res = ia64_native_getreg(_IA64_REG_IP); \
+        else						\
+            res = pv_cpu_ops.getreg(reg);		\
+        res;						\
+    })
 /******************************************************************************
  * replacement of hand written assembly codes.
  */

View File

@@ -499,6 +499,7 @@ GLOBAL_ENTRY(prefetch_stack)
 END(prefetch_stack)
 GLOBAL_ENTRY(kernel_execve)
+    rum psr.ac
     mov r15=__NR_execve            // put syscall number in place
     break __BREAK_SYSCALL
     br.ret.sptk.many rp

View File

@@ -260,7 +260,7 @@ start_ap:
      * Switch into virtual mode:
      */
     movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
-          |IA64_PSR_DI)
+          |IA64_PSR_DI|IA64_PSR_AC)
     ;;
     mov cr.ipsr=r16
     movl r17=1f

View File

@@ -1139,7 +1139,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
     return previous_current;
 no_mod:
-    printk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
+    mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
             smp_processor_id(), type, msg);
     return previous_current;
 }

View File

@@ -130,7 +130,7 @@ ia64_native_getreg_func(int regnum)
     unsigned long res = -1;
     switch (regnum) {
     CASE_GET_REG(GP);
-    CASE_GET_REG(IP);
+    /*CASE_GET_REG(IP);*/ /* returned ip value shouldn't be constant */
     CASE_GET_REG(PSR);
     CASE_GET_REG(TP);
     CASE_GET_REG(SP);

View File

@@ -19,7 +19,6 @@
 #include <linux/kernel.h>
 #include <asm/page.h>
-#include <asm/iommu.h>
 dma_addr_t bad_dma_address __read_mostly;
 EXPORT_SYMBOL(bad_dma_address);

View File

@@ -58,7 +58,7 @@ __HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
 __HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
 #ifdef CONFIG_IA32_SUPPORT
-__HCALL1(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
+__HCALL0(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
 __HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG)	// refer SDM vol1 3.1.8
 #endif /* CONFIG_IA32_SUPPORT */

View File

@@ -84,5 +84,7 @@ extern void set_434_reg(unsigned reg_offs, unsigned bit, unsigned len, unsigned
 extern unsigned get_434_reg(unsigned reg_offs);
 extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask);
 extern unsigned char get_latch_u5(void);
+extern void rb532_gpio_set_ilevel(int bit, unsigned gpio);
+extern void rb532_gpio_set_istat(int bit, unsigned gpio);
 #endif /* _RC32434_GPIO_H_ */

View File

@@ -40,12 +40,14 @@
 #define BTCS		0x010040
 #define BTCOMPARE	0x010044
 #define GPIOBASE	0x050000
-#define GPIOCFG		0x050004
-#define GPIOD		0x050008
-#define GPIOILEVEL	0x05000C
-#define GPIOISTAT	0x050010
-#define GPIONMIEN	0x050014
-#define IMASK6		0x038038
+/* Offsets relative to GPIOBASE */
+#define GPIOFUNC	0x00
+#define GPIOCFG		0x04
+#define GPIOD		0x08
+#define GPIOILEVEL	0x0C
+#define GPIOISTAT	0x10
+#define GPIONMIEN	0x14
+#define IMASK6		0x38
 #define LO_WPX		(1 << 0)
 #define LO_ALE		(1 << 1)
 #define LO_CLE		(1 << 2)

View File

@@ -63,7 +63,7 @@ static inline int mips_clockevent_init(void)
 /*
  * Initialize the count register as a clocksource
  */
-#ifdef CONFIG_CEVT_R4K
+#ifdef CONFIG_CSRC_R4K
 extern int init_mips_clocksource(void);
 #else
 static inline int init_mips_clocksource(void)

View File

@@ -27,7 +27,7 @@ int __init init_mips_clocksource(void)
     if (!cpu_has_counter || !mips_hpt_frequency)
         return -ENXIO;
-    /* Calclate a somewhat reasonable rating value */
+    /* Calculate a somewhat reasonable rating value */
     clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
     clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);

View File

@@ -161,7 +161,7 @@ static inline int __init indy_sc_probe(void)
 /* XXX Check with wje if the Indy caches can differenciate between
    writeback + invalidate and just invalidate. */
-struct bcache_ops indy_sc_ops = {
+static struct bcache_ops indy_sc_ops = {
     .bc_enable = indy_sc_enable,
     .bc_disable = indy_sc_disable,
     .bc_wback_inv = indy_sc_wback_invalidate,

View File

@@ -22,9 +22,9 @@
 #include <linux/init.h>
 #include <linux/smp.h>
-#include <asm-mips/addrspace.h>
-#include <asm-mips/mips-boards/launch.h>
-#include <asm-mips/mipsmtregs.h>
+#include <asm/addrspace.h>
+#include <asm/mips-boards/launch.h>
+#include <asm/mipsmtregs.h>
 int amon_cpu_avail(int cpu)
 {

View File

@@ -118,7 +118,7 @@ static struct platform_device cf_slot0 = {
 /* Resources and device for NAND */
 static int rb532_dev_ready(struct mtd_info *mtd)
 {
-    return readl(IDT434_REG_BASE + GPIOD) & GPIO_RDY;
+    return gpio_get_value(GPIO_RDY);
 }
 static void rb532_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)

View File

@@ -39,10 +39,6 @@
 struct rb532_gpio_chip {
     struct gpio_chip chip;
     void __iomem     *regbase;
-    void    (*set_int_level)(struct gpio_chip *chip, unsigned offset, int value);
-    int     (*get_int_level)(struct gpio_chip *chip, unsigned offset);
-    void    (*set_int_status)(struct gpio_chip *chip, unsigned offset, int value);
-    int     (*get_int_status)(struct gpio_chip *chip, unsigned offset);
 };
 struct mpmc_device dev3;
@@ -111,15 +107,47 @@ unsigned char get_latch_u5(void)
 }
 EXPORT_SYMBOL(get_latch_u5);
+/* rb532_set_bit - sanely set a bit
+ *
+ * bitval: new value for the bit
+ * offset: bit index in the 4 byte address range
+ * ioaddr: 4 byte aligned address being altered
+ */
+static inline void rb532_set_bit(unsigned bitval,
+        unsigned offset, void __iomem *ioaddr)
+{
+    unsigned long flags;
+    u32 val;
+
+    bitval = !!bitval;              /* map parameter to {0,1} */
+
+    local_irq_save(flags);
+
+    val = readl(ioaddr);
+    val &= ~( ~bitval << offset );   /* unset bit if bitval == 0 */
+    val |= ( bitval << offset );     /* set bit if bitval == 1 */
+    writel(val, ioaddr);
+
+    local_irq_restore(flags);
+}
+
+/* rb532_get_bit - read a bit
+ *
+ * returns the boolean state of the bit, which may be > 1
+ */
+static inline int rb532_get_bit(unsigned offset, void __iomem *ioaddr)
+{
+    return (readl(ioaddr) & (1 << offset));
+}
+
 /*
  * Return GPIO level */
 static int rb532_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
-    u32 mask = 1 << offset;
     struct rb532_gpio_chip *gpch;
     gpch = container_of(chip, struct rb532_gpio_chip, chip);
-    return readl(gpch->regbase + GPIOD) & mask;
+    return rb532_get_bit(offset, gpch->regbase + GPIOD);
 }
 /*
@@ -128,23 +156,10 @@ static int rb532_gpio_get(struct gpio_chip *chip, unsigned offset)
 static void rb532_gpio_set(struct gpio_chip *chip,
                 unsigned offset, int value)
 {
-    unsigned long flags;
-    u32 mask = 1 << offset;
-    u32 tmp;
     struct rb532_gpio_chip *gpch;
-    void __iomem *gpvr;
     gpch = container_of(chip, struct rb532_gpio_chip, chip);
-    gpvr = gpch->regbase + GPIOD;
-    local_irq_save(flags);
-    tmp = readl(gpvr);
-    if (value)
-        tmp |= mask;
-    else
-        tmp &= ~mask;
-    writel(tmp, gpvr);
-    local_irq_restore(flags);
+    rb532_set_bit(value, offset, gpch->regbase + GPIOD);
 }
 /*
@@ -152,21 +167,14 @@ static void rb532_gpio_set(struct gpio_chip *chip,
  */
 static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 {
-    unsigned long flags;
-    u32 mask = 1 << offset;
-    u32 value;
     struct rb532_gpio_chip *gpch;
-    void __iomem *gpdr;
     gpch = container_of(chip, struct rb532_gpio_chip, chip);
-    gpdr = gpch->regbase + GPIOCFG;
-    local_irq_save(flags);
-    value = readl(gpdr);
-    value &= ~mask;
-    writel(value, gpdr);
-    local_irq_restore(flags);
+    if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC))
+        return 1;    /* alternate function, GPIOCFG is ignored */
+
+    rb532_set_bit(0, offset, gpch->regbase + GPIOCFG);
     return 0;
 }
@@ -176,99 +184,20 @@ static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 static int rb532_gpio_direction_output(struct gpio_chip *chip,
                     unsigned offset, int value)
 {
-    unsigned long flags;
-    u32 mask = 1 << offset;
-    u32 tmp;
     struct rb532_gpio_chip *gpch;
-    void __iomem *gpdr;
     gpch = container_of(chip, struct rb532_gpio_chip, chip);
-    writel(mask, gpch->regbase + GPIOD);
-    gpdr = gpch->regbase + GPIOCFG;
-    local_irq_save(flags);
-    tmp = readl(gpdr);
-    tmp |= mask;
-    writel(tmp, gpdr);
-    local_irq_restore(flags);
+    if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC))
+        return 1;    /* alternate function, GPIOCFG is ignored */
+
+    /* set the initial output value */
+    rb532_set_bit(value, offset, gpch->regbase + GPIOD);
+
+    rb532_set_bit(1, offset, gpch->regbase + GPIOCFG);
     return 0;
 }
-/*
- * Set the GPIO interrupt level
- */
-static void rb532_gpio_set_int_level(struct gpio_chip *chip,
-                    unsigned offset, int value)
-{
-    unsigned long flags;
-    u32 mask = 1 << offset;
-    u32 tmp;
-    struct rb532_gpio_chip *gpch;
-    void __iomem *gpil;
-    gpch = container_of(chip, struct rb532_gpio_chip, chip);
-    gpil = gpch->regbase + GPIOILEVEL;
-    local_irq_save(flags);
-    tmp = readl(gpil);
-    if (value)
-        tmp |= mask;
-    else
-        tmp &= ~mask;
-    writel(tmp, gpil);
-    local_irq_restore(flags);
-}
-/*
- * Get the GPIO interrupt level
- */
-static int rb532_gpio_get_int_level(struct gpio_chip *chip, unsigned offset)
-{
-    u32 mask = 1 << offset;
-    struct rb532_gpio_chip *gpch;
-    gpch = container_of(chip, struct rb532_gpio_chip, chip);
-    return readl(gpch->regbase + GPIOILEVEL) & mask;
-}
-/*
- * Set the GPIO interrupt status
- */
-static void rb532_gpio_set_int_status(struct gpio_chip *chip,
-                unsigned offset, int value)
-{
-    unsigned long flags;
-    u32 mask = 1 << offset;
-    u32 tmp;
-    struct rb532_gpio_chip *gpch;
-    void __iomem *gpis;
-    gpch = container_of(chip, struct rb532_gpio_chip, chip);
-    gpis = gpch->regbase + GPIOISTAT;
-    local_irq_save(flags);
-    tmp = readl(gpis);
-    if (value)
-        tmp |= mask;
-    else
-        tmp &= ~mask;
-    writel(tmp, gpis);
-    local_irq_restore(flags);
-}
-/*
- * Get the GPIO interrupt status
- */
-static int rb532_gpio_get_int_status(struct gpio_chip *chip, unsigned offset)
-{
-    u32 mask = 1 << offset;
-    struct rb532_gpio_chip *gpch;
-    gpch = container_of(chip, struct rb532_gpio_chip, chip);
-    return readl(gpch->regbase + GPIOISTAT) & mask;
-}
 static struct rb532_gpio_chip rb532_gpio_chip[] = {
     [0] = {
         .chip = {
@@ -280,13 +209,35 @@ static struct rb532_gpio_chip rb532_gpio_chip[] = {
             .base = 0,
             .ngpio = 32,
         },
-        .get_int_level = rb532_gpio_get_int_level,
-        .set_int_level = rb532_gpio_set_int_level,
-        .get_int_status = rb532_gpio_get_int_status,
-        .set_int_status = rb532_gpio_set_int_status,
     },
 };
+/*
+ * Set GPIO interrupt level
+ */
+void rb532_gpio_set_ilevel(int bit, unsigned gpio)
+{
+    rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOILEVEL);
+}
+EXPORT_SYMBOL(rb532_gpio_set_ilevel);
+
+/*
+ * Set GPIO interrupt status
+ */
+void rb532_gpio_set_istat(int bit, unsigned gpio)
+{
+    rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOISTAT);
+}
+EXPORT_SYMBOL(rb532_gpio_set_istat);
+
+/*
+ * Configure GPIO alternate function
+ */
+static void rb532_gpio_set_func(int bit, unsigned gpio)
+{
+    rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOFUNC);
+}
 int __init rb532_gpio_init(void)
 {
     struct resource *r;
@@ -310,9 +261,11 @@ int __init rb532_gpio_init(void)
         return -ENXIO;
     }
-    /* Set the interrupt status and level for the CF pin */
-    rb532_gpio_set_int_level(&rb532_gpio_chip->chip, CF_GPIO_NUM, 1);
-    rb532_gpio_set_int_status(&rb532_gpio_chip->chip, CF_GPIO_NUM, 0);
+    /* configure CF_GPIO_NUM as CFRDY IRQ source */
+    rb532_gpio_set_func(0, CF_GPIO_NUM);
+    rb532_gpio_direction_input(&rb532_gpio_chip->chip, CF_GPIO_NUM);
+    rb532_gpio_set_ilevel(1, CF_GPIO_NUM);
+    rb532_gpio_set_istat(0, CF_GPIO_NUM);
     return 0;
 }

View File

@@ -183,10 +183,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
  * being 64 bit in both cases.
  */
-static long translate_usr_offset(long offset)
+static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
 {
     if (offset < 0)
-        return -1;
+        return sizeof(struct pt_regs);
     else if (offset <= 32*4)    /* gr[0..31] */
         return offset * 2 + 4;
     else if (offset <= 32*4+32*8)    /* gr[0..31] + fr[0..31] */

@@ -194,7 +194,7 @@ static long translate_usr_offset(long offset)
     else if (offset < sizeof(struct pt_regs)/2 + 32*4)
         return offset * 2 + 4 - 32*8;
     else
-        return -1;
+        return sizeof(struct pt_regs);
 }
 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,

@@ -209,7 +209,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
         if (addr & (sizeof(compat_uint_t)-1))
             break;
         addr = translate_usr_offset(addr);
-        if (addr < 0)
+        if (addr >= sizeof(struct pt_regs))
             break;
         tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);

@@ -236,7 +236,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
         if (addr & (sizeof(compat_uint_t)-1))
             break;
         addr = translate_usr_offset(addr);
-        if (addr < 0)
+        if (addr >= sizeof(struct pt_regs))
             break;
         if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
             /* Special case, fp regs are 64 bits anyway */

View File

@@ -338,8 +338,9 @@
 #define __NR_dup3		320
 #define __NR_pipe2		321
 #define __NR_inotify_init1	322
-#define NR_SYSCALLS		323
+#define __NR_accept4		323
+#define NR_SYSCALLS		324
 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
  * it never had the plain ones and there is no value to adding those

View File

@@ -340,8 +340,9 @@
 #define __NR_dup3		320
 #define __NR_pipe2		321
 #define __NR_inotify_init1	322
-#define NR_SYSCALLS		323
+#define __NR_accept4		323
+#define NR_SYSCALLS		324
 #ifdef __KERNEL__
 #define __ARCH_WANT_IPC_PARSE_VERSION

View File

@@ -81,4 +81,4 @@ sys_call_table:
 /*305*/	.long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
 /*310*/	.long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 /*315*/	.long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1
+/*320*/	.long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4

View File

@@ -150,7 +150,7 @@ sys32_mmap2:
 sys32_socketcall:	/* %o0=call, %o1=args */
 	cmp	%o0, 1
 	bl,pn	%xcc, do_einval
-	 cmp	%o0, 17
+	 cmp	%o0, 18
 	bg,pn	%xcc, do_einval
 	 sub	%o0, 1, %o0
 	sllx	%o0, 5, %o0

@@ -319,6 +319,15 @@ do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int)
 	nop
 	nop
 	nop
+do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
+63:	ldswa	[%o1 + 0x0] %asi, %o0
+	sethi	%hi(sys_accept4), %g1
+64:	lduwa	[%o1 + 0x8] %asi, %o2
+65:	ldswa	[%o1 + 0xc] %asi, %o3
+	jmpl	%g1 + %lo(sys_accept4), %g0
+66:	 lduwa	[%o1 + 0x4] %asi, %o1
+	nop
+	nop
 	.section	__ex_table,"a"
 	.align	4

@@ -353,4 +362,6 @@ do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int)
 	.word	57b, __retl_efault, 58b, __retl_efault
 	.word	59b, __retl_efault, 60b, __retl_efault
 	.word	61b, __retl_efault, 62b, __retl_efault
+	.word	63b, __retl_efault, 64b, __retl_efault
+	.word	65b, __retl_efault, 66b, __retl_efault
 	.previous

View File

@@ -82,7 +82,7 @@ sys_call_table32:
 	.word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
 /*310*/	.word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
 	.word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1
+/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4
 #endif /* CONFIG_COMPAT */

@@ -156,4 +156,4 @@ sys_call_table:
 	.word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
 /*310*/	.word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
 	.word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1
+/*320*/	.word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4

View File

@@ -167,9 +167,12 @@ config GENERIC_PENDING_IRQ
 config X86_SMP
 	bool
 	depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
-	select USE_GENERIC_SMP_HELPERS
 	default y
+config USE_GENERIC_SMP_HELPERS
+	def_bool y
+	depends on SMP
 config X86_32_SMP
 	def_bool y
 	depends on X86_32 && SMP

@@ -957,7 +960,7 @@ config ARCH_PHYS_ADDR_T_64BIT
 config NUMA
 	bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"
 	depends on SMP
-	depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && BROKEN)
+	depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
 	default n if X86_PC
 	default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
 	help

View File

@@ -74,7 +74,7 @@ static int kbd_pending(void)
 {
     u8 pending;
     asm volatile("int $0x16; setnz %0"
-             : "=rm" (pending)
+             : "=qm" (pending)
             : "a" (0x0100));
     return pending;
 }

View File

@@ -71,15 +71,13 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 /* Make sure we keep the same behaviour */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-#ifdef CONFIG_X86_32
-    return 0;
-#else
+#ifdef CONFIG_X86_64
     struct dma_mapping_ops *ops = get_dma_ops(dev);
     if (ops->mapping_error)
         return ops->mapping_error(dev, dma_addr);
-    return (dma_addr == bad_dma_address);
 #endif
+    return (dma_addr == bad_dma_address);
 }
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)

View File

@@ -23,12 +23,13 @@
 #ifndef _ASM_X86_DS_H
 #define _ASM_X86_DS_H
-#ifdef CONFIG_X86_DS
 #include <linux/types.h>
 #include <linux/init.h>
+#ifdef CONFIG_X86_DS
 struct task_struct;
 /*

@@ -232,7 +233,8 @@ extern void ds_free(struct ds_context *context);
 #else /* CONFIG_X86_DS */
-#define ds_init_intel(config) do {} while (0)
+struct cpuinfo_x86;
+static inline void __cpuinit ds_init_intel(struct cpuinfo_x86 *ignored) {}
 #endif /* CONFIG_X86_DS */
 #endif /* _ASM_X86_DS_H */

View File

@@ -34,10 +34,14 @@ static inline void get_memcfg_numa(void)
 extern int early_pfn_to_nid(unsigned long pfn);
+extern void resume_map_numa_kva(pgd_t *pgd);
 #else /* !CONFIG_NUMA */
 #define get_memcfg_numa get_memcfg_numa_flat
+static inline void resume_map_numa_kva(pgd_t *pgd) {}
 #endif /* CONFIG_NUMA */
 #ifdef CONFIG_DISCONTIGMEM

View File

@@ -34,8 +34,6 @@ extern void pci_iommu_alloc(void);
  */
 #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
-#if defined(CONFIG_GART_IOMMU) || defined(CONFIG_CALGARY_IOMMU)
 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
 	dma_addr_t ADDR_NAME;
 #define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		\

@@ -49,18 +47,6 @@ extern void pci_iommu_alloc(void);
 #define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
 	(((PTR)->LEN_NAME) = (VAL))
-#else
-/* No IOMMU */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
-#define pci_unmap_addr(PTR, ADDR_NAME)		(0)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
-#define pci_unmap_len(PTR, LEN_NAME)		(0)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
-#endif
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_PCI_64_H */

View File

@@ -46,7 +46,7 @@ int __copy_from_user(void *dst, const void __user *src, unsigned size)
         return ret;
     case 10:
         __get_user_asm(*(u64 *)dst, (u64 __user *)src,
-                   ret, "q", "", "=r", 16);
+                   ret, "q", "", "=r", 10);
         if (unlikely(ret))
             return ret;
         __get_user_asm(*(u16 *)(8 + (char *)dst),

View File

@@ -639,8 +639,8 @@ __SYSCALL(__NR_fallocate, sys_fallocate)
 __SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
 #define __NR_timerfd_gettime			287
 __SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
-#define __NR_paccept				288
-__SYSCALL(__NR_paccept, sys_paccept)
+#define __NR_accept4				288
+__SYSCALL(__NR_accept4, sys_accept4)
 #define __NR_signalfd4				289
 __SYSCALL(__NR_signalfd4, sys_signalfd4)
 #define __NR_eventfd2				290

View File

@@ -41,7 +41,7 @@ obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline.o
 obj-y				+= process.o
 obj-y				+= i387.o xsave.o
 obj-y				+= ptrace.o
-obj-y				+= ds.o
+obj-$(CONFIG_X86_DS)		+= ds.o
 obj-$(CONFIG_X86_32)		+= tls.o
 obj-$(CONFIG_IA32_EMULATION)	+= tls.o
 obj-y				+= step.o

View File

@@ -538,7 +538,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
     address >>= PAGE_SHIFT;
     iommu_area_free(dom->bitmap, address, pages);
-    if (address + pages >= dom->next_bit)
+    if (address >= dom->next_bit)
         dom->need_flush = true;
 }

View File

@@ -122,7 +122,7 @@ u16 amd_iommu_last_bdf;			/* largest PCI device id we have
 LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
 					   we find in ACPI */
 unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
-int amd_iommu_isolate;			/* if 1, device isolation is enabled */
+int amd_iommu_isolate = 1;		/* if 1, device isolation is enabled */
 bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */
 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the

@@ -1214,7 +1214,9 @@ static int __init parse_amd_iommu_options(char *str)
 	for (; *str; ++str) {
 		if (strncmp(str, "isolate", 7) == 0)
 			amd_iommu_isolate = 1;
-		if (strncmp(str, "fullflush", 11) == 0)
+		if (strncmp(str, "share", 5) == 0)
+			amd_iommu_isolate = 0;
+		if (strncmp(str, "fullflush", 9) == 0)
 			amd_iommu_unmap_flush = true;
 	}

View File

@@ -21,8 +21,6 @@
  */
-#ifdef CONFIG_X86_DS
 #include <asm/ds.h>
 #include <linux/errno.h>
@@ -211,14 +209,15 @@ static DEFINE_PER_CPU(struct ds_context *, system_context);
 static inline struct ds_context *ds_get_context(struct task_struct *task)
 {
     struct ds_context *context;
+    unsigned long irq;
-    spin_lock(&ds_lock);
+    spin_lock_irqsave(&ds_lock, irq);
     context = (task ? task->thread.ds_ctx : this_system_context);
     if (context)
         context->count++;
-    spin_unlock(&ds_lock);
+    spin_unlock_irqrestore(&ds_lock, irq);
     return context;
 }
@@ -226,18 +225,16 @@ static inline struct ds_context *ds_get_context(struct task_struct *task)
 /*
  * Same as ds_get_context, but allocates the context and it's DS
  * structure, if necessary; returns NULL; if out of memory.
- *
- * pre: requires ds_lock to be held
  */
 static inline struct ds_context *ds_alloc_context(struct task_struct *task)
 {
     struct ds_context **p_context =
         (task ? &task->thread.ds_ctx : &this_system_context);
     struct ds_context *context = *p_context;
+    unsigned long irq;
     if (!context) {
         context = kzalloc(sizeof(*context), GFP_KERNEL);
         if (!context)
             return NULL;
@@ -247,18 +244,27 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
             return NULL;
         }
-        *p_context = context;
-        context->this = p_context;
-        context->task = task;
-        if (task)
-            set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
-        if (!task || (task == current))
-            wrmsr(MSR_IA32_DS_AREA, (unsigned long)context->ds, 0);
-        get_tracer(task);
+        spin_lock_irqsave(&ds_lock, irq);
+        if (*p_context) {
+            kfree(context->ds);
+            kfree(context);
+            context = *p_context;
+        } else {
+            *p_context = context;
+            context->this = p_context;
+            context->task = task;
+            if (task)
+                set_tsk_thread_flag(task, TIF_DS_AREA_MSR);
+            if (!task || (task == current))
+                wrmsrl(MSR_IA32_DS_AREA,
+                       (unsigned long)context->ds);
+        }
+        spin_unlock_irqrestore(&ds_lock, irq);
     }
     context->count++;
@@ -272,10 +278,12 @@ static inline struct ds_context *ds_alloc_context(struct task_struct *task)
  */
 static inline void ds_put_context(struct ds_context *context)
 {
+    unsigned long irq;
     if (!context)
         return;
-    spin_lock(&ds_lock);
+    spin_lock_irqsave(&ds_lock, irq);
     if (--context->count)
         goto out;
@@ -297,7 +305,7 @@ static inline void ds_put_context(struct ds_context *context)
     kfree(context->ds);
     kfree(context);
 out:
-    spin_unlock(&ds_lock);
+    spin_unlock_irqrestore(&ds_lock, irq);
 }
@@ -368,6 +376,7 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
     struct ds_context *context;
     unsigned long buffer, adj;
     const unsigned long alignment = (1 << 3);
+    unsigned long irq;
     int error = 0;
     if (!ds_cfg.sizeof_ds)
@@ -382,25 +391,27 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
         return -EOPNOTSUPP;
-    spin_lock(&ds_lock);
-    if (!check_tracer(task))
-        return -EPERM;
-    error = -ENOMEM;
     context = ds_alloc_context(task);
     if (!context)
+        return -ENOMEM;
+
+    spin_lock_irqsave(&ds_lock, irq);
+
+    error = -EPERM;
+    if (!check_tracer(task))
         goto out_unlock;
+    get_tracer(task);
     error = -EALREADY;
     if (context->owner[qual] == current)
-        goto out_unlock;
+        goto out_put_tracer;
     error = -EPERM;
     if (context->owner[qual] != NULL)
-        goto out_unlock;
+        goto out_put_tracer;
     context->owner[qual] = current;
-    spin_unlock(&ds_lock);
+    spin_unlock_irqrestore(&ds_lock, irq);
     error = -ENOMEM;
@@ -448,10 +459,17 @@ static int ds_request(struct task_struct *task, void *base, size_t size,
  out_release:
     context->owner[qual] = NULL;
     ds_put_context(context);
+    put_tracer(task);
+    return error;
+
+ out_put_tracer:
+    spin_unlock_irqrestore(&ds_lock, irq);
+    ds_put_context(context);
+    put_tracer(task);
     return error;
  out_unlock:
-    spin_unlock(&ds_lock);
+    spin_unlock_irqrestore(&ds_lock, irq);
     ds_put_context(context);
     return error;
 }
@@ -801,13 +819,21 @@ static const struct ds_configuration ds_cfg_var = {
     .sizeof_ds    = sizeof(long) * 12,
     .sizeof_field = sizeof(long),
     .sizeof_rec[ds_bts]   = sizeof(long) * 3,
+#ifdef __i386__
     .sizeof_rec[ds_pebs]  = sizeof(long) * 10
+#else
+    .sizeof_rec[ds_pebs]  = sizeof(long) * 18
+#endif
 };
 static const struct ds_configuration ds_cfg_64 = {
     .sizeof_ds    = 8 * 12,
     .sizeof_field = 8,
     .sizeof_rec[ds_bts]   = 8 * 3,
+#ifdef __i386__
     .sizeof_rec[ds_pebs]  = 8 * 10
+#else
    .sizeof_rec[ds_pebs]  = 8 * 18
+#endif
 };
 static inline void
@@ -861,4 +887,3 @@ void ds_free(struct ds_context *context)
     while (leftovers--)
         ds_put_context(context);
 }
-#endif /* CONFIG_X86_DS */

View File

@@ -250,31 +250,24 @@ int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
     struct acpi_table_header *header = NULL;
     int i = 0;
-    acpi_size tbl_size;
-    while (ACPI_SUCCESS(acpi_get_table_with_size("OEM1", i++, &header, &tbl_size))) {
+    while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
         if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
             struct oem_table *t = (struct oem_table *)header;
             oem_addrX = t->OEMTableAddr;
             oem_size = t->OEMTableSize;
-            early_acpi_os_unmap_memory(header, tbl_size);
             *oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
                                     oem_size);
             return 0;
         }
-        early_acpi_os_unmap_memory(header, tbl_size);
     }
     return -1;
 }
 void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
 {
-    if (!oem_addr)
-        return;
-    __acpi_unmap_table((char *)oem_addr, oem_size);
 }
 #endif

View File

@@ -58,7 +58,7 @@ void __cpuinit mxcsr_feature_mask_init(void)
     stts();
 }
-void __init init_thread_xstate(void)
+void __cpuinit init_thread_xstate(void)
 {
     if (!HAVE_HWFP) {
         xstate_size = sizeof(struct i387_soft_struct);

View File

@@ -1140,6 +1140,20 @@ static void __clear_irq_vector(int irq)
     cfg->vector = 0;
     cpus_clear(cfg->domain);
+
+    if (likely(!cfg->move_in_progress))
+        return;
+    cpus_and(mask, cfg->old_domain, cpu_online_map);
+    for_each_cpu_mask_nr(cpu, mask) {
+        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+                                vector++) {
+            if (per_cpu(vector_irq, cpu)[vector] != irq)
+                continue;
+            per_cpu(vector_irq, cpu)[vector] = -1;
+            break;
+        }
+    }
+    cfg->move_in_progress = 0;
 }
 void __setup_vector_irq(int cpu)
@@ -3594,27 +3608,7 @@ int __init io_apic_get_redir_entries (int ioapic)
 int __init probe_nr_irqs(void)
 {
-    int idx;
-    int nr = 0;
-#ifndef CONFIG_XEN
-    int nr_min = 32;
-#else
-    int nr_min = NR_IRQS;
-#endif
-    for (idx = 0; idx < nr_ioapics; idx++)
-        nr += io_apic_get_redir_entries(idx) + 1;
-    /* double it for hotplug and msi and nmi */
-    nr <<= 1;
-    /* something wrong ? */
-    if (nr < nr_min)
-        nr = nr_min;
-    if (WARN_ON(nr > NR_IRQS))
-        nr = NR_IRQS;
-    return nr;
+    return NR_IRQS;
 }
 /* --------------------------------------------------------------------------

View File

@@ -1567,7 +1567,7 @@ static int __init calgary_parse_options(char *p)
             ++p;
             if (*p == '\0')
                 break;
-            bridge = simple_strtol(p, &endp, 0);
+            bridge = simple_strtoul(p, &endp, 0);
             if (p == endp)
                 break;

View File

@@ -169,6 +169,15 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
             DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
         },
     },
+    {    /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
+        .callback = set_bios_reboot,
+        .ident = "Dell OptiPlex 330",
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+            DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
+            DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
+        },
+    },
     {    /* Handle problems with rebooting on Dell 2400's */
         .callback = set_bios_reboot,
         .ident = "Dell PowerEdge 2400",

View File

@@ -765,7 +765,7 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
         .callback = dmi_low_memory_corruption,
         .ident = "Phoenix BIOS",
         .matches = {
-            DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+            DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
         },
     },
 #endif

View File

@@ -46,7 +46,9 @@ static __cpuinit void check_tsc_warp(void)
     cycles_t start, now, prev, end;
     int i;
+    rdtsc_barrier();
     start = get_cycles();
+    rdtsc_barrier();
     /*
      * The measurement runs for 20 msecs:
      */

@@ -61,7 +63,9 @@ static __cpuinit void check_tsc_warp(void)
          */
        __raw_spin_lock(&sync_lock);
         prev = last_tsc;
+        rdtsc_barrier();
         now = get_cycles();
+        rdtsc_barrier();
         last_tsc = now;
         __raw_spin_unlock(&sync_lock);

View File

@@ -310,7 +310,7 @@ static void __init setup_xstate_init(void)
 /*
  * Enable and initialize the xsave feature.
  */
-void __init xsave_cntxt_init(void)
+void __ref xsave_cntxt_init(void)
 {
     unsigned int eax, ebx, ecx, edx;

View File

@@ -7,6 +7,7 @@
  * This file provides all the same external entries as smp.c but uses
  * the voyager hal to provide the functionality
  */
+#include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/kernel_stat.h>

@@ -1790,6 +1791,17 @@ void __init smp_setup_processor_id(void)
     x86_write_percpu(cpu_number, hard_smp_processor_id());
 }
+static void voyager_send_call_func(cpumask_t callmask)
+{
+    __u32 mask = cpus_addr(callmask)[0] & ~(1 << smp_processor_id());
+    send_CPI(mask, VIC_CALL_FUNCTION_CPI);
+}
+
+static void voyager_send_call_func_single(int cpu)
+{
+    send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
+}
+
 struct smp_ops smp_ops = {
     .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
     .smp_prepare_cpus = voyager_smp_prepare_cpus,

@@ -1799,6 +1811,6 @@ struct smp_ops smp_ops = {
     .smp_send_stop = voyager_smp_send_stop,
     .smp_send_reschedule = voyager_smp_send_reschedule,
-    .send_call_func_ipi = native_send_call_func_ipi,
-    .send_call_func_single_ipi = native_send_call_func_single_ipi,
+    .send_call_func_ipi = voyager_send_call_func,
+    .send_call_func_single_ipi = voyager_send_call_func_single,
 };

View File

@@ -222,6 +222,41 @@ static void __init remap_numa_kva(void)
     }
 }
+#ifdef CONFIG_HIBERNATION
+/**
+ * resume_map_numa_kva - add KVA mapping to the temporary page tables created
+ *                       during resume from hibernation
+ * @pgd_base - temporary resume page directory
+ */
+void resume_map_numa_kva(pgd_t *pgd_base)
+{
+    int node;
+
+    for_each_online_node(node) {
+        unsigned long start_va, start_pfn, size, pfn;
+
+        start_va = (unsigned long)node_remap_start_vaddr[node];
+        start_pfn = node_remap_start_pfn[node];
+        size = node_remap_size[node];
+
+        printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node);
+
+        for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
+            unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
+            pgd_t *pgd = pgd_base + pgd_index(vaddr);
+            pud_t *pud = pud_offset(pgd, vaddr);
+            pmd_t *pmd = pmd_offset(pud, vaddr);
+
+            set_pmd(pmd, pfn_pmd(start_pfn + pfn,
+                        PAGE_KERNEL_LARGE_EXEC));
+
+            printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
+                __FUNCTION__, vaddr, start_pfn + pfn);
+        }
+    }
+}
+#endif
 static unsigned long calculate_numa_remap_pages(void)
 {
     int nid;

View File

@ -69,7 +69,7 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
int i; int i;
if (!reset_value) { if (!reset_value) {
reset_value = kmalloc(sizeof(unsigned) * num_counters, reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
GFP_ATOMIC); GFP_ATOMIC);
if (!reset_value) if (!reset_value)
return; return;
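
The one-liner above sizes the allocation from the array element rather than from a hard-coded type. A hedged user-space illustration of why that form is preferred (malloc stands in for kmalloc/GFP_ATOMIC in this sketch):

#include <stdlib.h>

int main(void)
{
        size_t num_counters = 4;
        unsigned int *reset_value;

        /* If reset_value's element type ever changes, this line stays correct;
         * a hard-coded sizeof(unsigned) would silently mis-size the buffer. */
        reset_value = malloc(sizeof(reset_value[0]) * num_counters);
        if (!reset_value)
                return 1;
        free(reset_value);
        return 0;
}
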

View File

@ -496,21 +496,24 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
pci_siemens_interrupt_controller); pci_siemens_interrupt_controller);
/* /*
* Regular PCI devices have 256 bytes, but AMD Family 10h Opteron ext config * Regular PCI devices have 256 bytes, but AMD Family 10h/11h CPUs have
* have 4096 bytes. Even if the device is capable, that doesn't mean we can * 4096 bytes configuration space for each function of their processor
* access it. Maybe we don't have a way to generate extended config space * configuration space.
* accesses. So check it
*/ */
static void fam10h_pci_cfg_space_size(struct pci_dev *dev) static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev)
{ {
dev->cfg_size = pci_cfg_space_size_ext(dev); dev->cfg_size = pci_cfg_space_size_ext(dev);
} }
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, amd_cpu_pci_cfg_space_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, fam10h_pci_cfg_space_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, amd_cpu_pci_cfg_space_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, fam10h_pci_cfg_space_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, amd_cpu_pci_cfg_space_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, fam10h_pci_cfg_space_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, amd_cpu_pci_cfg_space_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, fam10h_pci_cfg_space_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, amd_cpu_pci_cfg_space_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, fam10h_pci_cfg_space_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300, amd_cpu_pci_cfg_space_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1301, amd_cpu_pci_cfg_space_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1302, amd_cpu_pci_cfg_space_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1303, amd_cpu_pci_cfg_space_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1304, amd_cpu_pci_cfg_space_size);
/* /*
* SB600: Disable BAR1 on device 14.0 to avoid HPET resources from * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from

View File

@ -12,6 +12,7 @@
#include <asm/system.h> #include <asm/system.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/mmzone.h>
/* Defined in hibernate_asm_32.S */ /* Defined in hibernate_asm_32.S */
extern int restore_image(void); extern int restore_image(void);
@ -127,6 +128,9 @@ static int resume_physical_mapping_init(pgd_t *pgd_base)
} }
} }
} }
resume_map_numa_kva(pgd_base);
return 0; return 0;
} }

View File

@ -661,12 +661,11 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
* For 64-bit, we must skip the Xen hole in the middle of the address * For 64-bit, we must skip the Xen hole in the middle of the address
* space, just after the big x86-64 virtual hole. * space, just after the big x86-64 virtual hole.
*/ */
static int xen_pgd_walk(struct mm_struct *mm, static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
int (*func)(struct mm_struct *mm, struct page *, int (*func)(struct mm_struct *mm, struct page *,
enum pt_level), enum pt_level),
unsigned long limit) unsigned long limit)
{ {
pgd_t *pgd = mm->pgd;
int flush = 0; int flush = 0;
unsigned hole_low, hole_high; unsigned hole_low, hole_high;
unsigned pgdidx_limit, pudidx_limit, pmdidx_limit; unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
@ -753,6 +752,14 @@ out:
return flush; return flush;
} }
static int xen_pgd_walk(struct mm_struct *mm,
int (*func)(struct mm_struct *mm, struct page *,
enum pt_level),
unsigned long limit)
{
return __xen_pgd_walk(mm, mm->pgd, func, limit);
}
/* If we're using split pte locks, then take the page's lock and /* If we're using split pte locks, then take the page's lock and
return a pointer to it. Otherwise return NULL. */ return a pointer to it. Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm) static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
@ -854,7 +861,7 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
xen_mc_batch(); xen_mc_batch();
if (xen_pgd_walk(mm, xen_pin_page, USER_LIMIT)) { if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
/* re-enable interrupts for flushing */ /* re-enable interrupts for flushing */
xen_mc_issue(0); xen_mc_issue(0);
@ -998,7 +1005,7 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
PT_PMD); PT_PMD);
#endif #endif
xen_pgd_walk(mm, xen_unpin_page, USER_LIMIT); __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);
xen_mc_issue(0); xen_mc_issue(0);
} }

View File

@ -217,6 +217,12 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
return PTR_ERR(bio); return PTR_ERR(bio);
if (bio->bi_size != len) { if (bio->bi_size != len) {
/*
* Grab an extra reference to this bio, as bio_unmap_user()
* expects to be able to drop it twice as it happens on the
* normal IO completion path
*/
bio_get(bio);
bio_endio(bio, 0); bio_endio(bio, 0);
bio_unmap_user(bio); bio_unmap_user(bio);
return -EINVAL; return -EINVAL;
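
The comment added above explains the fix: bio_unmap_user() drops a reference that the normal completion path would also have dropped, so the error path takes one extra reference first. A generic reference-counting sketch of that pattern, with made-up names (not the bio API):

#include <stdio.h>

struct obj {
        int refcount;
};

static void obj_get(struct obj *o) { o->refcount++; }

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0)
                printf("object freed\n");
}

/* Completion helper that, like bio_endio() on this path, ends up dropping
 * one reference the caller did not expect to lose. */
static void complete(struct obj *o) { obj_put(o); }

int main(void)
{
        struct obj o = { .refcount = 1 };

        obj_get(&o);    /* extra reference, so complete() cannot free it */
        complete(&o);
        obj_put(&o);    /* explicit teardown drops the last reference */
        return 0;
}
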

View File

@ -768,6 +768,8 @@ static int __init genhd_device_init(void)
bdev_map = kobj_map_init(base_probe, &block_class_lock); bdev_map = kobj_map_init(base_probe, &block_class_lock);
blk_dev_init(); blk_dev_init();
register_blkdev(BLOCK_EXT_MAJOR, "blkext");
#ifndef CONFIG_SYSFS_DEPRECATED #ifndef CONFIG_SYSFS_DEPRECATED
/* create top-level block dir */ /* create top-level block dir */
block_depr = kobject_create_and_add("block", NULL); block_depr = kobject_create_and_add("block", NULL);

View File

@ -18,7 +18,6 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
struct disk_part_iter piter; struct disk_part_iter piter;
long long start, length; long long start, length;
int partno; int partno;
int err;
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -EACCES; return -EACCES;
@ -61,10 +60,10 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
disk_part_iter_exit(&piter); disk_part_iter_exit(&piter);
/* all seems OK */ /* all seems OK */
err = add_partition(disk, partno, start, length, part = add_partition(disk, partno, start, length,
ADDPART_FLAG_NONE); ADDPART_FLAG_NONE);
mutex_unlock(&bdev->bd_mutex); mutex_unlock(&bdev->bd_mutex);
return err; return IS_ERR(part) ? PTR_ERR(part) : 0;
case BLKPG_DEL_PARTITION: case BLKPG_DEL_PARTITION:
part = disk_get_part(disk, partno); part = disk_get_part(disk, partno);
if (!part) if (!part)
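
With add_partition() now returning a pointer-or-error, the ioctl translates the result with IS_ERR()/PTR_ERR() instead of a separate err variable. A minimal user-space model of that idiom (the macros below are simplified stand-ins, not the kernel's <linux/err.h>):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO       4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct part { int partno; };

/* Returns either a valid object or a negative errno encoded in the pointer. */
static struct part *add_partition_stub(int partno)
{
        static struct part p;

        if (partno < 1)
                return ERR_PTR(-EINVAL);
        p.partno = partno;
        return &p;
}

int main(void)
{
        struct part *p = add_partition_stub(0);

        printf("ioctl would return %ld\n", IS_ERR(p) ? PTR_ERR(p) : 0L);
        return 0;
}
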

View File

@ -2847,7 +2847,7 @@ static void do_cciss_request(struct request_queue *q)
h->maxSG = seg; h->maxSG = seg;
#ifdef CCISS_DEBUG #ifdef CCISS_DEBUG
printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
creq->nr_sectors, seg); creq->nr_sectors, seg);
#endif /* CCISS_DEBUG */ #endif /* CCISS_DEBUG */
@ -3197,7 +3197,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */ c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
#ifdef CCISS_DEBUG #ifdef CCISS_DEBUG
printk("address 0 = %x\n", c->paddr); printk("address 0 = %lx\n", c->paddr);
#endif /* CCISS_DEBUG */ #endif /* CCISS_DEBUG */
c->vaddr = remap_pci_mem(c->paddr, 0x250); c->vaddr = remap_pci_mem(c->paddr, 0x250);
@ -3224,7 +3224,8 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
#endif /* CCISS_DEBUG */ #endif /* CCISS_DEBUG */
cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr);
#ifdef CCISS_DEBUG #ifdef CCISS_DEBUG
printk("cfg base address index = %x\n", cfg_base_addr_index); printk("cfg base address index = %llx\n",
(unsigned long long)cfg_base_addr_index);
#endif /* CCISS_DEBUG */ #endif /* CCISS_DEBUG */
if (cfg_base_addr_index == -1) { if (cfg_base_addr_index == -1) {
printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n"); printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
@ -3234,7 +3235,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET); cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
#ifdef CCISS_DEBUG #ifdef CCISS_DEBUG
printk("cfg offset = %x\n", cfg_offset); printk("cfg offset = %llx\n", (unsigned long long)cfg_offset);
#endif /* CCISS_DEBUG */ #endif /* CCISS_DEBUG */
c->cfgtable = remap_pci_mem(pci_resource_start(pdev, c->cfgtable = remap_pci_mem(pci_resource_start(pdev,
cfg_base_addr_index) + cfg_base_addr_index) +

View File

@ -338,12 +338,18 @@ wait:
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{ {
struct request_queue *rq; struct request_queue *rq;
elevator_t *old_e;
rq = blk_init_queue(do_blkif_request, &blkif_io_lock); rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
if (rq == NULL) if (rq == NULL)
return -1; return -1;
elevator_init(rq, "noop"); old_e = rq->elevator;
if (IS_ERR_VALUE(elevator_init(rq, "noop")))
printk(KERN_WARNING
"blkfront: Switch elevator failed, use default\n");
else
elevator_exit(old_e);
/* Hard sector size and max sectors impersonate the equiv. hardware. */ /* Hard sector size and max sectors impersonate the equiv. hardware. */
blk_queue_hardsect_size(rq, sector_size); blk_queue_hardsect_size(rq, sector_size);

View File

@ -1134,7 +1134,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
continue; continue;
is_out = test_bit(FLAG_IS_OUT, &gdesc->flags); is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
seq_printf(s, " gpio-%-3d (%-12s) %s %s", seq_printf(s, " gpio-%-3d (%-20.20s) %s %s",
gpio, gdesc->label, gpio, gdesc->label,
is_out ? "out" : "in ", is_out ? "out" : "in ",
chip->get chip->get
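
The format change above widens the label column from 12 to 20 characters and also truncates over-long labels so the table stays aligned. A small illustration of what "%-20.20s" does (plain printf rather than seq_printf):

#include <stdio.h>

int main(void)
{
        /* Left-justified, padded to 20 columns, truncated after 20 chars. */
        printf(" gpio-%-3d (%-20.20s) %s\n", 7, "short-label", "out");
        printf(" gpio-%-3d (%-20.20s) %s\n", 8,
               "a-very-long-gpio-label-name", "in ");
        return 0;
}
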

View File

@ -128,6 +128,9 @@ static const char* temperature_sensors_sets[][36] = {
/* Set 13: iMac 8,1 */ /* Set 13: iMac 8,1 */
{ "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P", { "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
"TL0P", "TO0P", "TW0P", "Tm0P", "Tp0P", NULL }, "TL0P", "TO0P", "TW0P", "Tm0P", "Tp0P", NULL },
/* Set 14: iMac 6,1 */
{ "TA0P", "TC0D", "TC0H", "TC0P", "TG0D", "TG0H", "TG0P", "TH0P",
"TO0P", "Tp0P", NULL },
}; };
/* List of keys used to read/write fan speeds */ /* List of keys used to read/write fan speeds */
@ -1296,6 +1299,8 @@ static __initdata struct dmi_match_data applesmc_dmi_data[] = {
{ .accelerometer = 1, .light = 1, .temperature_set = 12 }, { .accelerometer = 1, .light = 1, .temperature_set = 12 },
/* iMac 8: light sensor only, temperature set 13 */ /* iMac 8: light sensor only, temperature set 13 */
{ .accelerometer = 0, .light = 0, .temperature_set = 13 }, { .accelerometer = 0, .light = 0, .temperature_set = 13 },
/* iMac 6: light sensor only, temperature set 14 */
{ .accelerometer = 0, .light = 0, .temperature_set = 14 },
}; };
/* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1". /* Note that DMI_MATCH(...,"MacBook") will match "MacBookPro1,1".
@ -1349,10 +1354,18 @@ static __initdata struct dmi_system_id applesmc_whitelist[] = {
DMI_MATCH(DMI_BOARD_VENDOR,"Apple"), DMI_MATCH(DMI_BOARD_VENDOR,"Apple"),
DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") }, DMI_MATCH(DMI_PRODUCT_NAME,"MacPro2") },
&applesmc_dmi_data[4]}, &applesmc_dmi_data[4]},
{ applesmc_dmi_match, "Apple MacPro", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "MacPro") },
&applesmc_dmi_data[4]},
{ applesmc_dmi_match, "Apple iMac 8", { { applesmc_dmi_match, "Apple iMac 8", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") }, DMI_MATCH(DMI_PRODUCT_NAME, "iMac8") },
&applesmc_dmi_data[13]}, &applesmc_dmi_data[13]},
{ applesmc_dmi_match, "Apple iMac 6", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac6") },
&applesmc_dmi_data[14]},
{ applesmc_dmi_match, "Apple iMac 5", { { applesmc_dmi_match, "Apple iMac 5", {
DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
DMI_MATCH(DMI_PRODUCT_NAME, "iMac5") }, DMI_MATCH(DMI_PRODUCT_NAME, "iMac5") },

View File

@ -1,3 +1,7 @@
ifdef CONFIG_SGI_GRU_DEBUG
EXTRA_CFLAGS += -DDEBUG
endif
obj-$(CONFIG_SGI_GRU) := gru.o obj-$(CONFIG_SGI_GRU) := gru.o
gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o gru-y := grufile.o grumain.o grufault.o grutlbpurge.o gruprocfs.o grukservices.o

View File

@ -1690,9 +1690,11 @@ static int atl2_resume(struct pci_dev *pdev)
ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
err = atl2_request_irq(adapter); if (netif_running(netdev)) {
if (netif_running(netdev) && err) err = atl2_request_irq(adapter);
return err; if (err)
return err;
}
atl2_reset_hw(&adapter->hw); atl2_reset_hw(&adapter->hw);

View File

@ -1112,7 +1112,7 @@ static void ipg_nic_rx_free_skb(struct net_device *dev)
struct ipg_rx *rxfd = sp->rxd + entry; struct ipg_rx *rxfd = sp->rxd + entry;
pci_unmap_single(sp->pdev, pci_unmap_single(sp->pdev,
le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
sp->rx_buf_sz, PCI_DMA_FROMDEVICE); sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb_irq(sp->rx_buff[entry]); dev_kfree_skb_irq(sp->rx_buff[entry]);
sp->rx_buff[entry] = NULL; sp->rx_buff[entry] = NULL;
@ -1179,7 +1179,7 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
*/ */
if (sp->rx_buff[entry]) { if (sp->rx_buff[entry]) {
pci_unmap_single(sp->pdev, pci_unmap_single(sp->pdev,
le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
sp->rx_buf_sz, PCI_DMA_FROMDEVICE); sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb_irq(sp->rx_buff[entry]); dev_kfree_skb_irq(sp->rx_buff[entry]);
@ -1246,7 +1246,7 @@ static void ipg_nic_rx_with_start(struct net_device *dev,
if (jumbo->found_start) if (jumbo->found_start)
dev_kfree_skb_irq(jumbo->skb); dev_kfree_skb_irq(jumbo->skb);
pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
sp->rx_buf_sz, PCI_DMA_FROMDEVICE); sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb_put(skb, sp->rxfrag_size); skb_put(skb, sp->rxfrag_size);
@ -1349,7 +1349,7 @@ static int ipg_nic_rx_jumbo(struct net_device *dev)
unsigned int entry = curr % IPG_RFDLIST_LENGTH; unsigned int entry = curr % IPG_RFDLIST_LENGTH;
struct ipg_rx *rxfd = sp->rxd + entry; struct ipg_rx *rxfd = sp->rxd + entry;
if (!(rxfd->rfs & le64_to_cpu(IPG_RFS_RFDDONE))) if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
break; break;
switch (ipg_nic_rx_check_frame_type(dev)) { switch (ipg_nic_rx_check_frame_type(dev)) {
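
The ipg fixes above all move the ~IPG_RFI_FRAGLEN mask (a CPU-order constant) outside le64_to_cpu(), and compare flag words against cpu_to_le64() constants instead. A sketch of why the ordering matters, simulating a big-endian host with an explicit byte swap (the mask value and names are invented for the example):

#include <stdint.h>
#include <stdio.h>

#define FRAGLEN_MASK    0x000000000000ffffULL   /* stand-in for IPG_RFI_FRAGLEN */

/* Pretend we are a big-endian CPU: "le64_to_cpu" must byte-swap. */
static uint64_t le64_to_cpu_be(uint64_t le) { return __builtin_bswap64(le); }

int main(void)
{
        uint64_t dma_addr  = 0x0000123456780000ULL;     /* address bits */
        uint64_t frag_len  = 0x0600;                    /* length in the low 16 bits */
        uint64_t frag_info = __builtin_bswap64(dma_addr | frag_len); /* as stored, LE */

        /* Buggy: masks the little-endian representation with a CPU-order mask. */
        uint64_t wrong = le64_to_cpu_be(frag_info & ~FRAGLEN_MASK);
        /* Fixed: convert to CPU order first, then mask off the length bits. */
        uint64_t right = le64_to_cpu_be(frag_info) & ~FRAGLEN_MASK;

        printf("wrong=%#llx right=%#llx expected=%#llx\n",
               (unsigned long long)wrong, (unsigned long long)right,
               (unsigned long long)dma_addr);
        return 0;
}
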

View File

@ -1287,7 +1287,34 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
return; return;
} }
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter); /**
* ixgbe_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
int i;
for (i = 0; i < adapter->num_msix_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
synchronize_irq(adapter->pdev->irq);
}
}
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
u32 mask;
mask = IXGBE_EIMS_ENABLE_MASK;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
IXGBE_WRITE_FLUSH(&adapter->hw);
}
/** /**
* ixgbe_intr - legacy mode Interrupt Handler * ixgbe_intr - legacy mode Interrupt Handler
@ -1393,35 +1420,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
} }
} }
/**
* ixgbe_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
int i;
for (i = 0; i < adapter->num_msix_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
synchronize_irq(adapter->pdev->irq);
}
}
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
u32 mask;
mask = IXGBE_EIMS_ENABLE_MASK;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
IXGBE_WRITE_FLUSH(&adapter->hw);
}
/** /**
* ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
* *

View File

@ -912,23 +912,23 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
skb_put(skb, framesize); skb_put(skb, framesize);
skb->protocol = eth_type_trans(skb, jme->dev); skb->protocol = eth_type_trans(skb, jme->dev);
if (jme_rxsum_ok(jme, rxdesc->descwb.flags)) if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
else else
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
if (rxdesc->descwb.flags & RXWBFLAG_TAGON) { if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
if (jme->vlgrp) { if (jme->vlgrp) {
jme->jme_vlan_rx(skb, jme->vlgrp, jme->jme_vlan_rx(skb, jme->vlgrp,
le32_to_cpu(rxdesc->descwb.vlan)); le16_to_cpu(rxdesc->descwb.vlan));
NET_STAT(jme).rx_bytes += 4; NET_STAT(jme).rx_bytes += 4;
} }
} else { } else {
jme->jme_rx(skb); jme->jme_rx(skb);
} }
if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) == if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
RXWBFLAG_DEST_MUL) cpu_to_le16(RXWBFLAG_DEST_MUL))
++(NET_STAT(jme).multicast); ++(NET_STAT(jme).multicast);
jme->dev->last_rx = jiffies; jme->dev->last_rx = jiffies;
@ -961,7 +961,7 @@ jme_process_receive(struct jme_adapter *jme, int limit)
rxdesc = rxring->desc; rxdesc = rxring->desc;
rxdesc += i; rxdesc += i;
if ((rxdesc->descwb.flags & RXWBFLAG_OWN) || if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL)) !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
goto out; goto out;
@ -1763,10 +1763,9 @@ jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
} }
static int static int
jme_tx_tso(struct sk_buff *skb, jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
u16 *mss, u8 *flags)
{ {
*mss = skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT; *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
if (*mss) { if (*mss) {
*flags |= TXFLAG_LSEN; *flags |= TXFLAG_LSEN;
@ -1826,11 +1825,11 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
} }
static inline void static inline void
jme_tx_vlan(struct sk_buff *skb, u16 *vlan, u8 *flags) jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
{ {
if (vlan_tx_tag_present(skb)) { if (vlan_tx_tag_present(skb)) {
*flags |= TXFLAG_TAGON; *flags |= TXFLAG_TAGON;
*vlan = vlan_tx_tag_get(skb); *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
} }
} }

View File

@ -899,7 +899,8 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
if (skb != NULL) { if (skb != NULL) {
if (skb_queue_len(&mp->rx_recycle) < if (skb_queue_len(&mp->rx_recycle) <
mp->default_rx_ring_size && mp->default_rx_ring_size &&
skb_recycle_check(skb, mp->skb_size)) skb_recycle_check(skb, mp->skb_size +
dma_get_cache_alignment() - 1))
__skb_queue_head(&mp->rx_recycle, skb); __skb_queue_head(&mp->rx_recycle, skb);
else else
dev_kfree_skb(skb); dev_kfree_skb(skb);
@ -2435,8 +2436,8 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev)
struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
if (pd == NULL || pd->shared_smi == NULL) { if (pd == NULL || pd->shared_smi == NULL) {
mdiobus_free(msp->smi_bus);
mdiobus_unregister(msp->smi_bus); mdiobus_unregister(msp->smi_bus);
mdiobus_free(msp->smi_bus);
} }
if (msp->err_interrupt != NO_IRQ) if (msp->err_interrupt != NO_IRQ)
free_irq(msp->err_interrupt, msp); free_irq(msp->err_interrupt, msp);

View File

@ -564,20 +564,32 @@ EXPORT_SYMBOL(genphy_restart_aneg);
*/ */
int genphy_config_aneg(struct phy_device *phydev) int genphy_config_aneg(struct phy_device *phydev)
{ {
int result = 0; int result;
if (AUTONEG_ENABLE == phydev->autoneg) { if (AUTONEG_ENABLE != phydev->autoneg)
int result = genphy_config_advert(phydev); return genphy_setup_forced(phydev);
if (result < 0) /* error */ result = genphy_config_advert(phydev);
return result;
/* Only restart aneg if we are advertising something different if (result < 0) /* error */
* than we were before. */ return result;
if (result > 0)
result = genphy_restart_aneg(phydev); if (result == 0) {
} else /* Advertisment hasn't changed, but maybe aneg was never on to
} else /* Advertisement hasn't changed, but maybe aneg was never on to
result = genphy_setup_forced(phydev); * begin with? Or maybe phy was isolated? */
int ctl = phy_read(phydev, MII_BMCR);
if (ctl < 0)
return ctl;
if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
result = 1; /* do restart aneg */
}
/* Only restart aneg if we are advertising something different
* than we were before. */
if (result > 0)
result = genphy_restart_aneg(phydev);
return result; return result;
} }

View File

@ -927,7 +927,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc; struct sh_eth_txdesc *txdesc;
u32 entry; u32 entry;
int flags; unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags); spin_lock_irqsave(&mdp->lock, flags);
if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
@ -1141,7 +1141,7 @@ static int sh_mdio_init(struct net_device *ndev, int id)
/* Hook up MII support for ethtool */ /* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii"; mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = &ndev->dev; mdp->mii_bus->parent = &ndev->dev;
mdp->mii_bus->id[0] = id; snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
/* PHY IRQ */ /* PHY IRQ */
mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);

View File

@ -1813,7 +1813,7 @@ static int __init smc911x_probe(struct net_device *dev)
val = SMC_GET_BYTE_TEST(lp); val = SMC_GET_BYTE_TEST(lp);
DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val); DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
if (val != 0x87654321) { if (val != 0x87654321) {
printk(KERN_ERR "Invalid chip endian 0x08%x\n",val); printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
retval = -ENODEV; retval = -ENODEV;
goto err_out; goto err_out;
} }

View File

@ -1102,12 +1102,14 @@ static int ax88178_link_reset(struct usbnet *dev)
mode = AX88178_MEDIUM_DEFAULT; mode = AX88178_MEDIUM_DEFAULT;
if (ecmd.speed == SPEED_1000) if (ecmd.speed == SPEED_1000)
mode |= AX_MEDIUM_GM | AX_MEDIUM_ENCK; mode |= AX_MEDIUM_GM;
else if (ecmd.speed == SPEED_100) else if (ecmd.speed == SPEED_100)
mode |= AX_MEDIUM_PS; mode |= AX_MEDIUM_PS;
else else
mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM); mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM);
mode |= AX_MEDIUM_ENCK;
if (ecmd.duplex == DUPLEX_FULL) if (ecmd.duplex == DUPLEX_FULL)
mode |= AX_MEDIUM_FD; mode |= AX_MEDIUM_FD;
else else

View File

@ -1384,7 +1384,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
rxq->queue[i] = NULL; rxq->queue[i] = NULL;
pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->aligned_dma_addr,
priv->hw_params.rx_buf_size, priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
pkt = (struct iwl_rx_packet *)rxb->skb->data; pkt = (struct iwl_rx_packet *)rxb->skb->data;
@ -1436,8 +1436,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
rxb->skb = NULL; rxb->skb = NULL;
} }
pci_unmap_single(priv->pci_dev, rxb->dma_addr, pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
priv->hw_params.rx_buf_size, priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
spin_lock_irqsave(&rxq->lock, flags); spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &priv->rxq.rx_used); list_add_tail(&rxb->list, &priv->rxq.rx_used);
@ -2341,7 +2341,6 @@ static void iwl_bg_alive_start(struct work_struct *data)
mutex_lock(&priv->mutex); mutex_lock(&priv->mutex);
iwl_alive_start(priv); iwl_alive_start(priv);
mutex_unlock(&priv->mutex); mutex_unlock(&priv->mutex);
ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
} }
static void iwl4965_bg_rf_kill(struct work_struct *work) static void iwl4965_bg_rf_kill(struct work_struct *work)

View File

@ -89,7 +89,8 @@ extern struct iwl_cfg iwl5100_abg_cfg;
#define DEFAULT_LONG_RETRY_LIMIT 4U #define DEFAULT_LONG_RETRY_LIMIT 4U
struct iwl_rx_mem_buffer { struct iwl_rx_mem_buffer {
dma_addr_t dma_addr; dma_addr_t real_dma_addr;
dma_addr_t aligned_dma_addr;
struct sk_buff *skb; struct sk_buff *skb;
struct list_head list; struct list_head list;
}; };

View File

@ -204,7 +204,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
list_del(element); list_del(element);
/* Point to Rx buffer via next RBD in circular buffer */ /* Point to Rx buffer via next RBD in circular buffer */
rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr); rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
rxq->queue[rxq->write] = rxb; rxq->queue[rxq->write] = rxb;
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--; rxq->free_count--;
@ -251,7 +251,7 @@ void iwl_rx_allocate(struct iwl_priv *priv)
rxb = list_entry(element, struct iwl_rx_mem_buffer, list); rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
/* Alloc a new receive buffer */ /* Alloc a new receive buffer */
rxb->skb = alloc_skb(priv->hw_params.rx_buf_size, rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
__GFP_NOWARN | GFP_ATOMIC); __GFP_NOWARN | GFP_ATOMIC);
if (!rxb->skb) { if (!rxb->skb) {
if (net_ratelimit()) if (net_ratelimit())
@ -266,9 +266,17 @@ void iwl_rx_allocate(struct iwl_priv *priv)
list_del(element); list_del(element);
/* Get physical address of RB/SKB */ /* Get physical address of RB/SKB */
rxb->dma_addr = rxb->real_dma_addr = pci_map_single(
pci_map_single(priv->pci_dev, rxb->skb->data, priv->pci_dev,
priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE); rxb->skb->data,
priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
/* dma address must be no more than 36 bits */
BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
list_add_tail(&rxb->list, &rxq->rx_free); list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++; rxq->free_count++;
} }
@ -300,8 +308,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].skb != NULL) { if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev, pci_unmap_single(priv->pci_dev,
rxq->pool[i].dma_addr, rxq->pool[i].real_dma_addr,
priv->hw_params.rx_buf_size, priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
dev_kfree_skb(rxq->pool[i].skb); dev_kfree_skb(rxq->pool[i].skb);
} }
@ -354,8 +362,8 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
* to an SKB, so we need to unmap and free potential storage */ * to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].skb != NULL) { if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev, pci_unmap_single(priv->pci_dev,
rxq->pool[i].dma_addr, rxq->pool[i].real_dma_addr,
priv->hw_params.rx_buf_size, priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
priv->alloc_rxb_skb--; priv->alloc_rxb_skb--;
dev_kfree_skb(rxq->pool[i].skb); dev_kfree_skb(rxq->pool[i].skb);
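
The iwlwifi change above allocates rx_buf_size + 256 bytes, keeps the real DMA address for unmapping, and hands the hardware an address rounded up to the next 256-byte boundary (reserving the slack in the skb). A user-space sketch of that over-allocate-and-align pattern (ALIGN_UP below stands in for the kernel's ALIGN macro):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_UP(x, a)  (((uintptr_t)(x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
        size_t buf_size = 4096;
        uintptr_t aligned;

        /* Over-allocate by the alignment so a 256-byte boundary always fits. */
        void *real = malloc(buf_size + 256);    /* "real" address: used to free/unmap */
        if (!real)
                return 1;
        aligned = ALIGN_UP(real, 256);          /* "aligned" address: given to hardware */

        printf("real=%p aligned=%#lx slack=%lu bytes\n", real,
               (unsigned long)aligned, (unsigned long)(aligned - (uintptr_t)real));
        free(real);
        return 0;
}
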

View File

@ -6012,7 +6012,6 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
mutex_lock(&priv->mutex); mutex_lock(&priv->mutex);
iwl3945_alive_start(priv); iwl3945_alive_start(priv);
mutex_unlock(&priv->mutex); mutex_unlock(&priv->mutex);
ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
} }
static void iwl3945_bg_rf_kill(struct work_struct *work) static void iwl3945_bg_rf_kill(struct work_struct *work)

View File

@ -331,7 +331,7 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
/* Fill the receive configuration URB and initialise the Rx call back */ /* Fill the receive configuration URB and initialise the Rx call back */
usb_fill_bulk_urb(cardp->rx_urb, cardp->udev, usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
usb_rcvbulkpipe(cardp->udev, cardp->ep_in), usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
(void *) (skb->tail), skb_tail_pointer(skb),
MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp); MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;

View File

@ -36,7 +36,7 @@ if PARPORT
config PARPORT_PC config PARPORT_PC
tristate "PC-style hardware" tristate "PC-style hardware"
depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \ depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \
(!M68K || ISA) && !MN10300 && !AVR32 (!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN
---help--- ---help---
You should say Y here if you have a PC-style parallel port. All You should say Y here if you have a PC-style parallel port. All
IBM PC compatible computers and some Alphas have PC-style IBM PC compatible computers and some Alphas have PC-style

View File

@ -1655,12 +1655,14 @@ int __init init_dmars(void)
iommu->flush.flush_context = __iommu_flush_context; iommu->flush.flush_context = __iommu_flush_context;
iommu->flush.flush_iotlb = __iommu_flush_iotlb; iommu->flush.flush_iotlb = __iommu_flush_iotlb;
printk(KERN_INFO "IOMMU 0x%Lx: using Register based " printk(KERN_INFO "IOMMU 0x%Lx: using Register based "
"invalidation\n", drhd->reg_base_addr); "invalidation\n",
(unsigned long long)drhd->reg_base_addr);
} else { } else {
iommu->flush.flush_context = qi_flush_context; iommu->flush.flush_context = qi_flush_context;
iommu->flush.flush_iotlb = qi_flush_iotlb; iommu->flush.flush_iotlb = qi_flush_iotlb;
printk(KERN_INFO "IOMMU 0x%Lx: using Queued " printk(KERN_INFO "IOMMU 0x%Lx: using Queued "
"invalidation\n", drhd->reg_base_addr); "invalidation\n",
(unsigned long long)drhd->reg_base_addr);
} }
} }

View File

@ -1832,7 +1832,7 @@ int pci_reset_function(struct pci_dev *dev)
if (!(cap & PCI_EXP_DEVCAP_FLR)) if (!(cap & PCI_EXP_DEVCAP_FLR))
return -ENOTTY; return -ENOTTY;
if (!dev->msi_enabled && !dev->msix_enabled) if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
disable_irq(dev->irq); disable_irq(dev->irq);
pci_save_state(dev); pci_save_state(dev);
@ -1841,7 +1841,7 @@ int pci_reset_function(struct pci_dev *dev)
r = pci_execute_reset_function(dev); r = pci_execute_reset_function(dev);
pci_restore_state(dev); pci_restore_state(dev);
if (!dev->msi_enabled && !dev->msix_enabled) if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
enable_irq(dev->irq); enable_irq(dev->irq);
return r; return r;

View File

@ -352,21 +352,21 @@ static int map_dma_buffers(struct driver_data *drv_data)
} else } else
drv_data->tx_map_len = drv_data->len; drv_data->tx_map_len = drv_data->len;
/* Stream map the rx buffer */ /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
drv_data->rx_dma = dma_map_single(dev, drv_data->rx, * so we flush the cache *before* invalidating it, in case
drv_data->rx_map_len, * the tx and rx buffers overlap.
DMA_FROM_DEVICE); */
if (dma_mapping_error(dev, drv_data->rx_dma)) drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
drv_data->tx_map_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, drv_data->tx_dma))
return 0; return 0;
/* Stream map the tx buffer */ /* Stream map the rx buffer */
drv_data->tx_dma = dma_map_single(dev, drv_data->tx, drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
drv_data->tx_map_len,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, drv_data->tx_dma)) {
dma_unmap_single(dev, drv_data->rx_dma,
drv_data->rx_map_len, DMA_FROM_DEVICE); drv_data->rx_map_len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, drv_data->rx_dma)) {
dma_unmap_single(dev, drv_data->tx_dma,
drv_data->tx_map_len, DMA_TO_DEVICE);
return 0; return 0;
} }

View File

@ -506,20 +506,6 @@ static int map_dma_buffers(struct driver_data *drv_data)
if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
return -1; return -1;
/* NULL rx means write-only transfer and no map needed
since rx DMA will not be used */
if (drv_data->rx) {
buf = drv_data->rx;
drv_data->rx_dma = dma_map_single(
dev,
buf,
drv_data->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, drv_data->rx_dma))
return -1;
drv_data->rx_dma_needs_unmap = 1;
}
if (drv_data->tx == NULL) { if (drv_data->tx == NULL) {
/* Read only message --> use drv_data->dummy_dma_buf for dummy /* Read only message --> use drv_data->dummy_dma_buf for dummy
writes to achive reads */ writes to achive reads */
writes to achieve reads */ writes to achieve reads */
@ -533,18 +519,31 @@ static int map_dma_buffers(struct driver_data *drv_data)
buf, buf,
drv_data->tx_map_len, drv_data->tx_map_len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (dma_mapping_error(dev, drv_data->tx_dma)) { if (dma_mapping_error(dev, drv_data->tx_dma))
if (drv_data->rx_dma) {
dma_unmap_single(dev,
drv_data->rx_dma,
drv_data->len,
DMA_FROM_DEVICE);
drv_data->rx_dma_needs_unmap = 0;
}
return -1; return -1;
}
drv_data->tx_dma_needs_unmap = 1; drv_data->tx_dma_needs_unmap = 1;
/* NULL rx means write-only transfer and no map needed
* since rx DMA will not be used */
if (drv_data->rx) {
buf = drv_data->rx;
drv_data->rx_dma = dma_map_single(dev,
buf,
drv_data->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, drv_data->rx_dma)) {
if (drv_data->tx_dma) {
dma_unmap_single(dev,
drv_data->tx_dma,
drv_data->tx_map_len,
DMA_TO_DEVICE);
drv_data->tx_dma_needs_unmap = 0;
}
return -1;
}
drv_data->rx_dma_needs_unmap = 1;
}
return 0; return 0;
} }

View File

@ -172,7 +172,6 @@ static struct usb_interface_descriptor rndis_data_intf __initdata = {
.bDescriptorType = USB_DT_INTERFACE, .bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */ /* .bInterfaceNumber = DYNAMIC */
.bAlternateSetting = 1,
.bNumEndpoints = 2, .bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_CDC_DATA, .bInterfaceClass = USB_CLASS_CDC_DATA,
.bInterfaceSubClass = 0, .bInterfaceSubClass = 0,
@ -303,7 +302,7 @@ static void rndis_response_available(void *_rndis)
__le32 *data = req->buf; __le32 *data = req->buf;
int status; int status;
if (atomic_inc_return(&rndis->notify_count)) if (atomic_inc_return(&rndis->notify_count) != 1)
return; return;
/* Send RNDIS RESPONSE_AVAILABLE notification; a /* Send RNDIS RESPONSE_AVAILABLE notification; a
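
The one-character fix above matters because atomic_inc_return() yields the new counter value: only the 0 -> 1 transition should queue a notification, while any other result means one is already outstanding. A C11-atomics analogue of the corrected logic (user-space, not the kernel atomic API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int notify_count;

static void response_available(void)
{
        /* fetch_add returns the old value; +1 gives the post-increment value. */
        if (atomic_fetch_add(&notify_count, 1) + 1 != 1)
                return;         /* a notification is already pending */
        printf("send RESPONSE_AVAILABLE\n");
}

int main(void)
{
        response_available();   /* sends */
        response_available();   /* suppressed */
        return 0;
}
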

View File

@ -66,6 +66,8 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
{ {
struct ehci_hcd *ehci = hcd_to_ehci(hcd); struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller); struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
struct pci_dev *p_smbus;
u8 rev;
u32 temp; u32 temp;
int retval; int retval;
@ -166,6 +168,25 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
pci_write_config_byte(pdev, 0x4b, tmp | 0x20); pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
} }
break; break;
case PCI_VENDOR_ID_ATI:
/* SB700 old version has a bug in EHCI controller,
* which causes usb devices lose response in some cases.
*/
if (pdev->device == 0x4396) {
p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
PCI_DEVICE_ID_ATI_SBX00_SMBUS,
NULL);
if (!p_smbus)
break;
rev = p_smbus->revision;
if ((rev == 0x3a) || (rev == 0x3b)) {
u8 tmp;
pci_read_config_byte(pdev, 0x53, &tmp);
pci_write_config_byte(pdev, 0x53, tmp | (1<<3));
}
pci_dev_put(p_smbus);
}
break;
} }
ehci_reset(ehci); ehci_reset(ehci);

View File

@ -687,7 +687,10 @@ static ssize_t mon_bin_read(struct file *file, char __user *buf,
} }
if (rp->b_read >= sizeof(struct mon_bin_hdr)) { if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
step_len = min(nbytes, (size_t)ep->len_cap); step_len = ep->len_cap;
step_len -= rp->b_read - sizeof(struct mon_bin_hdr);
if (step_len > nbytes)
step_len = nbytes;
offset = rp->b_out + PKT_SIZE; offset = rp->b_out + PKT_SIZE;
offset += rp->b_read - sizeof(struct mon_bin_hdr); offset += rp->b_read - sizeof(struct mon_bin_hdr);
if (offset >= rp->b_size) if (offset >= rp->b_size)
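
The fix above accounts for data already consumed on a previous partial read: the chunk still owed to the caller is the captured length minus what has been read past the header, then clamped to the user buffer. A few lines of plain arithmetic showing the corrected computation (names mirror the driver, the values are invented):

#include <stddef.h>
#include <stdio.h>

struct hdr { char pad[64]; };            /* stand-in for struct mon_bin_hdr */

int main(void)
{
        size_t len_cap = 1500;                          /* captured bytes */
        size_t b_read  = sizeof(struct hdr) + 1000;     /* header + 1000 data bytes consumed */
        size_t nbytes  = 4096;                          /* room left in the user buffer */

        size_t step_len = len_cap - (b_read - sizeof(struct hdr));
        if (step_len > nbytes)
                step_len = nbytes;

        printf("copy %zu more bytes (old code would have tried %zu)\n",
               step_len, len_cap < nbytes ? len_cap : nbytes);
        return 0;
}
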

View File

@ -1757,7 +1757,7 @@ static int musb_schedule(
} }
} }
/* use bulk reserved ep1 if no other ep is free */ /* use bulk reserved ep1 if no other ep is free */
if (best_end > 0 && qh->type == USB_ENDPOINT_XFER_BULK) { if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
hw_ep = musb->bulk_ep; hw_ep = musb->bulk_ep;
if (is_in) if (is_in)
head = &musb->in_bulk; head = &musb->in_bulk;

View File

@ -56,6 +56,7 @@ static void cp2101_shutdown(struct usb_serial *);
static int debug; static int debug;
static struct usb_device_id id_table [] = { static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */

View File

@ -167,6 +167,13 @@ UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600,
US_SC_DEVICE, US_PR_DEVICE, NULL, US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ), US_FL_FIX_CAPACITY ),
/* Patch for Nokia 5310 capacity */
UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591,
"Nokia",
"5310",
US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
/* Reported by Mario Rettig <mariorettig@web.de> */ /* Reported by Mario Rettig <mariorettig@web.de> */
UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100, UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100,
"Nokia", "Nokia",
@ -233,14 +240,14 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
US_FL_MAX_SECTORS_64 ), US_FL_MAX_SECTORS_64 ),
/* Reported by Cedric Godin <cedric@belbone.be> */ /* Reported by Cedric Godin <cedric@belbone.be> */
UNUSUAL_DEV( 0x0421, 0x04b9, 0x0551, 0x0551, UNUSUAL_DEV( 0x0421, 0x04b9, 0x0500, 0x0551,
"Nokia", "Nokia",
"5300", "5300",
US_SC_DEVICE, US_PR_DEVICE, NULL, US_SC_DEVICE, US_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ), US_FL_FIX_CAPACITY ),
/* Reported by Richard Nauber <RichardNauber@web.de> */ /* Reported by Richard Nauber <RichardNauber@web.de> */
UNUSUAL_DEV( 0x0421, 0x04fa, 0x0601, 0x0601, UNUSUAL_DEV( 0x0421, 0x04fa, 0x0550, 0x0660,
"Nokia", "Nokia",
"6300", "6300",
US_SC_DEVICE, US_PR_DEVICE, NULL, US_SC_DEVICE, US_PR_DEVICE, NULL,

View File

@ -132,7 +132,7 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
bl = backlight_device_register("backlight", &sinfo->pdev->dev, bl = backlight_device_register("backlight", &sinfo->pdev->dev,
sinfo, &atmel_lcdc_bl_ops); sinfo, &atmel_lcdc_bl_ops);
if (IS_ERR(sinfo->backlight)) { if (IS_ERR(bl)) {
dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n", dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n",
PTR_ERR(bl)); PTR_ERR(bl));
return; return;

View File

@ -119,6 +119,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
default: default:
dev_err(&pdev->dev, "invalid backlight device ID(%d)\n", dev_err(&pdev->dev, "invalid backlight device ID(%d)\n",
pdev->id); pdev->id);
kfree(data);
return -EINVAL; return -EINVAL;
} }
@ -130,6 +131,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
data, &da903x_backlight_ops); data, &da903x_backlight_ops);
if (IS_ERR(bl)) { if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n"); dev_err(&pdev->dev, "failed to register backlight\n");
kfree(data);
return PTR_ERR(bl); return PTR_ERR(bl);
} }
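
Both additions above free the driver's private data on the probe error paths, which previously leaked it. A minimal sketch of the pattern: every exit after the allocation either hands ownership on or releases it (names invented, malloc/free stand in for kzalloc/kfree):

#include <stdlib.h>

struct bl_data { int id; };

static int probe_stub(int id)
{
        struct bl_data *data = malloc(sizeof(*data));

        if (!data)
                return -1;

        if (id < 0) {           /* invalid device ID */
                free(data);     /* error path must release what probe allocated */
                return -1;
        }

        data->id = id;
        /* success: ownership would pass to the registered device... */
        free(data);             /* stand-in for the eventual remove() path */
        return 0;
}

int main(void)
{
        return (probe_stub(-1) == -1 && probe_stub(1) == 0) ? 0 : 1;
}
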

View File

@ -42,10 +42,13 @@ static int fb_notifier_callback(struct notifier_block *self,
mutex_lock(&ld->ops_lock); mutex_lock(&ld->ops_lock);
if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) { if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) {
if (event == FB_EVENT_BLANK) if (event == FB_EVENT_BLANK) {
ld->ops->set_power(ld, *(int *)evdata->data); if (ld->ops->set_power)
else ld->ops->set_power(ld, *(int *)evdata->data);
ld->ops->set_mode(ld, evdata->data); } else {
if (ld->ops->set_mode)
ld->ops->set_mode(ld, evdata->data);
}
} }
mutex_unlock(&ld->ops_lock); mutex_unlock(&ld->ops_lock);
return 0; return 0;
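
The lcd notifier fix above only invokes set_power()/set_mode() when the driver actually provides them, so a driver implementing one callback but not the other no longer triggers a NULL call. The guard pattern in isolation (illustrative types, not the lcd_device API):

#include <stddef.h>
#include <stdio.h>

struct lcd_ops_demo {
        void (*set_power)(int power);
        void (*set_mode)(const char *mode);
};

static void notifier(const struct lcd_ops_demo *ops, int blank_event, int power)
{
        if (blank_event) {
                if (ops->set_power)             /* optional callback */
                        ops->set_power(power);
        } else {
                if (ops->set_mode)
                        ops->set_mode("800x600");
        }
}

static void demo_set_power(int power) { printf("power -> %d\n", power); }

int main(void)
{
        struct lcd_ops_demo ops = { .set_power = demo_set_power, .set_mode = NULL };

        notifier(&ops, 1, 0);   /* calls set_power */
        notifier(&ops, 0, 0);   /* set_mode missing: safely skipped */
        return 0;
}
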

Some files were not shown because too many files have changed in this diff