dect / linux-2.6

Merge branch 'for-rmk-misc' into for-rmk

Uwe Kleine-Koenig 2008-12-12 11:18:54 +01:00
commit 7971db5a4b
71 changed files with 431 additions and 228 deletions

View File

@@ -156,7 +156,6 @@ config ARCH_MTD_XIP
 	bool
 config GENERIC_HARDIRQS_NO__DO_IRQ
-	bool
 	def_bool y
 if OPROFILE

View File

@@ -728,9 +728,9 @@ CONFIG_RTC_CLASS=m
 #
 # RTC interfaces
 #
-CONFIG_RTC_INTF_SYSFS=m
-CONFIG_RTC_INTF_PROC=m
-CONFIG_RTC_INTF_DEV=m
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
 #
 # RTC drivers

View File

@@ -1069,9 +1069,9 @@ CONFIG_RTC_CLASS=m
 #
 # RTC interfaces
 #
-CONFIG_RTC_INTF_SYSFS=m
-CONFIG_RTC_INTF_PROC=m
-CONFIG_RTC_INTF_DEV=m
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
 # CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
 #

View File

@@ -237,6 +237,7 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 #if __LINUX_ARM_ARCH__ < 5
 #include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/ffs.h>
@@ -277,16 +278,19 @@ static inline int constant_fls(int x)
  * the clz instruction for much better code efficiency.
  */
-#define __fls(x) \
-	( __builtin_constant_p(x) ? constant_fls(x) : \
-	  ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) )
-
-/* Implement fls() in C so that 64-bit args are suitably truncated */
 static inline int fls(int x)
 {
-	return __fls(x);
+	int ret;
+
+	if (__builtin_constant_p(x))
+		return constant_fls(x);
+
+	asm("clz\t%0, %1" : "=r" (ret) : "r" (x) : "cc");
+	ret = 32 - ret;
+	return ret;
 }
+
+#define __fls(x) (fls(x) - 1)
 #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
 #define __ffs(x) (ffs(x) - 1)
 #define ffz(x) __ffs( ~(x) )
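
Aside (not part of the patch): the rewritten fls() keeps the same contract as before, and the neighbouring macros are all derived from it. A small self-contained C sketch of those relationships, using the compiler builtin instead of the ARM clz instruction:

    /* Illustration only; mirrors the macro relationships, not the kernel code. */
    #include <assert.h>
    #include <stdio.h>

    static int my_fls(unsigned int x)     /* highest set bit, 1-based; 0 for x == 0 */
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
        unsigned int x = 0x00900000;       /* bits 23 and 20 set */

        int fls_x = my_fls(x);             /* 24                                  */
        int msb   = fls_x - 1;             /* __fls(): 0-based index, 23          */
        int ffs_x = my_fls(x & -x);        /* ffs(): lowest set bit, 1-based, 21  */
        int lsb   = ffs_x - 1;             /* __ffs(): 0-based index, 20          */
        int ffz_x = my_fls(~x & -~x) - 1;  /* ffz(): first zero bit of x, 0       */

        printf("fls=%d __fls=%d ffs=%d __ffs=%d ffz=%d\n",
               fls_x, msb, ffs_x, lsb, ffz_x);
        assert(msb == 23 && lsb == 20 && ffz_x == 0);
        return 0;
    }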

View File

@@ -23,7 +23,7 @@
 #include <asm/types.h>
 #ifdef __KERNEL__
-#define STACK_TOP	((current->personality == PER_LINUX_32BIT) ? \
+#define STACK_TOP	((current->personality & ADDR_LIMIT_32BIT) ? \
 			 TASK_SIZE : TASK_SIZE_26)
 #define STACK_TOP_MAX	TASK_SIZE
 #endif
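
Aside (not part of the patch): PER_LINUX_32BIT is simply PER_LINUX with the ADDR_LIMIT_32BIT flag set, so comparing the whole personality word with == fails as soon as any other flag is present; testing the single bit with & is the robust form. A hedged userspace sketch, with flag values copied from include/linux/personality.h:

    /* Illustration only: why the bit test is safer than an equality test. */
    #include <stdio.h>

    #define ADDR_LIMIT_32BIT  0x0800000   /* values as in include/linux/personality.h */
    #define STICKY_TIMEOUTS   0x4000000
    #define PER_LINUX         0x0000000
    #define PER_LINUX_32BIT   (PER_LINUX | ADDR_LIMIT_32BIT)

    int main(void)
    {
        /* a task that asked for a 32-bit address limit *and* another quirk */
        unsigned long personality = PER_LINUX_32BIT | STICKY_TIMEOUTS;

        printf("== PER_LINUX_32BIT : %d\n", personality == PER_LINUX_32BIT);     /* 0 */
        printf("&  ADDR_LIMIT_32BIT: %d\n", !!(personality & ADDR_LIMIT_32BIT)); /* 1 */
        return 0;
    }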

View File

@@ -115,6 +115,8 @@ EXPORT_SYMBOL(__strnlen_user);
 EXPORT_SYMBOL(__strncpy_from_user);
 #ifdef CONFIG_MMU
+EXPORT_SYMBOL(copy_page);
+
 EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
@@ -181,8 +183,6 @@ EXPORT_SYMBOL(_find_first_bit_be);
 EXPORT_SYMBOL(_find_next_bit_be);
 #endif
-EXPORT_SYMBOL(copy_page);
-
 #ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(mcount);
 #endif

View File

@@ -95,7 +95,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	return ret;
 }
-/* run from kstop_machine */
+/* run from ftrace_init with irqs disabled */
 int __init ftrace_dyn_arch_init(void *data)
 {
 	ftrace_mcount_set(data);

View File

@@ -141,6 +141,15 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
 	/* Use "raw" primitives so we behave correctly on RT kernels. */
 	raw_local_irq_save(flags);
+	/*
+	 * According to Thomas Gleixner irqs are already disabled here. Simply
+	 * removing raw_local_irq_save above (and the matching
+	 * raw_local_irq_restore) was not accepted. See
+	 * http://thread.gmane.org/gmane.linux.ports.arm.kernel/41174
+	 * So for now (2008-11-20) just warn once if irqs were not disabled ...
+	 */
+	WARN_ON_ONCE(!raw_irqs_disabled_flags(flags));
+
 	/* The alarm IRQ uses absolute time (now+delta), not the relative
 	 * time (delta) in our calling convention.  Like all clockevents
 	 * using such "match" hardware, we have a race to defend against.

View File

@@ -128,7 +128,7 @@ void __init omap1_map_common_io(void)
  * Common low-level hardware init for omap1. This should only get called from
  * board specific init.
  */
-void __init omap1_init_common_hw()
+void __init omap1_init_common_hw(void)
 {
 	/* REVISIT: Refer to OMAP5910 Errata, Advisory SYS_1: "Timeout Abort
 	 * on a Posted Write in the TIPB Bridge".

View File

@@ -70,6 +70,10 @@ static unsigned long ai_dword;
 static unsigned long ai_multi;
 static int ai_usermode;
+#define UM_WARN		(1 << 0)
+#define UM_FIXUP	(1 << 1)
+#define UM_SIGNAL	(1 << 2)
+
 #ifdef CONFIG_PROC_FS
 static const char *usermode_action[] = {
 	"ignored",
@@ -754,7 +758,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 user:
 	ai_user += 1;
-	if (ai_usermode & 1)
+	if (ai_usermode & UM_WARN)
 		printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
 		       "Address=0x%08lx FSR 0x%03x\n", current->comm,
 		       task_pid_nr(current), instrptr,
@@ -762,10 +766,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		       thumb_mode(regs) ? tinstr : instr,
 		       addr, fsr);
-	if (ai_usermode & 2)
+	if (ai_usermode & UM_FIXUP)
 		goto fixup;
-	if (ai_usermode & 4)
+	if (ai_usermode & UM_SIGNAL)
 		force_sig(SIGBUS, current);
 	else
 		set_cr(cr_no_alignment);
@@ -796,6 +800,22 @@ static int __init alignment_init(void)
 	res->write_proc = proc_alignment_write;
 #endif
+	/*
+	 * ARMv6 and later CPUs can perform unaligned accesses for
+	 * most single load and store instructions up to word size.
+	 * LDM, STM, LDRD and STRD still need to be handled.
+	 *
+	 * Ignoring the alignment fault is not an option on these
+	 * CPUs since we spin re-faulting the instruction without
+	 * making any progress.
+	 */
+	if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) {
+		cr_alignment &= ~CR_A;
+		cr_no_alignment &= ~CR_A;
+		set_cr(cr_alignment);
+		ai_usermode = UM_FIXUP;
+	}
+
 	hook_fault_code(1, do_alignment, SIGILL, "alignment exception");
 	hook_fault_code(3, do_alignment, SIGILL, "alignment exception");
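
Aside (not part of the patch): ai_usermode is the same bitmask that is written through /proc/cpu/alignment, so the new UM_* names simply label the old magic numbers 1, 2 and 4. A minimal sketch decoding such a mask:

    /* Illustration only: decoding the alignment-trap user mode bitmask. */
    #include <stdio.h>

    #define UM_WARN   (1 << 0)   /* log a warning for each user fault   */
    #define UM_FIXUP  (1 << 1)   /* emulate the access and continue     */
    #define UM_SIGNAL (1 << 2)   /* send SIGBUS instead of fixing it up */

    static void describe(int mode)
    {
        printf("mode %d:%s%s%s%s\n", mode,
               mode & UM_WARN   ? " warn"    : "",
               mode & UM_FIXUP  ? " fixup"   : "",
               mode & UM_SIGNAL ? " signal"  : "",
               mode == 0        ? " ignored" : "");
    }

    int main(void)
    {
        /* UM_FIXUP is what alignment_init() now picks on ARMv6+ with the U bit set */
        describe(0);
        describe(UM_WARN | UM_FIXUP);
        describe(UM_FIXUP);
        return 0;
    }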

View File

@@ -353,8 +353,8 @@ struct omapfb_device {
 	u32			pseudo_palette[17];
 	struct lcd_panel	*panel;		/* LCD panel */
-	struct lcd_ctrl		*ctrl;		/* LCD controller */
-	struct lcd_ctrl		*int_ctrl;	/* internal LCD ctrl */
+	const struct lcd_ctrl	*ctrl;		/* LCD controller */
+	const struct lcd_ctrl	*int_ctrl;	/* internal LCD ctrl */
 	struct lcd_ctrl_extif	*ext_if;	/* LCD ctrl external
						   interface */
 	struct device		*dev;

View File

@@ -255,7 +255,7 @@ void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
 	if (!_omap_sram_reprogram_clock)
 		omap_sram_error();
-	return _omap_sram_reprogram_clock(dpllctl, ckctl);
+	_omap_sram_reprogram_clock(dpllctl, ckctl);
 }
 int __init omap1_sram_init(void)
@@ -282,8 +282,8 @@ void omap2_sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
 	if (!_omap2_sram_ddr_init)
 		omap_sram_error();
-	return _omap2_sram_ddr_init(slow_dll_ctrl, fast_dll_ctrl,
-				    base_cs, force_unlock);
+	_omap2_sram_ddr_init(slow_dll_ctrl, fast_dll_ctrl,
+			     base_cs, force_unlock);
 }
 static void (*_omap2_sram_reprogram_sdrc)(u32 perf_level, u32 dll_val,
@@ -294,7 +294,7 @@ void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val, u32 mem_type)
 	if (!_omap2_sram_reprogram_sdrc)
 		omap_sram_error();
-	return _omap2_sram_reprogram_sdrc(perf_level, dll_val, mem_type);
+	_omap2_sram_reprogram_sdrc(perf_level, dll_val, mem_type);
 }
 static u32 (*_omap2_set_prcm)(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);

View File

@@ -35,7 +35,7 @@
 #define PCIE_CONF_REG(r)	((((r) & 0xf00) << 16) | ((r) & 0xfc))
 #define PCIE_CONF_BUS(b)	(((b) & 0xff) << 16)
 #define PCIE_CONF_DEV(d)	(((d) & 0x1f) << 11)
-#define PCIE_CONF_FUNC(f)	(((f) & 0x3) << 8)
+#define PCIE_CONF_FUNC(f)	(((f) & 0x7) << 8)
 #define PCIE_CONF_DATA_OFF	0x18fc
 #define PCIE_MASK_OFF		0x1910
 #define PCIE_CTRL_OFF		0x1a00
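
Aside (not part of the patch): a PCI function number is 3 bits wide (functions 0 through 7), so masking it with 0x3 silently aliased functions 4-7 onto 0-3. A self-contained sketch of how these macros pack a config-space address:

    /* Illustration only: packing a PCIe config-space address from its fields. */
    #include <stdio.h>
    #include <stdint.h>

    #define PCIE_CONF_BUS(b)   (((b) & 0xff) << 16)
    #define PCIE_CONF_DEV(d)   (((d) & 0x1f) << 11)
    #define PCIE_CONF_FUNC(f)  (((f) & 0x7) << 8)     /* 3 bits: functions 0-7 */
    #define PCIE_CONF_REG(r)   ((((r) & 0xf00) << 16) | ((r) & 0xfc))

    int main(void)
    {
        uint32_t addr = PCIE_CONF_BUS(1) | PCIE_CONF_DEV(3) |
                        PCIE_CONF_FUNC(5) | PCIE_CONF_REG(0x10);

        printf("config address: 0x%08x\n", addr);
        /* with the old 0x3 mask, function 5 would have been truncated to 1 */
        printf("old mask would give function %u instead of 5\n", 5 & 0x3);
        return 0;
    }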

View File

@@ -40,6 +40,7 @@ _GLOBAL(__setup_cpu_460gt)
 	mtlr	r4
 	blr
+_GLOBAL(__setup_cpu_440x5)
 _GLOBAL(__setup_cpu_440gx)
 _GLOBAL(__setup_cpu_440spe)
 	b	__fixup_440A_mcheck

View File

@@ -39,6 +39,7 @@ extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec);
 extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec);
 extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec);
 extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec);
+extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec);
 extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec);
 extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec);
 extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
@@ -1500,6 +1501,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_user_features	= COMMON_USER_BOOKE,
 		.icache_bsize		= 32,
 		.dcache_bsize		= 32,
+		.cpu_setup		= __setup_cpu_440x5,
+		.machine_check		= machine_check_440A,
 		.platform		= "ppc440",
 	},
 	{ /* 460EX */

View File

@@ -344,7 +344,7 @@ static int iommu_map(struct protection_domain *dom,
 	u64 __pte, *pte, *page;
 	bus_addr  = PAGE_ALIGN(bus_addr);
-	phys_addr = PAGE_ALIGN(bus_addr);
+	phys_addr = PAGE_ALIGN(phys_addr);
 	/* only support 512GB address spaces for now */
 	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@@ -600,7 +600,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
 			continue;
 		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++i) {
+		for (j = 0; j < 512; ++j) {
 			if (!IOMMU_PTE_PRESENT(p2[j]))
 				continue;
 			p3 = IOMMU_PTE_PAGE(p2[j]);
@@ -910,7 +910,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 	if (address >= dom->aperture_size)
 		return;
-	WARN_ON(address & 0xfffULL || address > dom->aperture_size);
+	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
 	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
 	pte += IOMMU_PTE_L0_INDEX(address);
@@ -922,8 +922,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 /*
  * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is uses by all
- * mapping functions provided by this IOMMU driver.
+ * contiguous memory region into DMA address space. It is used by all
+ * mapping functions provided with this IOMMU driver.
  * Must be called with the domain lock held.
  */
 static dma_addr_t __map_single(struct device *dev,
@@ -983,7 +983,8 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_addr_t i, start;
 	unsigned int pages;
-	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
+	if ((dma_addr == bad_dma_address) ||
+	    (dma_addr + size > dma_dom->aperture_size))
 		return;
 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
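
Aside (not part of the patch): the reworked WARN_ON swaps the ad-hoc 0xfff test for ~PAGE_MASK and makes the upper bound exclusive. A tiny userspace sketch of those two checks, assuming 4 KiB pages and a made-up aperture size:

    /* Illustration only: page-alignment checks equivalent to the new WARN_ON. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t aperture_size = 1UL << 26;   /* hypothetical 64 MiB aperture */
        uint64_t addrs[] = { 0x1000, 0x1234, 1UL << 26 };

        for (int i = 0; i < 3; i++) {
            uint64_t a = addrs[i];
            int bad = (a & ~PAGE_MASK) || (a >= aperture_size);
            printf("0x%08llx -> %s\n", (unsigned long long)a,
                   bad ? "would trigger WARN_ON" : "ok");
        }
        return 0;
    }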

View File

@@ -7,7 +7,8 @@
 #include <asm/paravirt.h>
-static void default_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+static inline void
+default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
 	__raw_spin_lock(lock);
 }

View File

@@ -202,6 +202,8 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
 	rq->timeout = q->sg_timeout;
 	if (!rq->timeout)
 		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+		rq->timeout = BLK_MIN_SG_TIMEOUT;
 	return 0;
 }

View File

@@ -208,6 +208,8 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 	rq->timeout = q->sg_timeout;
 	if (!rq->timeout)
 		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+		rq->timeout = BLK_MIN_SG_TIMEOUT;
 	return 0;
 }

View File

@@ -153,7 +153,7 @@ config SATA_PROMISE
 	  If unsure, say N.
 config SATA_SX4
-	tristate "Promise SATA SX4 support"
+	tristate "Promise SATA SX4 support (Experimental)"
 	depends on PCI && EXPERIMENTAL
 	help
 	  This option enables support for Promise Serial ATA SX4.
@@ -219,8 +219,8 @@ config PATA_ACPI
 	  otherwise unsupported hardware.
 config PATA_ALI
-	tristate "ALi PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "ALi PATA support"
+	depends on PCI
 	help
 	  This option enables support for the ALi ATA interfaces
 	  found on the many ALi chipsets.
@@ -263,7 +263,7 @@ config PATA_ATIIXP
 	  If unsure, say N.
 config PATA_CMD640_PCI
-	tristate "CMD640 PCI PATA support (Very Experimental)"
+	tristate "CMD640 PCI PATA support (Experimental)"
 	depends on PCI && EXPERIMENTAL
 	help
 	  This option enables support for the CMD640 PCI IDE
@@ -291,8 +291,8 @@ config PATA_CS5520
 	  If unsure, say N.
 config PATA_CS5530
-	tristate "CS5530 PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "CS5530 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the Cyrix/NatSemi/AMD CS5530
 	  companion chip used with the MediaGX/Geode processor family.
@@ -309,8 +309,8 @@ config PATA_CS5535
 	  If unsure, say N.
 config PATA_CS5536
-	tristate "CS5536 PATA support (Experimental)"
-	depends on PCI && X86 && !X86_64 && EXPERIMENTAL
+	tristate "CS5536 PATA support"
+	depends on PCI && X86 && !X86_64
 	help
 	  This option enables support for the AMD CS5536
 	  companion chip used with the Geode LX processor family.
@@ -363,7 +363,7 @@ config PATA_HPT37X
 	  If unsure, say N.
 config PATA_HPT3X2N
-	tristate "HPT 372N/302N PATA support (Very Experimental)"
+	tristate "HPT 372N/302N PATA support (Experimental)"
 	depends on PCI && EXPERIMENTAL
 	help
 	  This option enables support for the N variant HPT PATA
@@ -389,8 +389,8 @@ config PATA_HPT3X3_DMA
 	  problems with DMA on this chipset.
 config PATA_ISAPNP
-	tristate "ISA Plug and Play PATA support (Experimental)"
-	depends on EXPERIMENTAL && ISAPNP
+	tristate "ISA Plug and Play PATA support"
+	depends on ISAPNP
 	help
 	  This option enables support for ISA plug & play ATA
 	  controllers such as those found on old soundcards.
@@ -498,8 +498,8 @@ config PATA_NINJA32
 	  If unsure, say N.
 config PATA_NS87410
-	tristate "Nat Semi NS87410 PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "Nat Semi NS87410 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the National Semiconductor
 	  NS87410 PCI-IDE controller.
@@ -507,8 +507,8 @@ config PATA_NS87410
 	  If unsure, say N.
 config PATA_NS87415
-	tristate "Nat Semi NS87415 PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "Nat Semi NS87415 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the National Semiconductor
 	  NS87415 PCI-IDE controller.
@@ -544,8 +544,8 @@ config PATA_PCMCIA
 	  If unsure, say N.
 config PATA_PDC_OLD
-	tristate "Older Promise PATA controller support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "Older Promise PATA controller support"
+	depends on PCI
 	help
 	  This option enables support for the Promise 20246, 20262, 20263,
 	  20265 and 20267 adapters.
@@ -559,7 +559,7 @@ config PATA_QDI
 	  Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
 config PATA_RADISYS
-	tristate "RADISYS 82600 PATA support (Very Experimental)"
+	tristate "RADISYS 82600 PATA support (Experimental)"
 	depends on PCI && EXPERIMENTAL
 	help
 	  This option enables support for the RADISYS 82600
@@ -586,8 +586,8 @@ config PATA_RZ1000
 	  If unsure, say N.
 config PATA_SC1200
-	tristate "SC1200 PATA support (Very Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "SC1200 PATA support"
+	depends on PCI
 	help
 	  This option enables support for the NatSemi/AMD SC1200 SoC
 	  companion chip used with the Geode processor family.
@@ -620,8 +620,8 @@ config PATA_SIL680
 	  If unsure, say N.
 config PATA_SIS
-	tristate "SiS PATA support (Experimental)"
-	depends on PCI && EXPERIMENTAL
+	tristate "SiS PATA support"
+	depends on PCI
 	help
 	  This option enables support for SiS PATA controllers

View File

@@ -1072,7 +1072,14 @@ static int piix_broken_suspend(void)
 	 * matching is necessary because dmi_system_id.matches is
 	 * limited to four entries.
 	 */
-	if (!strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
+	if (dmi_get_system_info(DMI_SYS_VENDOR) &&
+	    dmi_get_system_info(DMI_PRODUCT_NAME) &&
+	    dmi_get_system_info(DMI_PRODUCT_VERSION) &&
+	    dmi_get_system_info(DMI_PRODUCT_SERIAL) &&
+	    dmi_get_system_info(DMI_BOARD_VENDOR) &&
+	    dmi_get_system_info(DMI_BOARD_NAME) &&
+	    dmi_get_system_info(DMI_BOARD_VERSION) &&
+	    !strcmp(dmi_get_system_info(DMI_SYS_VENDOR), "TOSHIBA") &&
 	    !strcmp(dmi_get_system_info(DMI_PRODUCT_NAME), "000000") &&
 	    !strcmp(dmi_get_system_info(DMI_PRODUCT_VERSION), "000000") &&
 	    !strcmp(dmi_get_system_info(DMI_PRODUCT_SERIAL), "000000") &&

View File

@@ -382,10 +382,10 @@ static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 	/* PCI clocking determines the ATA timing values to use */
 	/* info_hpt366 is safe against re-entry so we can scribble on it */
 	switch((reg1 & 0x700) >> 8) {
-		case 5:
+		case 9:
 			hpriv = &hpt366_40;
 			break;
-		case 9:
+		case 5:
 			hpriv = &hpt366_25;
 			break;
 		default:

View File

@@ -44,7 +44,7 @@
 #include <linux/libata.h>
 #define DRV_NAME "pata_ninja32"
-#define DRV_VERSION "0.1.1"
+#define DRV_VERSION "0.1.3"
 /**
@@ -130,7 +130,8 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 		return rc;
 	pci_set_master(dev);
-	/* Set up the register mappings */
+	/* Set up the register mappings. We use the I/O mapping as only the
+	   older chips also have MMIO on BAR 1 */
 	base = host->iomap[0];
 	if (!base)
 		return -ENOMEM;
@@ -167,8 +168,12 @@ static int ninja32_reinit_one(struct pci_dev *pdev)
 #endif
 static const struct pci_device_id ninja32[] = {
+	{ 0x10FC, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0x1145, 0x8008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0x1145, 0xf008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ 0x1145, 0xf024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0x1145, 0xf02C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 	{ },
 };

View File

@@ -56,7 +56,6 @@ static const struct sis_laptop sis_laptop[] = {
 	{ 0x5513, 0x1043, 0x1107 },	/* ASUS A6K */
 	{ 0x5513, 0x1734, 0x105F },	/* FSC Amilo A1630 */
 	{ 0x5513, 0x1071, 0x8640 },	/* EasyNote K5305 */
-	{ 0x5513, 0x1039, 0x5513 },	/* Targa Visionary 1000 */
 	/* end marker */
 	{ 0, }
 };

View File

@@ -847,9 +847,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	 * and the registers being closely associated.
 	 *
 	 * According to chipset errata, on the 965GM, MSI interrupts may
-	 * be lost or delayed
+	 * be lost or delayed, but we use them anyways to avoid
+	 * stuck interrupts on some machines.
 	 */
-	if (!IS_I945G(dev) && !IS_I945GM(dev) && !IS_I965GM(dev))
+	if (!IS_I945G(dev) && !IS_I945GM(dev))
 		pci_enable_msi(dev->pdev);
 	intel_opregion_init(dev);

View File

@@ -1104,6 +1104,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 		if (ret != 0)
 			break;
 	}
+	if (ret == -ENOMEM)
+		return 0;
 	return ret;
 }

View File

@@ -299,7 +299,6 @@ typedef struct drm_radeon_private {
 	atomic_t swi_emitted;
 	int vblank_crtc;
 	uint32_t irq_enable_reg;
-	int irq_enabled;
 	uint32_t r500_disp_irq_reg;
 	struct radeon_surface surfaces[RADEON_MAX_SURFACES];

View File

@@ -44,7 +44,8 @@ void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
 	else
 		dev_priv->irq_enable_reg &= ~mask;
-	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+	if (!dev->irq_enabled)
+		RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
 }
 static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
@@ -56,7 +57,8 @@ static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
 	else
 		dev_priv->r500_disp_irq_reg &= ~mask;
-	RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+	if (!dev->irq_enabled)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
 }
 int radeon_enable_vblank(struct drm_device *dev, int crtc)
@@ -355,8 +357,6 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
 	if (!dev_priv)
 		return;
-	dev_priv->irq_enabled = 0;
-
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
 		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
 	/* Disable *all* interrupts */

View File

@@ -669,10 +669,12 @@ config BLK_DEV_CELLEB
 endif
+# TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
 config BLK_DEV_IDE_PMAC
 	tristate "PowerMac on-board IDE support"
 	depends on PPC_PMAC && IDE=y
 	select IDE_TIMINGS
+	select BLK_DEV_IDEDMA_PCI
 	help
 	  This driver provides support for the on-board IDE controller on
 	  most of the recent Apple Power Macintoshes and PowerBooks.
@@ -689,16 +691,6 @@ config BLK_DEV_IDE_PMAC_ATA100FIRST
 	  CD-ROM on hda. This option changes this to more natural hda for
 	  hard disk and hdc for CD-ROM.
-config BLK_DEV_IDEDMA_PMAC
-	bool "PowerMac IDE DMA support"
-	depends on BLK_DEV_IDE_PMAC
-	select BLK_DEV_IDEDMA_PCI
-	help
-	  This option allows the driver for the on-board IDE controller on
-	  Power Macintoshes and PowerBooks to use DMA (direct memory access)
-	  to transfer data to and from memory. Saying Y is safe and improves
-	  performance.
-
 config BLK_DEV_IDE_AU1XXX
 	bool "IDE for AMD Alchemy Au1200"
 	depends on SOC_AU1200
@@ -912,7 +904,7 @@ config BLK_DEV_UMC8672
 endif
 config BLK_DEV_IDEDMA
-	def_bool BLK_DEV_IDEDMA_SFF || BLK_DEV_IDEDMA_PMAC || \
+	def_bool BLK_DEV_IDEDMA_SFF || \
 		 BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
 endif # IDE

View File

@@ -208,8 +208,10 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
 		 */
 		if (drive->hwif->dma_ops == NULL)
 			break;
-		if (drive->dev_flags & IDE_DFLAG_USING_DMA)
-			ide_set_dma(drive);
+		/*
+		 * TODO: respect IDE_DFLAG_USING_DMA
+		 */
+		ide_set_dma(drive);
 		break;
 	}

View File

@@ -66,7 +66,6 @@ typedef struct pmac_ide_hwif {
 	struct macio_dev		*mdev;
 	u32				timings[4];
 	volatile u32 __iomem *		*kauai_fcr;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
 	/* Those fields are duplicating what is in hwif. We currently
 	 * can't use the hwif ones because of some assumptions that are
 	 * beeing done by the generic code about the kind of dma controller
@@ -74,8 +73,6 @@ typedef struct pmac_ide_hwif {
 	 */
 	volatile struct dbdma_regs __iomem *	dma_regs;
 	struct dbdma_cmd*			dma_table_cpu;
-#endif
 } pmac_ide_hwif_t;
 enum {
@@ -222,8 +219,6 @@ static const char* model_name[] = {
 #define KAUAI_FCR_UATA_RESET_N	0x00000002
 #define KAUAI_FCR_UATA_ENABLE	0x00000001
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
 /* Rounded Multiword DMA timings
  *
  * I gave up finding a generic formula for all controller
@@ -413,8 +408,6 @@ static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
 static void pmac_ide_selectproc(ide_drive_t *drive);
 static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
-
 #define PMAC_IDE_REG(x) \
 	((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
@@ -584,8 +577,6 @@ pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
 	pmac_ide_do_update_timings(drive);
 }
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
 /*
  * Calculate KeyLargo ATA/66 UDMA timings
  */
@@ -786,7 +777,6 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
 		drive->name, speed & 0xf, *timings);
 #endif
 }
-#endif /* #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC */
 static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
 {
@@ -804,7 +794,6 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
 	tl[0] = *timings;
 	tl[1] = *timings2;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
 	if (speed >= XFER_UDMA_0) {
 		if (pmif->kind == controller_kl_ata4)
 			ret = set_timings_udma_ata4(&tl[0], speed);
@@ -817,7 +806,7 @@ static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
 		ret = -1;
 	} else
 		set_timings_mdma(drive, pmif->kind, &tl[0], &tl[1], speed);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+
 	if (ret)
 		return;
@@ -1008,9 +997,7 @@ static const struct ide_port_info pmac_port_info = {
 	.chipset		= ide_pmac,
 	.tp_ops			= &pmac_tp_ops,
 	.port_ops		= &pmac_ide_port_ops,
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
 	.dma_ops		= &pmac_dma_ops,
-#endif
 	.host_flags		= IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
 				  IDE_HFLAG_POST_SET_MODE |
 				  IDE_HFLAG_MMIO |
@@ -1182,7 +1169,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
 	pmif->regbase = regbase;
 	pmif->irq = irq;
 	pmif->kauai_fcr = NULL;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
+
 	if (macio_resource_count(mdev) >= 2) {
 		if (macio_request_resource(mdev, 1, "ide-pmac (dma)"))
 			printk(KERN_WARNING "ide-pmac: can't request DMA "
@@ -1192,7 +1179,7 @@ pmac_ide_macio_attach(struct macio_dev *mdev, const struct of_device_id *match)
 		pmif->dma_regs = ioremap(macio_resource_start(mdev, 1), 0x1000);
 	} else
 		pmif->dma_regs = NULL;
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
+
 	dev_set_drvdata(&mdev->ofdev.dev, pmif);
 	memset(&hw, 0, sizeof(hw));
@@ -1300,9 +1287,7 @@ pmac_ide_pci_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 	base = ioremap(rbase, rlen);
 	pmif->regbase = (unsigned long) base + 0x2000;
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
 	pmif->dma_regs = base + 0x1000;
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
 	pmif->kauai_fcr = base;
 	pmif->irq = pdev->irq;
@@ -1434,8 +1419,6 @@ out:
 	return error;
 }
-#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
-
 /*
  * pmac_ide_build_dmatable builds the DBDMA command list
  * for a transfer and sets the DBDMA channel to point to it.
@@ -1723,13 +1706,6 @@ static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
 	return 0;
 }
-#else
-static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
-				       const struct ide_port_info *d)
-{
-	return -EOPNOTSUPP;
-}
-#endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
 module_init(pmac_ide_probe);

View File

@@ -233,10 +233,8 @@ static void __exit b1isa_exit(void)
 	int i;
 	for (i = 0; i < MAX_CARDS; i++) {
-		if (!io[i])
-			break;
-		b1isa_remove(&isa_dev[i]);
+		if (isa_dev[i].resource[0].start)
+			b1isa_remove(&isa_dev[i]);
 	}
 	unregister_capi_driver(&capi_driver_b1isa);
 }

View File

@@ -83,12 +83,12 @@ net_open(struct net_device *dev)
 	/* Fill in the MAC-level header (if not already set) */
 	if (!card->mac_addr[0]) {
-		for (i = 0; i < ETH_ALEN - sizeof(unsigned long); i++)
+		for (i = 0; i < ETH_ALEN; i++)
 			dev->dev_addr[i] = 0xfc;
 		if ((in_dev = dev->ip_ptr) != NULL) {
 			struct in_ifaddr *ifa = in_dev->ifa_list;
 			if (ifa != NULL)
-				memcpy(dev->dev_addr + (ETH_ALEN - sizeof(unsigned long)), &ifa->ifa_local, sizeof(unsigned long));
+				memcpy(dev->dev_addr + (ETH_ALEN - sizeof(ifa->ifa_local)), &ifa->ifa_local, sizeof(ifa->ifa_local));
 		}
 	} else
 		memcpy(dev->dev_addr, card->mac_addr, ETH_ALEN);

View File

@@ -37,9 +37,9 @@
 #define	OPCODE_NORM_READ	0x03	/* Read data bytes (low frequency) */
 #define	OPCODE_FAST_READ	0x0b	/* Read data bytes (high frequency) */
 #define	OPCODE_PP		0x02	/* Page program (up to 256 bytes) */
 #define	OPCODE_BE_4K		0x20	/* Erase 4KiB block */
 #define	OPCODE_BE_32K		0x52	/* Erase 32KiB block */
-#define	OPCODE_BE		0xc7	/* Erase whole flash block */
+#define	OPCODE_CHIP_ERASE	0xc7	/* Erase whole flash chip */
 #define	OPCODE_SE		0xd8	/* Sector erase (usually 64KiB) */
 #define	OPCODE_RDID		0x9f	/* Read JEDEC ID */
@@ -167,7 +167,7 @@ static int wait_till_ready(struct m25p *flash)
  *
  * Returns 0 if successful, non-zero otherwise.
  */
-static int erase_block(struct m25p *flash)
+static int erase_chip(struct m25p *flash)
 {
 	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB\n",
 	      flash->spi->dev.bus_id, __func__,
@@ -181,7 +181,7 @@ static int erase_block(struct m25p *flash)
 	write_enable(flash);
 	/* Set up command buffer. */
-	flash->command[0] = OPCODE_BE;
+	flash->command[0] = OPCODE_CHIP_ERASE;
 	spi_write(flash->spi, flash->command, 1);
@@ -250,15 +250,18 @@ static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
 	mutex_lock(&flash->lock);
-	/* REVISIT in some cases we could speed up erasing large regions
-	 * by using OPCODE_SE instead of OPCODE_BE_4K
-	 */
-
-	/* now erase those sectors */
-	if (len == flash->mtd.size && erase_block(flash)) {
+	/* whole-chip erase? */
+	if (len == flash->mtd.size && erase_chip(flash)) {
 		instr->state = MTD_ERASE_FAILED;
 		mutex_unlock(&flash->lock);
 		return -EIO;
+
+	/* REVISIT in some cases we could speed up erasing large regions
+	 * by using OPCODE_SE instead of OPCODE_BE_4K.  We may have set up
+	 * to use "small sector erase", but that's not always optimal.
+	 */
+
+	/* "sector"-at-a-time erase */
 	} else {
 		while (len) {
 			if (erase_sector(flash, addr)) {
@@ -574,10 +577,11 @@ static struct flash_info *__devinit jedec_probe(struct spi_device *spi)
 	for (tmp = 0, info = m25p_data;
 			tmp < ARRAY_SIZE(m25p_data);
 			tmp++, info++) {
-		if (info->jedec_id == jedec)
-			if (ext_jedec != 0 && info->ext_id != ext_jedec)
+		if (info->jedec_id == jedec) {
+			if (info->ext_id != 0 && info->ext_id != ext_jedec)
 				continue;
 			return info;
+		}
 	}
 	dev_err(&spi->dev, "unrecognized JEDEC id %06x\n", jedec);
 	return NULL;
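
Aside (not part of the patch): the added braces matter because without them the continue binds to the inner if and "return info" runs for every entry whose base JEDEC id matches, even when the extended id disagrees. A hedged userspace sketch of the corrected matching loop over an invented table:

    /* Illustration only: JEDEC id matching with an optional extended id. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct flash_info {
        const char *name;
        uint32_t    jedec_id;
        uint16_t    ext_id;    /* 0 means "no extended id to compare" */
    };

    static const struct flash_info table[] = {
        { "chip-a", 0x202015, 0x0000 },
        { "chip-b", 0x20ba20, 0x1000 },  /* made-up ids, not a real part list */
        { "chip-c", 0x20ba20, 0x1001 },
    };

    static const struct flash_info *probe(uint32_t jedec, uint16_t ext_jedec)
    {
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
            if (table[i].jedec_id == jedec) {
                if (table[i].ext_id != 0 && table[i].ext_id != ext_jedec)
                    continue;            /* same base id, wrong extended id */
                return &table[i];
            }
        }
        return NULL;
    }

    int main(void)
    {
        const struct flash_info *info = probe(0x20ba20, 0x1001);
        printf("matched: %s\n", info ? info->name : "none");   /* chip-c */
        return 0;
    }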

View File

@@ -19,7 +19,7 @@
 #include <linux/mtd/partitions.h>
 #include <linux/mtd/physmap.h>
 #include <linux/mtd/concat.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #define MAX_RESOURCES		4
@@ -27,7 +27,6 @@ struct physmap_flash_info {
 	struct mtd_info		*mtd[MAX_RESOURCES];
 	struct mtd_info		*cmtd;
 	struct map_info		map[MAX_RESOURCES];
-	struct resource		*res;
 #ifdef CONFIG_MTD_PARTITIONS
 	int			nr_parts;
 	struct mtd_partition	*parts;
@@ -70,16 +69,7 @@ static int physmap_flash_remove(struct platform_device *dev)
 #endif
 			map_destroy(info->mtd[i]);
 		}
-
-		if (info->map[i].virt != NULL)
-			iounmap(info->map[i].virt);
-	}
-
-	if (info->res != NULL) {
-		release_resource(info->res);
-		kfree(info->res);
 	}
 	return 0;
 }
@@ -101,7 +91,8 @@ static int physmap_flash_probe(struct platform_device *dev)
 	if (physmap_data == NULL)
 		return -ENODEV;
-	info = kzalloc(sizeof(struct physmap_flash_info), GFP_KERNEL);
+	info = devm_kzalloc(&dev->dev, sizeof(struct physmap_flash_info),
+			    GFP_KERNEL);
 	if (info == NULL) {
 		err = -ENOMEM;
 		goto err_out;
@@ -114,10 +105,10 @@ static int physmap_flash_probe(struct platform_device *dev)
 		       (unsigned long long)(dev->resource[i].end - dev->resource[i].start + 1),
 		       (unsigned long long)dev->resource[i].start);
-		info->res = request_mem_region(dev->resource[i].start,
-					dev->resource[i].end - dev->resource[i].start + 1,
-					dev->dev.bus_id);
-		if (info->res == NULL) {
+		if (!devm_request_mem_region(&dev->dev,
+					     dev->resource[i].start,
+					     dev->resource[i].end - dev->resource[i].start + 1,
+					     dev->dev.bus_id)) {
 			dev_err(&dev->dev, "Could not reserve memory region\n");
 			err = -ENOMEM;
 			goto err_out;
@@ -129,7 +120,8 @@ static int physmap_flash_probe(struct platform_device *dev)
 		info->map[i].bankwidth = physmap_data->width;
 		info->map[i].set_vpp = physmap_data->set_vpp;
-		info->map[i].virt = ioremap(info->map[i].phys, info->map[i].size);
+		info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
+						 info->map[i].size);
 		if (info->map[i].virt == NULL) {
 			dev_err(&dev->dev, "Failed to ioremap flash region\n");
 			err = EIO;

View File

@@ -163,9 +163,11 @@ static int __devinit fun_chip_init(struct fsl_upm_nand *fun,
 	ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
 #ifdef CONFIG_MTD_OF_PARTS
-	if (ret == 0)
-		ret = of_mtd_parse_partitions(fun->dev, &fun->mtd,
-					      flash_np, &fun->parts);
+	if (ret == 0) {
+		ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
+		if (ret < 0)
+			goto err;
+	}
 #endif
 	if (ret > 0)
 		ret = add_mtd_partitions(&fun->mtd, fun->parts, ret);

View File

@@ -141,6 +141,7 @@ static int __devinit pasemi_nand_probe(struct of_device *ofdev,
 	}
 	lpcctl = pci_resource_start(pdev, 0);
+	pci_dev_put(pdev);
 	if (!request_region(lpcctl, 4, driver_name)) {
 		err = -EBUSY;

View File

@@ -269,6 +269,7 @@ static struct pxa3xx_nand_timing stm2GbX16_timing = {
 static struct pxa3xx_nand_flash stm2GbX16 = {
 	.timing = &stm2GbX16_timing,
+	.cmdset = &largepage_cmdset,
 	.page_per_block = 64,
 	.page_size = 2048,
 	.flash_width = 16,

View File

@@ -32,19 +32,18 @@
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
-#include <asm/io.h>
-#include <asm/mach/flash.h>
-#include <asm/arch/gpmc.h>
-#include <asm/arch/onenand.h>
-#include <asm/arch/gpio.h>
-#include <asm/arch/pm.h>
 #include <linux/dma-mapping.h>
-#include <asm/dma-mapping.h>
-#include <asm/arch/dma.h>
-#include <asm/arch/board.h>
+#include <linux/io.h>
+
+#include <asm/mach/flash.h>
+
+#include <mach/gpmc.h>
+#include <mach/onenand.h>
+#include <mach/gpio.h>
+#include <mach/pm.h>
+#include <mach/dma.h>
+#include <mach/board.h>
 #define DRIVER_NAME "omap2-onenand"

View File

@@ -3144,6 +3144,28 @@ bnx2_has_work(struct bnx2_napi *bnapi)
 	return 0;
 }
+static void
+bnx2_chk_missed_msi(struct bnx2 *bp)
+{
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	u32 msi_ctrl;
+
+	if (bnx2_has_work(bnapi)) {
+		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
+		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
+			return;
+
+		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
+			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
+			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
+			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
+			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
+		}
+	}
+
+	bp->idle_chk_status_idx = bnapi->last_status_idx;
+}
+
 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
 	struct status_block *sblk = bnapi->status_blk.msi;
@@ -3218,14 +3240,15 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
-		if (unlikely(work_done >= budget))
-			break;
-
 		/* bnapi->last_status_idx is used below to tell the hw how
 		 * much work has been processed, so we must read it before
 		 * checking for more work.
 		 */
 		bnapi->last_status_idx = sblk->status_idx;
+
+		if (unlikely(work_done >= budget))
+			break;
+
 		rmb();
 		if (likely(!bnx2_has_work(bnapi))) {
 			netif_rx_complete(bp->dev, napi);
@@ -4570,6 +4593,8 @@ bnx2_init_chip(struct bnx2 *bp)
 	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
 		bp->bnx2_napi[i].last_status_idx = 0;
+	bp->idle_chk_status_idx = 0xffff;
+
 	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
 	/* Set up how to generate a link change interrupt. */
@@ -5718,6 +5743,10 @@ bnx2_timer(unsigned long data)
 	if (atomic_read(&bp->intr_sem) != 0)
 		goto bnx2_restart_timer;
+	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
+	     BNX2_FLAG_USING_MSI)
+		bnx2_chk_missed_msi(bp);
+
 	bnx2_send_heart_beat(bp);
 	bp->stats_blk->stat_FwRxDrop =
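
Aside (my reading of the new helper, not driver code): the idea is that if the device reports pending work but its status index has not advanced since the previous timer tick, an edge-triggered MSI was probably lost, so the interrupt handler is invoked by hand. A stripped-down sketch of that pattern:

    /* Illustration only: detect a stalled edge-triggered interrupt from a timer. */
    #include <stdbool.h>
    #include <stdio.h>

    struct nic {
        unsigned int last_status_idx;     /* index the IRQ handler saw last        */
        unsigned int idle_chk_status_idx; /* index recorded by the previous tick   */
    };

    static bool has_work(const struct nic *nic)  { (void)nic; return true; }
    static void run_irq_handler(struct nic *nic) { printf("kicking handler\n"); nic->last_status_idx++; }

    /* called periodically, e.g. from a 1 s timer */
    static void check_missed_irq(struct nic *nic)
    {
        if (has_work(nic) && nic->last_status_idx == nic->idle_chk_status_idx)
            run_irq_handler(nic);  /* status index stalled: assume a lost edge */

        nic->idle_chk_status_idx = nic->last_status_idx;
    }

    int main(void)
    {
        struct nic nic = { .last_status_idx = 5, .idle_chk_status_idx = 0xffff };
        check_missed_irq(&nic);   /* first tick just records the index        */
        check_missed_irq(&nic);   /* second tick sees no progress and kicks   */
        return 0;
    }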

View File

@@ -378,6 +378,9 @@ struct l2_fhdr {
  * pci_config_l definition
  * offset: 0000
  */
+#define BNX2_PCICFG_MSI_CONTROL				0x00000058
+#define BNX2_PCICFG_MSI_CONTROL_ENABLE			 (1L<<16)
+
 #define BNX2_PCICFG_MISC_CONFIG				0x00000068
 #define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP	 (1L<<2)
 #define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP	 (1L<<3)
@@ -6863,6 +6866,9 @@ struct bnx2 {
 	u8			num_tx_rings;
 	u8			num_rx_rings;
+	u32			idle_chk_status_idx;
+
 };
 #define REG_RD(bp, offset)					\

View File

@@ -568,6 +568,17 @@ static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
 	return erxrdpt;
 }
+/*
+ * Calculate wrap around when reading beyond the end of the RX buffer
+ */
+static u16 rx_packet_start(u16 ptr)
+{
+	if (ptr + RSV_SIZE > RXEND_INIT)
+		return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
+	else
+		return ptr + RSV_SIZE;
+}
+
 static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
 {
 	u16 erxrdpt;
@@ -938,8 +949,9 @@ static void enc28j60_hw_rx(struct net_device *ndev)
 		skb->dev = ndev;
 		skb_reserve(skb, NET_IP_ALIGN);
 		/* copy the packet from the receive buffer */
-		enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv),
-				len, skb_put(skb, len));
+		enc28j60_mem_read(priv,
+			rx_packet_start(priv->next_pk_ptr),
+			len, skb_put(skb, len));
 		if (netif_msg_pktdata(priv))
 			dump_packet(__func__, skb->len, skb->data);
 		skb->protocol = eth_type_trans(skb, ndev);
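
Aside (not part of the patch): the new rx_packet_start() helper skips the receive status vector stored ahead of each frame and wraps back to the start of the RX ring when that would run past the buffer end. A self-contained sketch with hypothetical ring bounds:

    /* Illustration only: skip the status vector and wrap inside the RX ring. */
    #include <stdio.h>
    #include <stdint.h>

    #define RXSTART  0x0000   /* hypothetical ring bounds, not the chip defaults */
    #define RXEND    0x19ff
    #define RSV_SIZE 6        /* receive status vector stored before the frame   */

    static uint16_t rx_packet_start(uint16_t ptr)
    {
        if (ptr + RSV_SIZE > RXEND)
            return (ptr + RSV_SIZE) - (RXEND - RXSTART + 1);
        return ptr + RSV_SIZE;
    }

    int main(void)
    {
        printf("0x0100 -> 0x%04x\n", rx_packet_start(0x0100)); /* 0x0106, no wrap */
        printf("0x19fc -> 0x%04x\n", rx_packet_start(0x19fc)); /* wraps to 0x0002 */
        return 0;
    }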

View File

@@ -401,6 +401,8 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
 	priv->xmac_base = priv->xc->xmac_base;
 	priv->sram_base = priv->xc->sram_base;
+	spin_lock_init(&priv->lock);
+
 	ret = pfifo_request(PFIFO_MASK(priv->id));
 	if (ret) {
 		printk("unable to request PFIFO\n");

View File

@@ -3897,6 +3897,7 @@ static int ipw_disassociate(void *data)
 	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
 		return 0;
 	ipw_send_disassociate(data, 0);
+	netif_carrier_off(priv->net_dev);
 	return 1;
 }
@@ -10190,6 +10191,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
 	u16 remaining_bytes;
 	int fc;
+	if (!(priv->status & STATUS_ASSOCIATED))
+		goto drop;
+
 	hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
 	switch (priv->ieee->iw_mode) {
 	case IW_MODE_ADHOC:

View File

@@ -290,6 +290,9 @@ void iwl_clear_stations_table(struct iwl_priv *priv)
 	priv->num_stations = 0;
 	memset(priv->stations, 0, sizeof(priv->stations));
+	/* clean ucode key table bit map */
+	priv->ucode_key_table = 0;
+
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
 EXPORT_SYMBOL(iwl_clear_stations_table);

View File

@@ -475,7 +475,7 @@ static int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
 		if (!test_and_set_bit(i, &priv->ucode_key_table))
 			return i;
-	return -1;
+	return WEP_INVALID_OFFSET;
 }
 int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
@@ -620,6 +620,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
 	/* else, we are overriding an existing key => no need to allocated room
 	 * in uCode. */
+	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+		"no space for new kew");
+
 	priv->stations[sta_id].sta.key.key_flags = key_flags;
 	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -637,6 +640,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
 {
 	unsigned long flags;
 	__le16 key_flags = 0;
+	int ret;
 	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
 	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -664,14 +668,18 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
 	/* else, we are overriding an existing key => no need to allocated room
 	 * in uCode. */
+	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+		"no space for new kew");
+
 	priv->stations[sta_id].sta.key.key_flags = key_flags;
 	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
 	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
-	IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
-	return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+	return ret;
 }
 static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -696,6 +704,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
 	/* else, we are overriding an existing key => no need to allocated room
 	 * in uCode. */
+	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+		"no space for new kew");
+
 	/* This copy is acutally not needed: we get the key with each TX */
 	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
@@ -734,6 +745,13 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
 		return 0;
 	}
+	if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+		IWL_WARNING("Removing wrong key %d 0x%x\n",
+			    keyconf->keyidx, key_flags);
+		spin_unlock_irqrestore(&priv->sta_lock, flags);
+		return 0;
+	}
+
 	if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
 		&priv->ucode_key_table))
 		IWL_ERROR("index %d not used in uCode key table.\n",

View File

@@ -615,7 +615,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
 		struct ieee80211_hdr *tx_hdr;
 		tx_hdr = (struct ieee80211_hdr *)skb->data;
-		if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1)))
+		if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
 		{
 			__skb_unlink(skb, q);
 			tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1);

View File

@@ -256,7 +256,8 @@ void radeonfb_imageblit(struct fb_info *info, const struct fb_image *image)
 		return;
 	/* We only do 1 bpp color expansion for now */
-	if (info->flags & FBINFO_HWACCEL_DISABLED || image->depth != 1)
+	if (!accel_cexp ||
+	    (info->flags & FBINFO_HWACCEL_DISABLED) || image->depth != 1)
 		goto fallback;
 	/* Fallback if running out of the screen. We may do clipping

View File

@@ -282,6 +282,8 @@ static int backlight = 1;
 static int backlight = 0;
 #endif
+int accel_cexp = 0;
+
 /*
  * prototypes
  */
@@ -2520,6 +2522,8 @@ static int __init radeonfb_setup (char *options)
 		} else if (!strncmp(this_opt, "ignore_devlist", 14)) {
 			ignore_devlist = 1;
 #endif
+		} else if (!strncmp(this_opt, "accel_cexp", 12)) {
+			accel_cexp = 1;
 		} else
 			mode_option = this_opt;
 	}
@@ -2567,6 +2571,8 @@ module_param(monitor_layout, charp, 0);
 MODULE_PARM_DESC(monitor_layout, "Specify monitor mapping (like XFree86)");
 module_param(force_measure_pll, bool, 0);
 MODULE_PARM_DESC(force_measure_pll, "Force measurement of PLL (debug)");
+module_param(accel_cexp, bool, 0);
+MODULE_PARM_DESC(accel_cexp, "Use acceleration engine for color expansion");
 #ifdef CONFIG_MTRR
 module_param(nomtrr, bool, 0);
 MODULE_PARM_DESC(nomtrr, "bool: disable use of MTRR registers");

View File

@@ -638,4 +638,6 @@ static inline void radeonfb_bl_init(struct radeonfb_info *rinfo) {}
 static inline void radeonfb_bl_exit(struct radeonfb_info *rinfo) {}
 #endif
+extern int accel_cexp;
+
 #endif /* __RADEONFB_H__ */

View File

@@ -392,7 +392,7 @@ static void set_fb_fix(struct fb_info *fbi)
 	int bpp;
 	rg = &plane->fbdev->mem_desc.region[plane->idx];
-	fbi->screen_base = (char __iomem *)rg->vaddr;
+	fbi->screen_base = rg->vaddr;
 	fix->smem_start = rg->paddr;
 	fix->smem_len = rg->size;


@ -367,6 +367,8 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
* Try to get any dentry for the given file handle from the filesystem. * Try to get any dentry for the given file handle from the filesystem.
*/ */
result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type); result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
if (!result)
result = ERR_PTR(-ESTALE);
if (IS_ERR(result)) if (IS_ERR(result))
return result; return result;
@ -420,6 +422,8 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
target_dir = nop->fh_to_parent(mnt->mnt_sb, fid, target_dir = nop->fh_to_parent(mnt->mnt_sb, fid,
fh_len, fileid_type); fh_len, fileid_type);
if (!target_dir)
goto err_result;
err = PTR_ERR(target_dir); err = PTR_ERR(target_dir);
if (IS_ERR(target_dir)) if (IS_ERR(target_dir))
goto err_result; goto err_result;
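Both hunks apply the same convention: ->fh_to_dentry() and ->fh_to_parent() may return a dentry, an ERR_PTR() or NULL, and a NULL result is now folded into the error path before the existing IS_ERR() checks. A minimal sketch of that pattern (illustrative fragment only; get_result() is a hypothetical stand-in for either callback):

    struct dentry *result = get_result();  /* dentry, NULL or ERR_PTR() */
    if (!result)                            /* "nothing found"...            */
            result = ERR_PTR(-ESTALE);      /* ...is reported as stale       */
    if (IS_ERR(result))
            return result;
    /* from here on, result is a usable dentry */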


@ -19,6 +19,7 @@
#include <linux/signal.h> #include <linux/signal.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/pid_namespace.h> #include <linux/pid_namespace.h>
#include <linux/smp_lock.h>
#include <asm/poll.h> #include <asm/poll.h>
#include <asm/siginfo.h> #include <asm/siginfo.h>
@ -175,6 +176,11 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
if (error) if (error)
return error; return error;
/*
* We still need a lock here for now to keep multiple FASYNC calls
* from racing with each other.
*/
lock_kernel();
if ((arg ^ filp->f_flags) & FASYNC) { if ((arg ^ filp->f_flags) & FASYNC) {
if (filp->f_op && filp->f_op->fasync) { if (filp->f_op && filp->f_op->fasync) {
error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0); error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
@ -185,6 +191,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK); filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
out: out:
unlock_kernel();
return error; return error;
} }
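The lock_kernel()/unlock_kernel() pair added above keeps concurrent F_SETFL callers from racing while a driver's ->fasync() method updates its notification list. For reference, a typical ->fasync() implementation that ends up being serialized here looks roughly like this (sketch; my_fasync_queue is a hypothetical per-driver field):

    #include <linux/fs.h>

    static struct fasync_struct *my_fasync_queue;

    static int my_fasync(int fd, struct file *filp, int on)
    {
            /* add to or remove from the driver's SIGIO notification list */
            return fasync_helper(fd, filp, on, &my_fasync_queue);
    }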


@ -400,11 +400,9 @@ static int ioctl_fioasync(unsigned int fd, struct file *filp,
/* Did FASYNC state change ? */ /* Did FASYNC state change ? */
if ((flag ^ filp->f_flags) & FASYNC) { if ((flag ^ filp->f_flags) & FASYNC) {
if (filp->f_op && filp->f_op->fasync) { if (filp->f_op && filp->f_op->fasync)
lock_kernel();
error = filp->f_op->fasync(fd, filp, on); error = filp->f_op->fasync(fd, filp, on);
unlock_kernel(); else
} else
error = -ENOTTY; error = -ENOTTY;
} }
if (error) if (error)
@ -440,11 +438,17 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
break; break;
case FIONBIO: case FIONBIO:
/* BKL needed to avoid races tweaking f_flags */
lock_kernel();
error = ioctl_fionbio(filp, argp); error = ioctl_fionbio(filp, argp);
unlock_kernel();
break; break;
case FIOASYNC: case FIOASYNC:
/* BKL needed to avoid races tweaking f_flags */
lock_kernel();
error = ioctl_fioasync(fd, filp, argp); error = ioctl_fioasync(fd, filp, argp);
unlock_kernel();
break; break;
case FIOQSIZE: case FIOQSIZE:


@ -1,6 +1,8 @@
#include <asm-generic/audit_dir_write.h> #include <asm-generic/audit_dir_write.h>
__NR_acct, __NR_acct,
#ifdef __NR_swapon
__NR_swapon, __NR_swapon,
#endif
__NR_quotactl, __NR_quotactl,
__NR_truncate, __NR_truncate,
#ifdef __NR_truncate64 #ifdef __NR_truncate64


@ -391,6 +391,7 @@ extern int audit_classify_arch(int arch);
#ifdef CONFIG_AUDITSYSCALL #ifdef CONFIG_AUDITSYSCALL
/* These are defined in auditsc.c */ /* These are defined in auditsc.c */
/* Public API */ /* Public API */
extern void audit_finish_fork(struct task_struct *child);
extern int audit_alloc(struct task_struct *task); extern int audit_alloc(struct task_struct *task);
extern void audit_free(struct task_struct *task); extern void audit_free(struct task_struct *task);
extern void audit_syscall_entry(int arch, extern void audit_syscall_entry(int arch,
@ -434,7 +435,7 @@ static inline void audit_ptrace(struct task_struct *t)
/* Private API (for audit.c only) */ /* Private API (for audit.c only) */
extern unsigned int audit_serial(void); extern unsigned int audit_serial(void);
extern void auditsc_get_stamp(struct audit_context *ctx, extern int auditsc_get_stamp(struct audit_context *ctx,
struct timespec *t, unsigned int *serial); struct timespec *t, unsigned int *serial);
extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid); extern int audit_set_loginuid(struct task_struct *task, uid_t loginuid);
#define audit_get_loginuid(t) ((t)->loginuid) #define audit_get_loginuid(t) ((t)->loginuid)
@ -504,6 +505,7 @@ static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
extern int audit_n_rules; extern int audit_n_rules;
extern int audit_signals; extern int audit_signals;
#else #else
#define audit_finish_fork(t)
#define audit_alloc(t) ({ 0; }) #define audit_alloc(t) ({ 0; })
#define audit_free(t) do { ; } while (0) #define audit_free(t) do { ; } while (0)
#define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0) #define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0)
@ -516,7 +518,7 @@ extern int audit_signals;
#define audit_inode(n,d) do { ; } while (0) #define audit_inode(n,d) do { ; } while (0)
#define audit_inode_child(d,i,p) do { ; } while (0) #define audit_inode_child(d,i,p) do { ; } while (0)
#define audit_core_dumps(i) do { ; } while (0) #define audit_core_dumps(i) do { ; } while (0)
#define auditsc_get_stamp(c,t,s) do { BUG(); } while (0) #define auditsc_get_stamp(c,t,s) (0)
#define audit_get_loginuid(t) (-1) #define audit_get_loginuid(t) (-1)
#define audit_get_sessionid(t) (-1) #define audit_get_sessionid(t) (-1)
#define audit_log_task_context(b) do { ; } while (0) #define audit_log_task_context(b) do { ; } while (0)


@ -662,6 +662,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
* default timeout for SG_IO if none specified * default timeout for SG_IO if none specified
*/ */
#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) #define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
#define BLK_MIN_SG_TIMEOUT (7 * HZ)
#ifdef CONFIG_BOUNCE #ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void); extern int init_emergency_isa_pool(void);


@ -19,7 +19,7 @@
#include <linux/skbuff.h> #include <linux/skbuff.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#define CAN_VERSION "20071116" #define CAN_VERSION "20081130"
/* increment this number each time you change some user-space interface */ /* increment this number each time you change some user-space interface */
#define CAN_ABI_VERSION "8" #define CAN_ABI_VERSION "8"


@ -61,8 +61,11 @@
#include "audit.h" #include "audit.h"
/* No auditing will take place until audit_initialized != 0. /* No auditing will take place until audit_initialized == AUDIT_INITIALIZED.
* (Initialization happens after skb_init is called.) */ * (Initialization happens after skb_init is called.) */
#define AUDIT_DISABLED -1
#define AUDIT_UNINITIALIZED 0
#define AUDIT_INITIALIZED 1
static int audit_initialized; static int audit_initialized;
#define AUDIT_OFF 0 #define AUDIT_OFF 0
@ -965,6 +968,9 @@ static int __init audit_init(void)
{ {
int i; int i;
if (audit_initialized == AUDIT_DISABLED)
return 0;
printk(KERN_INFO "audit: initializing netlink socket (%s)\n", printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
audit_default ? "enabled" : "disabled"); audit_default ? "enabled" : "disabled");
audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0, audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0,
@ -976,7 +982,7 @@ static int __init audit_init(void)
skb_queue_head_init(&audit_skb_queue); skb_queue_head_init(&audit_skb_queue);
skb_queue_head_init(&audit_skb_hold_queue); skb_queue_head_init(&audit_skb_hold_queue);
audit_initialized = 1; audit_initialized = AUDIT_INITIALIZED;
audit_enabled = audit_default; audit_enabled = audit_default;
audit_ever_enabled |= !!audit_default; audit_ever_enabled |= !!audit_default;
@ -999,13 +1005,21 @@ __initcall(audit_init);
static int __init audit_enable(char *str) static int __init audit_enable(char *str)
{ {
audit_default = !!simple_strtol(str, NULL, 0); audit_default = !!simple_strtol(str, NULL, 0);
printk(KERN_INFO "audit: %s%s\n", if (!audit_default)
audit_default ? "enabled" : "disabled", audit_initialized = AUDIT_DISABLED;
audit_initialized ? "" : " (after initialization)");
if (audit_initialized) { printk(KERN_INFO "audit: %s", audit_default ? "enabled" : "disabled");
if (audit_initialized == AUDIT_INITIALIZED) {
audit_enabled = audit_default; audit_enabled = audit_default;
audit_ever_enabled |= !!audit_default; audit_ever_enabled |= !!audit_default;
} else if (audit_initialized == AUDIT_UNINITIALIZED) {
printk(" (after initialization)");
} else {
printk(" (until reboot)");
} }
printk("\n");
return 1; return 1;
} }
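Combined with the AUDIT_DISABLED/AUDIT_UNINITIALIZED/AUDIT_INITIALIZED states introduced earlier in this file, the audit= boot parameter now behaves roughly as follows (summary of the hunks shown above, not additional kernel code):

    /*
     * audit=1 parsed before audit_init()  ->  "audit: enabled (after initialization)"
     * audit=1 parsed after audit_init()   ->  "audit: enabled"
     * audit=0                             ->  "audit: disabled (until reboot)";
     *                                         audit_initialized = AUDIT_DISABLED,
     *                                         audit_init() returns early and
     *                                         audit_log_start() keeps returning NULL.
     */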
@ -1107,9 +1121,7 @@ unsigned int audit_serial(void)
static inline void audit_get_stamp(struct audit_context *ctx, static inline void audit_get_stamp(struct audit_context *ctx,
struct timespec *t, unsigned int *serial) struct timespec *t, unsigned int *serial)
{ {
if (ctx) if (!ctx || !auditsc_get_stamp(ctx, t, serial)) {
auditsc_get_stamp(ctx, t, serial);
else {
*t = CURRENT_TIME; *t = CURRENT_TIME;
*serial = audit_serial(); *serial = audit_serial();
} }
@ -1146,7 +1158,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
int reserve; int reserve;
unsigned long timeout_start = jiffies; unsigned long timeout_start = jiffies;
if (!audit_initialized) if (audit_initialized != AUDIT_INITIALIZED)
return NULL; return NULL;
if (unlikely(audit_filter_type(type))) if (unlikely(audit_filter_type(type)))


@ -1459,7 +1459,6 @@ void audit_free(struct task_struct *tsk)
/** /**
* audit_syscall_entry - fill in an audit record at syscall entry * audit_syscall_entry - fill in an audit record at syscall entry
* @tsk: task being audited
* @arch: architecture type * @arch: architecture type
* @major: major syscall type (function) * @major: major syscall type (function)
* @a1: additional syscall register 1 * @a1: additional syscall register 1
@ -1548,9 +1547,25 @@ void audit_syscall_entry(int arch, int major,
context->ppid = 0; context->ppid = 0;
} }
void audit_finish_fork(struct task_struct *child)
{
struct audit_context *ctx = current->audit_context;
struct audit_context *p = child->audit_context;
if (!p || !ctx || !ctx->auditable)
return;
p->arch = ctx->arch;
p->major = ctx->major;
memcpy(p->argv, ctx->argv, sizeof(ctx->argv));
p->ctime = ctx->ctime;
p->dummy = ctx->dummy;
p->auditable = ctx->auditable;
p->in_syscall = ctx->in_syscall;
p->filterkey = kstrdup(ctx->filterkey, GFP_KERNEL);
p->ppid = current->pid;
}
/** /**
* audit_syscall_exit - deallocate audit context after a system call * audit_syscall_exit - deallocate audit context after a system call
* @tsk: task being audited
* @valid: success/failure flag * @valid: success/failure flag
* @return_code: syscall return value * @return_code: syscall return value
* *
@ -1942,15 +1957,18 @@ EXPORT_SYMBOL_GPL(__audit_inode_child);
* *
* Also sets the context as auditable. * Also sets the context as auditable.
*/ */
void auditsc_get_stamp(struct audit_context *ctx, int auditsc_get_stamp(struct audit_context *ctx,
struct timespec *t, unsigned int *serial) struct timespec *t, unsigned int *serial)
{ {
if (!ctx->in_syscall)
return 0;
if (!ctx->serial) if (!ctx->serial)
ctx->serial = audit_serial(); ctx->serial = audit_serial();
t->tv_sec = ctx->ctime.tv_sec; t->tv_sec = ctx->ctime.tv_sec;
t->tv_nsec = ctx->ctime.tv_nsec; t->tv_nsec = ctx->ctime.tv_nsec;
*serial = ctx->serial; *serial = ctx->serial;
ctx->auditable = 1; ctx->auditable = 1;
return 1;
} }
/* global counter which is incremented every time something logs in */ /* global counter which is incremented every time something logs in */
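With the function returning an int, the audit_get_stamp() caller in audit.c (see the hunk above) can fall back cleanly when no syscall record is in progress; the combined behaviour is roughly:

    /*
     * audit_get_stamp(ctx, t, serial):
     *   ctx == NULL          ->  *t = CURRENT_TIME, *serial = audit_serial()
     *   !ctx->in_syscall     ->  auditsc_get_stamp() returns 0, same fallback
     *   otherwise            ->  reuse ctx->ctime / ctx->serial, mark the
     *                            context auditable and return 1
     */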


@ -1398,6 +1398,7 @@ long do_fork(unsigned long clone_flags,
init_completion(&vfork); init_completion(&vfork);
} }
audit_finish_fork(p);
tracehook_report_clone(trace, regs, clone_flags, nr, p); tracehook_report_clone(trace, regs, clone_flags, nr, p);
/* /*


@ -293,7 +293,10 @@ static int svc_listen(struct socket *sock,int backlog)
error = -EINVAL; error = -EINVAL;
goto out; goto out;
} }
vcc_insert_socket(sk); if (test_bit(ATM_VF_LISTEN, &vcc->flags)) {
error = -EADDRINUSE;
goto out;
}
set_bit(ATM_VF_WAITING, &vcc->flags); set_bit(ATM_VF_WAITING, &vcc->flags);
prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE);
sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local); sigd_enq(vcc,as_listen,NULL,NULL,&vcc->local);
@ -307,6 +310,7 @@ static int svc_listen(struct socket *sock,int backlog)
goto out; goto out;
} }
set_bit(ATM_VF_LISTEN,&vcc->flags); set_bit(ATM_VF_LISTEN,&vcc->flags);
vcc_insert_socket(sk);
sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT; sk->sk_max_ack_backlog = backlog > 0 ? backlog : ATM_BACKLOG_DEFAULT;
error = -sk->sk_err; error = -sk->sk_err;
out: out:


@ -319,23 +319,52 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
return n ? d : NULL; return n ? d : NULL;
} }
/**
* find_rcv_list - determine optimal filterlist inside device filter struct
* @can_id: pointer to CAN identifier of a given can_filter
* @mask: pointer to CAN mask of a given can_filter
* @d: pointer to the device filter struct
*
* Description:
* Returns the optimal filterlist to reduce the filter handling in the
* receive path. This function is called by service functions that need
* to register or unregister a can_filter in the filter lists.
*
* In general, a filter matches when
*
* <received_can_id> & mask == can_id & mask
*
* so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
* relevant bits for the filter.
*
* The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
* filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames
* there is a special filterlist and a special rx path filter handling.
*
* Return:
* Pointer to optimal filterlist for the given can_id/mask pair.
* Consistency checked mask.
* Reduced can_id to have a preprocessed filter compare value.
*/
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask, static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
struct dev_rcv_lists *d) struct dev_rcv_lists *d)
{ {
canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */ canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
/* filter error frames */ /* filter for error frames in extra filterlist */
if (*mask & CAN_ERR_FLAG) { if (*mask & CAN_ERR_FLAG) {
/* clear CAN_ERR_FLAG in list entry */ /* clear CAN_ERR_FLAG in filter entry */
*mask &= CAN_ERR_MASK; *mask &= CAN_ERR_MASK;
return &d->rx[RX_ERR]; return &d->rx[RX_ERR];
} }
/* ensure valid values in can_mask */ /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */
if (*mask & CAN_EFF_FLAG)
*mask &= (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG); #define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)
else
*mask &= (CAN_SFF_MASK | CAN_RTR_FLAG); /* ensure valid values in can_mask for 'SFF only' frame filtering */
if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
*mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);
/* reduce condition testing at receive time */ /* reduce condition testing at receive time */
*can_id &= *mask; *can_id &= *mask;
@ -348,15 +377,19 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
if (!(*mask)) if (!(*mask))
return &d->rx[RX_ALL]; return &d->rx[RX_ALL];
/* use extra filterset for the subscription of exactly *ONE* can_id */ /* extra filterlists for the subscription of a single non-RTR can_id */
if (*can_id & CAN_EFF_FLAG) { if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS)
if (*mask == (CAN_EFF_MASK | CAN_EFF_FLAG)) { && !(*can_id & CAN_RTR_FLAG)) {
/* RFC: a use-case for hash-tables in the future? */
return &d->rx[RX_EFF]; if (*can_id & CAN_EFF_FLAG) {
if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
/* RFC: a future use-case for hash-tables? */
return &d->rx[RX_EFF];
}
} else {
if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
return &d->rx_sff[*can_id];
} }
} else {
if (*mask == CAN_SFF_MASK)
return &d->rx_sff[*can_id];
} }
/* default: filter via can_id/can_mask */ /* default: filter via can_id/can_mask */
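As a standalone illustration of the match rule documented in the new kernel-doc above (sketch only, not part of the patch; canid_t and CAN_INV_FILTER as in <linux/can.h>):

    #include <linux/can.h>

    /* a filter (can_id, mask) matches a received identifier rx_id when
     *     (rx_id & mask) == (can_id & mask)
     * and CAN_INV_FILTER set in can_id inverts the result */
    static int filter_matches(canid_t rx_id, canid_t can_id, canid_t mask)
    {
            int hit = (rx_id & mask) == ((can_id & ~CAN_INV_FILTER) & mask);

            return (can_id & CAN_INV_FILTER) ? !hit : hit;
    }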
@ -589,7 +622,10 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
} }
} }
/* check CAN_ID specific entries */ /* check filterlists for single non-RTR can_ids */
if (can_id & CAN_RTR_FLAG)
return matches;
if (can_id & CAN_EFF_FLAG) { if (can_id & CAN_EFF_FLAG) {
hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) { hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
if (r->can_id == can_id) { if (r->can_id == can_id) {


@ -64,10 +64,11 @@
#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */ #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
/* get best masking value for can_rx_register() for a given single can_id */ /* get best masking value for can_rx_register() for a given single can_id */
#define REGMASK(id) ((id & CAN_RTR_FLAG) | ((id & CAN_EFF_FLAG) ? \ #define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
(CAN_EFF_MASK | CAN_EFF_FLAG) : CAN_SFF_MASK)) (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
#define CAN_BCM_VERSION "20080415" #define CAN_BCM_VERSION CAN_VERSION
static __initdata const char banner[] = KERN_INFO static __initdata const char banner[] = KERN_INFO
"can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n"; "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";


@ -722,8 +722,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
unsigned int mss_now) unsigned int mss_now)
{ {
if (skb->len <= mss_now || !sk_can_gso(sk) || if (skb->len <= mss_now || !sk_can_gso(sk)) {
tcp_urg_mode(tcp_sk(sk))) {
/* Avoid the costly divide in the normal /* Avoid the costly divide in the normal
* non-TSO case. * non-TSO case.
*/ */
@ -1029,10 +1028,6 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
/* Compute the current effective MSS, taking SACKs and IP options, /* Compute the current effective MSS, taking SACKs and IP options,
* and even PMTU discovery events into account. * and even PMTU discovery events into account.
*
* LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up
* cannot be large. However, taking into account rare use of URG, this
* is not a big flaw.
*/ */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed) unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{ {
@ -1047,7 +1042,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
mss_now = tp->mss_cache; mss_now = tp->mss_cache;
if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp)) if (large_allowed && sk_can_gso(sk))
doing_tso = 1; doing_tso = 1;
if (dst) { if (dst) {
@ -1164,9 +1159,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
{ {
int tso_segs = tcp_skb_pcount(skb); int tso_segs = tcp_skb_pcount(skb);
if (!tso_segs || if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
(tso_segs > 1 && (tcp_skb_mss(skb) != mss_now ||
tcp_urg_mode(tcp_sk(sk))))) {
tcp_set_skb_tso_segs(sk, skb, mss_now); tcp_set_skb_tso_segs(sk, skb, mss_now);
tso_segs = tcp_skb_pcount(skb); tso_segs = tcp_skb_pcount(skb);
} }
@ -1519,6 +1512,10 @@ static int tcp_mtu_probe(struct sock *sk)
* send_head. This happens as incoming acks open up the remote * send_head. This happens as incoming acks open up the remote
* window for us. * window for us.
* *
* LARGESEND note: !tcp_urg_mode is overkill, only frames between
* snd_up-64k-mss .. snd_up cannot be large. However, taking into
* account rare use of URG, this is not a big flaw.
*
* Returns 1, if no segments are in flight and we have queued segments, but * Returns 1, if no segments are in flight and we have queued segments, but
* cannot send anything now because of SWS or another problem. * cannot send anything now because of SWS or another problem.
*/ */
@ -1570,7 +1567,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
} }
limit = mss_now; limit = mss_now;
if (tso_segs > 1) if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now, limit = tcp_mss_split_point(sk, skb, mss_now,
cwnd_quota); cwnd_quota);
@ -1619,6 +1616,7 @@ void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
*/ */
void tcp_push_one(struct sock *sk, unsigned int mss_now) void tcp_push_one(struct sock *sk, unsigned int mss_now)
{ {
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk); struct sk_buff *skb = tcp_send_head(sk);
unsigned int tso_segs, cwnd_quota; unsigned int tso_segs, cwnd_quota;
@ -1633,7 +1631,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
BUG_ON(!tso_segs); BUG_ON(!tso_segs);
limit = mss_now; limit = mss_now;
if (tso_segs > 1) if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now, limit = tcp_mss_split_point(sk, skb, mss_now,
cwnd_quota); cwnd_quota);


@ -326,6 +326,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
tp->snd_cwnd = 2; tp->snd_cwnd = 2;
else if (tp->snd_cwnd > tp->snd_cwnd_clamp) else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
tp->snd_cwnd = tp->snd_cwnd_clamp; tp->snd_cwnd = tp->snd_cwnd_clamp;
tp->snd_ssthresh = tcp_current_ssthresh(sk);
} }
/* Wipe the slate clean for the next RTT. */ /* Wipe the slate clean for the next RTT. */


@ -99,7 +99,7 @@ struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr)
sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]); sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
while (sta) { while (sta) {
if (compare_ether_addr(sta->sta.addr, addr) == 0) if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break; break;
sta = rcu_dereference(sta->hnext); sta = rcu_dereference(sta->hnext);
} }


@ -141,7 +141,7 @@ socket_mt(const struct sk_buff *skb, const struct xt_match_param *par)
sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol, sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
saddr, daddr, sport, dport, par->in, false); saddr, daddr, sport, dport, par->in, false);
if (sk != NULL) { if (sk != NULL) {
bool wildcard = (inet_sk(sk)->rcv_saddr == 0); bool wildcard = (sk->sk_state != TCP_TIME_WAIT && inet_sk(sk)->rcv_saddr == 0);
nf_tproxy_put_sock(sk); nf_tproxy_put_sock(sk);
if (wildcard) if (wildcard)


@ -574,9 +574,10 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr, list_entry = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
&iface->addr4_list); &iface->addr4_list);
spin_unlock(&netlbl_unlhsh_lock); spin_unlock(&netlbl_unlhsh_lock);
if (list_entry == NULL) if (list_entry != NULL)
entry = netlbl_unlhsh_addr4_entry(list_entry);
else
ret_val = -ENOENT; ret_val = -ENOENT;
entry = netlbl_unlhsh_addr4_entry(list_entry);
audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
audit_info); audit_info);
@ -634,9 +635,10 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
spin_lock(&netlbl_unlhsh_lock); spin_lock(&netlbl_unlhsh_lock);
list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list); list_entry = netlbl_af6list_remove(addr, mask, &iface->addr6_list);
spin_unlock(&netlbl_unlhsh_lock); spin_unlock(&netlbl_unlhsh_lock);
if (list_entry == NULL) if (list_entry != NULL)
entry = netlbl_unlhsh_addr6_entry(list_entry);
else
ret_val = -ENOENT; ret_val = -ENOENT;
entry = netlbl_unlhsh_addr6_entry(list_entry);
audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL, audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCDEL,
audit_info); audit_info);


@ -123,6 +123,7 @@ nla_put_failure:
static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb) static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{ {
struct net *net = sock_net(skb->sk);
struct phonet_device *pnd; struct phonet_device *pnd;
int dev_idx = 0, dev_start_idx = cb->args[0]; int dev_idx = 0, dev_start_idx = cb->args[0];
int addr_idx = 0, addr_start_idx = cb->args[1]; int addr_idx = 0, addr_start_idx = cb->args[1];
@ -131,6 +132,8 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
list_for_each_entry(pnd, &pndevs.list, list) { list_for_each_entry(pnd, &pndevs.list, list) {
u8 addr; u8 addr;
if (!net_eq(dev_net(pnd->netdev), net))
continue;
if (dev_idx > dev_start_idx) if (dev_idx > dev_start_idx)
addr_start_idx = 0; addr_start_idx = 0;
if (dev_idx++ < dev_start_idx) if (dev_idx++ < dev_start_idx)


@ -817,6 +817,7 @@ int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
continue; continue;
hlist_del(&pol->bydst); hlist_del(&pol->bydst);
hlist_del(&pol->byidx); hlist_del(&pol->byidx);
list_del(&pol->walk.all);
write_unlock_bh(&xfrm_policy_lock); write_unlock_bh(&xfrm_policy_lock);
xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,