dect/linux-2.6

Merge branch 'merge' into next

We want the irq fixes from the "merge" branch.
commit 8b6ee04067
Benjamin Herrenschmidt, 2012-05-14 10:19:22 +10:00
275 changed files with 2625 additions and 1549 deletions


@ -1,10 +1,10 @@
-* Calxeda SATA Controller
+* AHCI SATA Controller
 SATA nodes are defined to describe on-chip Serial ATA controllers.
 Each SATA controller should have its own node.
 Required properties:
-- compatible : compatible list, contains "calxeda,hb-ahci"
+- compatible : compatible list, contains "calxeda,hb-ahci" or "snps,spear-ahci"
 - interrupts : <interrupt mapping for SATA IRQ>
 - reg : <registers mapping>
@ -14,4 +14,3 @@ Example:
 reg = <0xffe08000 0x1000>;
 interrupts = <115>;
 };


@ -3,6 +3,8 @@
Required properties: Required properties:
- compatible : "fsl,sgtl5000". - compatible : "fsl,sgtl5000".
- reg : the I2C address of the device
Example: Example:
codec: sgtl5000@0a { codec: sgtl5000@0a {


@ -147,7 +147,7 @@ tcp_adv_win_scale - INTEGER
 (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
 if it is <= 0.
 Possible values are [-31, 31], inclusive.
-Default: 2
+Default: 1
 tcp_allowed_congestion_control - STRING
 Show/set the congestion control choices available to non-privileged
@ -410,7 +410,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
 net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables
 automatic tuning of that socket's receive buffer size, in which
 case this value is ignored.
-Default: between 87380B and 4MB, depending on RAM size.
+Default: between 87380B and 6MB, depending on RAM size.
 tcp_sack - BOOLEAN
 Enable select acknowledgments (SACKS).
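The tcp_adv_win_scale entry above defines how much of the receive buffer is counted as application buffering overhead. As a quick illustration of that arithmetic (a standalone sketch, not kernel code; the 87380-byte figure is simply the documented default from the tcp_rmem entry):

```c
/* Standalone illustration of the tcp_adv_win_scale formula quoted above:
 * overhead = bytes/2^scale for scale > 0, or bytes - bytes/2^(-scale)
 * for scale <= 0. Not kernel code.
 */
#include <stdio.h>

static long overhead(long bytes, int scale)
{
	if (scale > 0)
		return bytes >> scale;           /* bytes / 2^scale */
	return bytes - (bytes >> -scale);        /* bytes - bytes / 2^(-scale) */
}

int main(void)
{
	long rmem = 87380;  /* documented default receive buffer */

	printf("scale 2: %ld bytes reserved\n", overhead(rmem, 2)); /* 21845 */
	printf("scale 1: %ld bytes reserved\n", overhead(rmem, 1)); /* 43690 */
	return 0;
}
```

With the default moving from 2 to 1, the share reserved as application buffer grows from a quarter to half of the socket buffer.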


@ -1968,10 +1968,7 @@ S: Maintained
 F: drivers/net/ethernet/ti/cpmac.c
 CPU FREQUENCY DRIVERS
-M: Dave Jones <davej@redhat.com>
 L: cpufreq@vger.kernel.org
-W: http://www.codemonkey.org.uk/projects/cpufreq/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
 S: Maintained
 F: drivers/cpufreq/
 F: include/linux/cpufreq.h
@ -4037,6 +4034,7 @@ F: Documentation/scsi/53c700.txt
 F: drivers/scsi/53c700*
 LED SUBSYSTEM
+M: Bryan Wu <bryan.wu@canonical.com>
 M: Richard Purdie <rpurdie@rpsys.net>
 S: Maintained
 F: drivers/leds/
@ -5892,11 +5890,11 @@ F: Documentation/scsi/st.txt
 F: drivers/scsi/st*
 SCTP PROTOCOL
-M: Vlad Yasevich <vladislav.yasevich@hp.com>
+M: Vlad Yasevich <vyasevich@gmail.com>
 M: Sridhar Samudrala <sri@us.ibm.com>
 L: linux-sctp@vger.kernel.org
 W: http://lksctp.sourceforge.net
-S: Supported
+S: Maintained
 F: Documentation/networking/sctp.txt
 F: include/linux/sctp.h
 F: include/net/sctp/


@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Saber-toothed Squirrel
 # *DOCUMENTATION*


@ -477,7 +477,7 @@ config ALPHA_BROKEN_IRQ_MASK
 config VGA_HOSE
 bool
-depends on ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI
+depends on VGA_CONSOLE && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI)
 default y
 help
 Support VGA on an arbitrary hose; needed for several platforms


@ -1,14 +1,10 @@
#ifndef _ALPHA_RTC_H #ifndef _ALPHA_RTC_H
#define _ALPHA_RTC_H #define _ALPHA_RTC_H
#if defined(CONFIG_ALPHA_GENERIC) #if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \
|| defined(CONFIG_ALPHA_GENERIC)
# define get_rtc_time alpha_mv.rtc_get_time # define get_rtc_time alpha_mv.rtc_get_time
# define set_rtc_time alpha_mv.rtc_set_time # define set_rtc_time alpha_mv.rtc_set_time
#else
# if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP)
# define get_rtc_time marvel_get_rtc_time
# define set_rtc_time marvel_set_rtc_time
# endif
#endif #endif
#include <asm-generic/rtc.h> #include <asm-generic/rtc.h>


@ -11,6 +11,7 @@
#include <asm/core_tsunami.h> #include <asm/core_tsunami.h>
#undef __EXTERN_INLINE #undef __EXTERN_INLINE
#include <linux/module.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/sched.h> #include <linux/sched.h>


@ -317,7 +317,7 @@ marvel_init_irq(void)
} }
static int static int
marvel_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{ {
struct pci_controller *hose = dev->sysdata; struct pci_controller *hose = dev->sysdata;
struct io7_port *io7_port = hose->sysdata; struct io7_port *io7_port = hose->sysdata;


@ -1186,6 +1186,15 @@ if !MMU
source "arch/arm/Kconfig-nommu" source "arch/arm/Kconfig-nommu"
endif endif
config ARM_ERRATA_326103
bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
depends on CPU_V6
help
Executing a SWP instruction to read-only memory does not set bit 11
of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to
treat the access as a read, preventing a COW from occurring and
causing the faulting task to livelock.
config ARM_ERRATA_411920 config ARM_ERRATA_411920
bool "ARM errata: Invalidation of the Instruction Cache operation can fail" bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
depends on CPU_V6 || CPU_V6K depends on CPU_V6 || CPU_V6K


@ -173,7 +173,7 @@
mmc@5000 { mmc@5000 {
compatible = "arm,primecell"; compatible = "arm,primecell";
reg = < 0x5000 0x1000>; reg = < 0x5000 0x1000>;
interrupts = <22>; interrupts = <22 34>;
}; };
kmi@6000 { kmi@6000 {
compatible = "arm,pl050", "arm,primecell"; compatible = "arm,pl050", "arm,primecell";


@ -41,7 +41,7 @@
mmc@b000 { mmc@b000 {
compatible = "arm,primecell"; compatible = "arm,primecell";
reg = <0xb000 0x1000>; reg = <0xb000 0x1000>;
interrupts = <23>; interrupts = <23 34>;
}; };
}; };
}; };


@ -118,6 +118,13 @@ extern void iwmmxt_task_switch(struct thread_info *);
extern void vfp_sync_hwstate(struct thread_info *); extern void vfp_sync_hwstate(struct thread_info *);
extern void vfp_flush_hwstate(struct thread_info *); extern void vfp_flush_hwstate(struct thread_info *);
struct user_vfp;
struct user_vfp_exc;
extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
struct user_vfp_exc __user *);
extern int vfp_restore_user_hwstate(struct user_vfp __user *,
struct user_vfp_exc __user *);
#endif #endif
/* /*


@ -7,6 +7,8 @@
.macro set_tls_v6k, tp, tmp1, tmp2 .macro set_tls_v6k, tp, tmp1, tmp2
mcr p15, 0, \tp, c13, c0, 3 @ set TLS register mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
mov \tmp1, #0
mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
.endm .endm
.macro set_tls_v6, tp, tmp1, tmp2 .macro set_tls_v6, tp, tmp1, tmp2
@ -15,6 +17,8 @@
mov \tmp2, #0xffff0fff mov \tmp2, #0xffff0fff
tst \tmp1, #HWCAP_TLS @ hardware TLS available? tst \tmp1, #HWCAP_TLS @ hardware TLS available?
mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
movne \tmp1, #0
mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
.endm .endm


@ -155,10 +155,10 @@ static bool migrate_one_irq(struct irq_desc *desc)
} }
c = irq_data_get_irq_chip(d); c = irq_data_get_irq_chip(d);
if (c->irq_set_affinity) if (!c->irq_set_affinity)
c->irq_set_affinity(d, affinity, true);
else
pr_debug("IRQ%u: unable to set affinity\n", d->irq); pr_debug("IRQ%u: unable to set affinity\n", d->irq);
else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
cpumask_copy(d->affinity, affinity);
return ret; return ret;
} }


@ -906,27 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
return ret; return ret;
} }
#ifdef __ARMEB__
#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB
#else
#define AUDIT_ARCH_NR AUDIT_ARCH_ARM
#endif
asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{ {
unsigned long ip; unsigned long ip;
/* if (why)
* Save IP. IP is used to denote syscall entry/exit:
* IP = 0 -> entry, = 1 -> exit
*/
ip = regs->ARM_ip;
regs->ARM_ip = why;
if (!ip)
audit_syscall_exit(regs); audit_syscall_exit(regs);
else else
audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0, audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
if (!test_thread_flag(TIF_SYSCALL_TRACE)) if (!test_thread_flag(TIF_SYSCALL_TRACE))
@ -936,6 +923,13 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
current_thread_info()->syscall = scno; current_thread_info()->syscall = scno;
/*
* IP is used to denote syscall entry/exit:
* IP = 0 -> entry, =1 -> exit
*/
ip = regs->ARM_ip;
regs->ARM_ip = why;
/* the 0x80 provides a way for the tracing parent to distinguish /* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */ between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)


@ -180,44 +180,23 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
static int preserve_vfp_context(struct vfp_sigframe __user *frame) static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{ {
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *h = &thread->vfpstate.hard;
const unsigned long magic = VFP_MAGIC; const unsigned long magic = VFP_MAGIC;
const unsigned long size = VFP_STORAGE_SIZE; const unsigned long size = VFP_STORAGE_SIZE;
int err = 0; int err = 0;
vfp_sync_hwstate(thread);
__put_user_error(magic, &frame->magic, err); __put_user_error(magic, &frame->magic, err);
__put_user_error(size, &frame->size, err); __put_user_error(size, &frame->size, err);
/* if (err)
* Copy the floating point registers. There can be unused return -EFAULT;
* registers see asm/hwcap.h for details.
*/
err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
sizeof(h->fpregs));
/*
* Copy the status and control register.
*/
__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
/* return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
* Copy the exception registers.
*/
__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
return err ? -EFAULT : 0;
} }
static int restore_vfp_context(struct vfp_sigframe __user *frame) static int restore_vfp_context(struct vfp_sigframe __user *frame)
{ {
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *h = &thread->vfpstate.hard;
unsigned long magic; unsigned long magic;
unsigned long size; unsigned long size;
unsigned long fpexc;
int err = 0; int err = 0;
__get_user_error(magic, &frame->magic, err); __get_user_error(magic, &frame->magic, err);
@ -228,33 +207,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL; return -EINVAL;
vfp_flush_hwstate(thread); return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
/*
* Copy the floating point registers. There can be unused
* registers see asm/hwcap.h for details.
*/
err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
sizeof(h->fpregs));
/*
* Copy the status and control register.
*/
__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
/*
* Sanitise and restore the exception registers.
*/
__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
/* Ensure the VFP is enabled. */
fpexc |= FPEXC_EN;
/* Ensure FPINST2 is invalid and the exception flag is cleared. */
fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
h->fpexc = fpexc;
__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
return err ? -EFAULT : 0;
} }
#endif #endif


@ -251,8 +251,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
struct mm_struct *mm = &init_mm; struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
printk("CPU%u: Booted secondary processor\n", cpu);
/* /*
* All kernel threads share the same mm context; grab a * All kernel threads share the same mm context; grab a
* reference and switch to it. * reference and switch to it.
@ -264,6 +262,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
enter_lazy_tlb(mm, current); enter_lazy_tlb(mm, current);
local_flush_tlb_all(); local_flush_tlb_all();
printk("CPU%u: Booted secondary processor\n", cpu);
cpu_init(); cpu_init();
preempt_disable(); preempt_disable();
trace_hardirqs_off(); trace_hardirqs_off();
@ -510,10 +510,6 @@ static void ipi_cpu_stop(unsigned int cpu)
local_fiq_disable(); local_fiq_disable();
local_irq_disable(); local_irq_disable();
#ifdef CONFIG_HOTPLUG_CPU
platform_cpu_kill(cpu);
#endif
while (1) while (1)
cpu_relax(); cpu_relax();
} }
@ -576,17 +572,25 @@ void smp_send_reschedule(int cpu)
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
} }
#ifdef CONFIG_HOTPLUG_CPU
static void smp_kill_cpus(cpumask_t *mask)
{
unsigned int cpu;
for_each_cpu(cpu, mask)
platform_cpu_kill(cpu);
}
#else
static void smp_kill_cpus(cpumask_t *mask) { }
#endif
void smp_send_stop(void) void smp_send_stop(void)
{ {
unsigned long timeout; unsigned long timeout;
if (num_online_cpus() > 1) {
struct cpumask mask; struct cpumask mask;
cpumask_copy(&mask, cpu_online_mask); cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask); cpumask_clear_cpu(smp_processor_id(), &mask);
smp_cross_call(&mask, IPI_CPU_STOP); smp_cross_call(&mask, IPI_CPU_STOP);
}
/* Wait up to one second for other CPUs to stop */ /* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC; timeout = USEC_PER_SEC;
@ -595,6 +599,8 @@ void smp_send_stop(void)
if (num_online_cpus() > 1) if (num_online_cpus() > 1)
pr_warning("SMP: failed to stop secondary CPUs\n"); pr_warning("SMP: failed to stop secondary CPUs\n");
smp_kill_cpus(&mask);
} }
/* /*


@ -115,7 +115,7 @@ int kernel_execve(const char *filename,
"Ir" (THREAD_START_SP - sizeof(regs)), "Ir" (THREAD_START_SP - sizeof(regs)),
"r" (&regs), "r" (&regs),
"Ir" (sizeof(regs)) "Ir" (sizeof(regs))
: "r0", "r1", "r2", "r3", "ip", "lr", "memory"); : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory");
out: out:
return ret; return ret;


@ -14,6 +14,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/kexec.h>
#include <asm/mach/arch.h> #include <asm/mach/arch.h>
#include <asm/mach/map.h> #include <asm/mach/map.h>
#include <mach/bridge-regs.h> #include <mach/bridge-regs.h>


@ -48,7 +48,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id)
struct irq_chip *irq_chip = NULL; struct irq_chip *irq_chip = NULL;
int gpio, irq_num, fiq_count; int gpio, irq_num, fiq_count;
irq_desc = irq_to_desc(IH_GPIO_BASE); irq_desc = irq_to_desc(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK));
if (irq_desc) if (irq_desc)
irq_chip = irq_desc->irq_data.chip; irq_chip = irq_desc->irq_data.chip;


@ -641,7 +641,7 @@ static struct regulator_consumer_supply dummy_supplies[] = {
static void __init igep_init(void) static void __init igep_init(void)
{ {
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies));
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
/* Get IGEP2 hardware revision */ /* Get IGEP2 hardware revision */


@ -941,10 +941,10 @@
#define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29) #define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29)
#define OMAP4_DSI1_LANEENABLE_SHIFT 24 #define OMAP4_DSI1_LANEENABLE_SHIFT 24
#define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24) #define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24)
#define OMAP4_DSI2_PIPD_SHIFT 19 #define OMAP4_DSI1_PIPD_SHIFT 19
#define OMAP4_DSI2_PIPD_MASK (0x1f << 19) #define OMAP4_DSI1_PIPD_MASK (0x1f << 19)
#define OMAP4_DSI1_PIPD_SHIFT 14 #define OMAP4_DSI2_PIPD_SHIFT 14
#define OMAP4_DSI1_PIPD_MASK (0x1f << 14) #define OMAP4_DSI2_PIPD_MASK (0x1f << 14)
/* CONTROL_MCBSPLP */ /* CONTROL_MCBSPLP */
#define OMAP4_ALBCTRLRX_FSX_SHIFT 31 #define OMAP4_ALBCTRLRX_FSX_SHIFT 31


@ -65,8 +65,8 @@
#define MPP8_GIGE MPP(8, 0x1, 0, 0, 1, 1, 1) #define MPP8_GIGE MPP(8, 0x1, 0, 0, 1, 1, 1)
#define MPP9_UNUSED MPP(9, 0x0, 0, 0, 1, 1, 1) #define MPP9_UNUSED MPP(9, 0x0, 0, 0, 1, 1, 1)
#define MPP9_GPIO MPP(9, 0x0, 0, 0, 1, 1, 1) #define MPP9_GPIO MPP(9, 0x0, 1, 1, 1, 1, 1)
#define MPP9_GIGE MPP(9, 0x1, 1, 1, 1, 1, 1) #define MPP9_GIGE MPP(9, 0x1, 0, 0, 1, 1, 1)
#define MPP10_UNUSED MPP(10, 0x0, 0, 0, 1, 1, 1) #define MPP10_UNUSED MPP(10, 0x0, 0, 0, 1, 1, 1)
#define MPP10_GPIO MPP(10, 0x0, 1, 1, 1, 1, 1) #define MPP10_GPIO MPP(10, 0x0, 1, 1, 1, 1, 1)


@ -26,18 +26,23 @@ ENTRY(v6_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR mrc p15, 0, r0, c6, c0, 0 @ get FAR
/* /*
* Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103). * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR.
* The test below covers all the write situations, including Java bytecodes
*/ */
bic r1, r1, #1 << 11 @ clear bit 11 of FSR #ifdef CONFIG_ARM_ERRATA_326103
tst r5, #PSR_J_BIT @ Java? ldr ip, =0x4107b36
mrc p15, 0, r3, c0, c0, 0 @ get processor id
teq ip, r3, lsr #4 @ r0 ARM1136?
bne do_DataAbort bne do_DataAbort
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 tst r5, #PSR_J_BIT @ Java?
ldreq r3, [r4] @ read aborted ARM instruction tsteq r5, #PSR_T_BIT @ Thumb?
bne do_DataAbort
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction
#ifdef CONFIG_CPU_ENDIAN_BE8 #ifdef CONFIG_CPU_ENDIAN_BE8
reveq r3, r3 rev r3, r3
#endif #endif
do_ldrd_abort tmp=ip, insn=r3 do_ldrd_abort tmp=ip, insn=r3
tst r3, #1 << 20 @ L = 0 -> write tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes. orreq r1, r1, #1 << 11 @ yes.
#endif
b do_DataAbort b do_DataAbort
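The erratum 326103 workaround above keys off the main ID register: the constant 0x4107b36 is the ARM1136 MIDR with the low revision nibble shifted away, so the fixup only runs on affected parts. A userspace sketch of that comparison (illustrative only; the sample MIDR values are assumptions, not taken from this commit):

```c
/* Illustrative check mirroring "teq ip, r3, lsr #4" against 0x4107b36:
 * compare MIDR >> 4 so the revision nibble is ignored. Not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

static int erratum_326103_applies(uint32_t midr)
{
	return (midr >> 4) == 0x4107b36;	/* ARM1136, variant 0 */
}

int main(void)
{
	printf("%d\n", erratum_326103_applies(0x4107b362)); /* assumed ARM1136 r0p2 -> 1 */
	printf("%d\n", erratum_326103_applies(0x410fb767)); /* assumed ARM1176 -> 0 */
	return 0;
}
```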


@ -32,6 +32,7 @@ static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock); static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask; /* Bitmask of active ways */ static u32 l2x0_way_mask; /* Bitmask of active ways */
static u32 l2x0_size; static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
struct l2x0_regs l2x0_saved_regs; struct l2x0_regs l2x0_saved_regs;
@ -61,12 +62,7 @@ static inline void cache_sync(void)
{ {
void __iomem *base = l2x0_base; void __iomem *base = l2x0_base;
#ifdef CONFIG_PL310_ERRATA_753970 writel_relaxed(0, base + sync_reg_offset);
/* write to an unmmapped register */
writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
cache_wait(base + L2X0_CACHE_SYNC, 1); cache_wait(base + L2X0_CACHE_SYNC, 1);
} }
@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr)
} }
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
if (outer_cache.set_debug)
outer_cache.set_debug(val);
}
#define debug_writel(val) outer_cache.set_debug(val) static void pl310_set_debug(unsigned long val)
static void l2x0_set_debug(unsigned long val)
{ {
writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
} }
@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val)
{ {
} }
#define l2x0_set_debug NULL #define pl310_set_debug NULL
#endif #endif
#ifdef CONFIG_PL310_ERRATA_588369 #ifdef CONFIG_PL310_ERRATA_588369
@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
else else
ways = 8; ways = 8;
type = "L310"; type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
/* Unmapped register. */
sync_reg_offset = L2X0_DUMMY_REG;
#endif
outer_cache.set_debug = pl310_set_debug;
break; break;
case L2X0_CACHE_ID_PART_L210: case L2X0_CACHE_ID_PART_L210:
ways = (aux >> 13) & 0xf; ways = (aux >> 13) & 0xf;
@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
outer_cache.flush_all = l2x0_flush_all; outer_cache.flush_all = l2x0_flush_all;
outer_cache.inv_all = l2x0_inv_all; outer_cache.inv_all = l2x0_inv_all;
outer_cache.disable = l2x0_disable; outer_cache.disable = l2x0_disable;
outer_cache.set_debug = l2x0_set_debug;
printk(KERN_INFO "%s cache controller enabled\n", type); printk(KERN_INFO "%s cache controller enabled\n", type);
printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",


@ -293,11 +293,11 @@ EXPORT_SYMBOL(pfn_valid);
#endif #endif
#ifndef CONFIG_SPARSEMEM #ifndef CONFIG_SPARSEMEM
static void arm_memory_present(void) static void __init arm_memory_present(void)
{ {
} }
#else #else
static void arm_memory_present(void) static void __init arm_memory_present(void)
{ {
struct memblock_region *reg; struct memblock_region *reg;


@ -618,8 +618,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
} }
} }
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
unsigned long phys, const struct mem_type *type) unsigned long end, unsigned long phys, const struct mem_type *type)
{ {
pud_t *pud = pud_offset(pgd, addr); pud_t *pud = pud_offset(pgd, addr);
unsigned long next; unsigned long next;


@ -916,6 +916,13 @@ void omap_start_dma(int lch)
l |= OMAP_DMA_CCR_BUFFERING_DISABLE; l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
l |= OMAP_DMA_CCR_EN; l |= OMAP_DMA_CCR_EN;
/*
* As dma_write() uses IO accessors which are weakly ordered, there
* is no guarantee that data in coherent DMA memory will be visible
* to the DMA device. Add a memory barrier here to ensure that any
* such data is visible prior to enabling DMA.
*/
mb();
p->dma_write(l, CCR, lch); p->dma_write(l, CCR, lch);
dma_chan[lch].flags |= OMAP_DMA_ACTIVE; dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
@ -965,6 +972,13 @@ void omap_stop_dma(int lch)
p->dma_write(l, CCR, lch); p->dma_write(l, CCR, lch);
} }
/*
* Ensure that data transferred by DMA is visible to any access
* after DMA has been disabled. This is important for coherent
* DMA regions.
*/
mb();
if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
int next_lch, cur_lch = lch; int next_lch, cur_lch = lch;
char dma_chan_link_map[dma_lch_count]; char dma_chan_link_map[dma_lch_count];


@ -17,6 +17,8 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/user.h>
#include <asm/cp15.h> #include <asm/cp15.h>
#include <asm/cputype.h> #include <asm/cputype.h>
@ -528,6 +530,103 @@ void vfp_flush_hwstate(struct thread_info *thread)
put_cpu(); put_cpu();
} }
/*
* Save the current VFP state into the provided structures and prepare
* for entry into a new function (signal handler).
*/
int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
struct user_vfp_exc __user *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
int err = 0;
/* Ensure that the saved hwstate is up-to-date. */
vfp_sync_hwstate(thread);
/*
* Copy the floating point registers. There can be unused
* registers see asm/hwcap.h for details.
*/
err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
sizeof(hwstate->fpregs));
/*
* Copy the status and control register.
*/
__put_user_error(hwstate->fpscr, &ufp->fpscr, err);
/*
* Copy the exception registers.
*/
__put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
__put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
__put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
if (err)
return -EFAULT;
/* Ensure that VFP is disabled. */
vfp_flush_hwstate(thread);
/*
* As per the PCS, clear the length and stride bits for function
* entry.
*/
hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK);
/*
* Disable VFP in the hwstate so that we can detect if it gets
* used.
*/
hwstate->fpexc &= ~FPEXC_EN;
return 0;
}
/* Sanitise and restore the current VFP state from the provided structures. */
int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
struct user_vfp_exc __user *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
unsigned long fpexc;
int err = 0;
/*
* If VFP has been used, then disable it to avoid corrupting
* the new thread state.
*/
if (hwstate->fpexc & FPEXC_EN)
vfp_flush_hwstate(thread);
/*
* Copy the floating point registers. There can be unused
* registers see asm/hwcap.h for details.
*/
err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
sizeof(hwstate->fpregs));
/*
* Copy the status and control register.
*/
__get_user_error(hwstate->fpscr, &ufp->fpscr, err);
/*
* Sanitise and restore the exception registers.
*/
__get_user_error(fpexc, &ufp_exc->fpexc, err);
/* Ensure the VFP is enabled. */
fpexc |= FPEXC_EN;
/* Ensure FPINST2 is invalid and the exception flag is cleared. */
fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
hwstate->fpexc = fpexc;
__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
return err ? -EFAULT : 0;
}
/* /*
* VFP hardware can lose all context when a CPU goes offline. * VFP hardware can lose all context when a CPU goes offline.
* As we will be running in SMP mode with CPU hotplug, we will save the * As we will be running in SMP mode with CPU hotplug, we will save the


@ -1174,7 +1174,7 @@ out:
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
{ {
return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL); return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
} }
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)


@ -22,7 +22,7 @@
/***************************************************************************/ /***************************************************************************/
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
static void __init m520x_qspi_init(void) static void __init m520x_qspi_init(void)
{ {
@ -35,7 +35,7 @@ static void __init m520x_qspi_init(void)
writew(par, MCF_GPIO_PAR_UART); writew(par, MCF_GPIO_PAR_UART);
} }
#endif /* CONFIG_SPI_COLDFIRE_QSPI */ #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
/***************************************************************************/ /***************************************************************************/
@ -79,7 +79,7 @@ void __init config_BSP(char *commandp, int size)
mach_sched_init = hw_timer_init; mach_sched_init = hw_timer_init;
m520x_uarts_init(); m520x_uarts_init();
m520x_fec_init(); m520x_fec_init();
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
m520x_qspi_init(); m520x_qspi_init();
#endif #endif
} }
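The guard change in this and the following ColdFire board files swaps a plain #ifdef for IS_ENABLED(), which is true when the option is built in or modular. A simplified standalone sketch of the idea (condensed from the include/linux/kconfig.h macro trick, not the exact kernel source; the CONFIG_ define below is faked for illustration):

```c
/* Simplified sketch of IS_ENABLED(): Kconfig defines CONFIG_FOO=1 for
 * built-in options and CONFIG_FOO_MODULE=1 for modules; IS_ENABLED() is
 * true in either case, while #ifdef CONFIG_FOO only sees the built-in case.
 */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

#define IS_BUILTIN(option) __is_defined(option)
#define IS_MODULE(option) __is_defined(option##_MODULE)
#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

#define CONFIG_SPI_COLDFIRE_QSPI_MODULE 1	/* pretend the driver is =m */

int main(void)
{
	printf("builtin: %d, module: %d, enabled: %d\n",
	       IS_BUILTIN(CONFIG_SPI_COLDFIRE_QSPI),
	       IS_MODULE(CONFIG_SPI_COLDFIRE_QSPI),
	       IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI));	/* prints 0, 1, 1 */
	return 0;
}
```

With the old #ifdef, the QSPI platform setup silently vanished whenever the SPI driver was configured as a module.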


@ -22,7 +22,7 @@
/***************************************************************************/ /***************************************************************************/
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
static void __init m523x_qspi_init(void) static void __init m523x_qspi_init(void)
{ {
@ -36,7 +36,7 @@ static void __init m523x_qspi_init(void)
writew(par, MCFGPIO_PAR_TIMER); writew(par, MCFGPIO_PAR_TIMER);
} }
#endif /* CONFIG_SPI_COLDFIRE_QSPI */ #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
/***************************************************************************/ /***************************************************************************/
@ -58,7 +58,7 @@ void __init config_BSP(char *commandp, int size)
{ {
mach_sched_init = hw_timer_init; mach_sched_init = hw_timer_init;
m523x_fec_init(); m523x_fec_init();
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
m523x_qspi_init(); m523x_qspi_init();
#endif #endif
} }


@ -51,7 +51,7 @@ static struct platform_device *m5249_devices[] __initdata = {
/***************************************************************************/ /***************************************************************************/
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
static void __init m5249_qspi_init(void) static void __init m5249_qspi_init(void)
{ {
@ -61,7 +61,7 @@ static void __init m5249_qspi_init(void)
mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI); mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI);
} }
#endif /* CONFIG_SPI_COLDFIRE_QSPI */ #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
/***************************************************************************/ /***************************************************************************/
@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size)
#ifdef CONFIG_M5249C3 #ifdef CONFIG_M5249C3
m5249_smc91x_init(); m5249_smc91x_init();
#endif #endif
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
m5249_qspi_init(); m5249_qspi_init();
#endif #endif
} }


@ -23,7 +23,7 @@
/***************************************************************************/ /***************************************************************************/
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
static void __init m527x_qspi_init(void) static void __init m527x_qspi_init(void)
{ {
@ -42,7 +42,7 @@ static void __init m527x_qspi_init(void)
#endif #endif
} }
#endif /* CONFIG_SPI_COLDFIRE_QSPI */ #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
/***************************************************************************/ /***************************************************************************/
@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size)
mach_sched_init = hw_timer_init; mach_sched_init = hw_timer_init;
m527x_uarts_init(); m527x_uarts_init();
m527x_fec_init(); m527x_fec_init();
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
m527x_qspi_init(); m527x_qspi_init();
#endif #endif
} }


@ -24,7 +24,7 @@
/***************************************************************************/ /***************************************************************************/
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
static void __init m528x_qspi_init(void) static void __init m528x_qspi_init(void)
{ {
@ -32,7 +32,7 @@ static void __init m528x_qspi_init(void)
__raw_writeb(0x07, MCFGPIO_PQSPAR); __raw_writeb(0x07, MCFGPIO_PQSPAR);
} }
#endif /* CONFIG_SPI_COLDFIRE_QSPI */ #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
/***************************************************************************/ /***************************************************************************/
@ -98,7 +98,7 @@ void __init config_BSP(char *commandp, int size)
mach_sched_init = hw_timer_init; mach_sched_init = hw_timer_init;
m528x_uarts_init(); m528x_uarts_init();
m528x_fec_init(); m528x_fec_init();
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
m528x_qspi_init(); m528x_qspi_init();
#endif #endif
} }


@ -30,7 +30,7 @@
/***************************************************************************/ /***************************************************************************/
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
static void __init m532x_qspi_init(void) static void __init m532x_qspi_init(void)
{ {
@ -38,7 +38,7 @@ static void __init m532x_qspi_init(void)
writew(0x01f0, MCF_GPIO_PAR_QSPI); writew(0x01f0, MCF_GPIO_PAR_QSPI);
} }
#endif /* CONFIG_SPI_COLDFIRE_QSPI */ #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
/***************************************************************************/ /***************************************************************************/
@ -77,7 +77,7 @@ void __init config_BSP(char *commandp, int size)
mach_sched_init = hw_timer_init; mach_sched_init = hw_timer_init;
m532x_uarts_init(); m532x_uarts_init();
m532x_fec_init(); m532x_fec_init();
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
m532x_qspi_init(); m532x_qspi_init();
#endif #endif


@ -121,7 +121,7 @@ static struct platform_device mcf_fec1 = {
#endif /* MCFFEC_BASE1 */ #endif /* MCFFEC_BASE1 */
#endif /* CONFIG_FEC */ #endif /* CONFIG_FEC */
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
/* /*
* The ColdFire QSPI module is an SPI protocol hardware block used * The ColdFire QSPI module is an SPI protocol hardware block used
* on a number of different ColdFire CPUs. * on a number of different ColdFire CPUs.
@ -274,7 +274,7 @@ static struct platform_device mcf_qspi = {
.resource = mcf_qspi_resources, .resource = mcf_qspi_resources,
.dev.platform_data = &mcf_qspi_data, .dev.platform_data = &mcf_qspi_data,
}; };
#endif /* CONFIG_SPI_COLDFIRE_QSPI */ #endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
static struct platform_device *mcf_devices[] __initdata = { static struct platform_device *mcf_devices[] __initdata = {
&mcf_uart, &mcf_uart,
@ -284,7 +284,7 @@ static struct platform_device *mcf_devices[] __initdata = {
&mcf_fec1, &mcf_fec1,
#endif #endif
#endif #endif
#ifdef CONFIG_SPI_COLDFIRE_QSPI #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
&mcf_qspi, &mcf_qspi,
#endif #endif
}; };


@ -58,8 +58,8 @@ static void __init ar913x_wmac_setup(void)
static int ar933x_wmac_reset(void) static int ar933x_wmac_reset(void)
{ {
ath79_device_reset_clear(AR933X_RESET_WMAC);
ath79_device_reset_set(AR933X_RESET_WMAC); ath79_device_reset_set(AR933X_RESET_WMAC);
ath79_device_reset_clear(AR933X_RESET_WMAC);
return 0; return 0;
} }


@ -45,7 +45,7 @@
#define JZ4740_IRQ_LCD JZ4740_IRQ(30) #define JZ4740_IRQ_LCD JZ4740_IRQ(30)
/* 2nd-level interrupts */ /* 2nd-level interrupts */
#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (X)) #define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (x))
#define JZ4740_IRQ_INTC_GPIO(x) (JZ4740_IRQ_GPIO0 - (x)) #define JZ4740_IRQ_INTC_GPIO(x) (JZ4740_IRQ_GPIO0 - (x))
#define JZ4740_IRQ_GPIO(x) (JZ4740_IRQ(48) + (x)) #define JZ4740_IRQ_GPIO(x) (JZ4740_IRQ(48) + (x))


@ -37,12 +37,6 @@ extern void tlbmiss_handler_setup_pgd(unsigned long pgd);
write_c0_xcontext((unsigned long) smp_processor_id() << 51); \ write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
} while (0) } while (0)
static inline unsigned long get_current_pgd(void)
{
return PHYS_TO_XKSEG_CACHED((read_c0_context() >> 11) & ~0xfffUL);
}
#else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/ #else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/
/* /*


@ -257,11 +257,8 @@ asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs)
return -EFAULT; return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE); sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked; current->saved_sigmask = current->blocked;
current->blocked = newset; set_current_blocked(&newset);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE; current->state = TASK_INTERRUPTIBLE;
schedule(); schedule();
@ -286,11 +283,8 @@ asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
return -EFAULT; return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE); sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked; current->saved_sigmask = current->blocked;
current->blocked = newset; set_current_blocked(&newset);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE; current->state = TASK_INTERRUPTIBLE;
schedule(); schedule();
@ -362,10 +356,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
goto badframe; goto badframe;
sigdelsetmask(&blocked, ~_BLOCKABLE); sigdelsetmask(&blocked, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock); set_current_blocked(&blocked);
current->blocked = blocked;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
sig = restore_sigcontext(&regs, &frame->sf_sc); sig = restore_sigcontext(&regs, &frame->sf_sc);
if (sig < 0) if (sig < 0)
@ -401,10 +392,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
goto badframe; goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE); sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock); set_current_blocked(&set);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext); sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0) if (sig < 0)
@ -580,12 +568,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
if (ret) if (ret)
return ret; return ret;
spin_lock_irq(&current->sighand->siglock); block_sigmask(ka, sig);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked, sig);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return ret; return ret;
} }


@ -290,11 +290,8 @@ asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs)
return -EFAULT; return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE); sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked; current->saved_sigmask = current->blocked;
current->blocked = newset; set_current_blocked(&newset);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE; current->state = TASK_INTERRUPTIBLE;
schedule(); schedule();
@ -318,11 +315,8 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
return -EFAULT; return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE); sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked; current->saved_sigmask = current->blocked;
current->blocked = newset; set_current_blocked(&newset);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE; current->state = TASK_INTERRUPTIBLE;
schedule(); schedule();
@ -488,10 +482,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
goto badframe; goto badframe;
sigdelsetmask(&blocked, ~_BLOCKABLE); sigdelsetmask(&blocked, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock); set_current_blocked(&blocked);
current->blocked = blocked;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
sig = restore_sigcontext32(&regs, &frame->sf_sc); sig = restore_sigcontext32(&regs, &frame->sf_sc);
if (sig < 0) if (sig < 0)
@ -529,10 +520,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
goto badframe; goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE); sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock); set_current_blocked(&set);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext); sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0) if (sig < 0)


@ -93,11 +93,8 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
sigset_from_compat(&newset, &uset); sigset_from_compat(&newset, &uset);
sigdelsetmask(&newset, ~_BLOCKABLE); sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked; current->saved_sigmask = current->blocked;
current->blocked = newset; set_current_blocked(&newset);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE; current->state = TASK_INTERRUPTIBLE;
schedule(); schedule();
@ -121,10 +118,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
goto badframe; goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE); sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock); set_current_blocked(&set);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext); sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
if (sig < 0) if (sig < 0)


@ -2,7 +2,6 @@
#define _PARISC_HARDWARE_H #define _PARISC_HARDWARE_H
#include <linux/mod_devicetable.h> #include <linux/mod_devicetable.h>
#include <asm/pdc.h>
#define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID #define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID
#define HVERSION_ANY_ID PA_HVERSION_ANY_ID #define HVERSION_ANY_ID PA_HVERSION_ANY_ID
@ -95,12 +94,14 @@ struct bc_module {
#define HPHW_MC 15 #define HPHW_MC 15
#define HPHW_FAULTY 31 #define HPHW_FAULTY 31
struct parisc_device_id;
/* hardware.c: */ /* hardware.c: */
extern const char *parisc_hardware_description(struct parisc_device_id *id); extern const char *parisc_hardware_description(struct parisc_device_id *id);
extern enum cpu_type parisc_get_cpu_type(unsigned long hversion); extern enum cpu_type parisc_get_cpu_type(unsigned long hversion);
struct pci_dev; struct pci_dev;
struct hardware_path;
/* drivers.c: */ /* drivers.c: */
extern struct parisc_device *alloc_pa_dev(unsigned long hpa, extern struct parisc_device *alloc_pa_dev(unsigned long hpa,


@ -160,5 +160,11 @@ extern int npmem_ranges;
#include <asm-generic/memory_model.h> #include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h> #include <asm-generic/getorder.h>
#include <asm/pdc.h>
#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
/* DEFINITION OF THE ZERO-PAGE (PAG0) */
/* based on work by Jason Eckhardt (jason@equator.com) */
#endif /* _PARISC_PAGE_H */ #endif /* _PARISC_PAGE_H */


@ -343,8 +343,6 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <asm/page.h> /* for __PAGE_OFFSET */
extern int pdc_type; extern int pdc_type;
/* Values for pdc_type */ /* Values for pdc_type */
@ -677,11 +675,6 @@ static inline char * os_id_to_string(u16 os_id) {
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
/* DEFINITION OF THE ZERO-PAGE (PAG0) */
/* based on work by Jason Eckhardt (jason@equator.com) */
/* flags of the device_path */ /* flags of the device_path */
#define PF_AUTOBOOT 0x80 #define PF_AUTOBOOT 0x80
#define PF_AUTOSEARCH 0x40 #define PF_AUTOSEARCH 0x40


@ -44,6 +44,8 @@ struct vm_area_struct;
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#include <asm/page.h>
#define pte_ERROR(e) \ #define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \ #define pmd_ERROR(e) \


@ -1,6 +1,8 @@
#ifndef __ASM_SPINLOCK_H #ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H
#include <asm/barrier.h>
#include <asm/ldcw.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/spinlock_types.h> #include <asm/spinlock_types.h>


@ -50,6 +50,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/major.h> #include <linux/major.h>
#include <linux/tty.h> #include <linux/tty.h>
#include <asm/page.h> /* for PAGE0 */
#include <asm/pdc.h> /* for iodc_call() proto and friends */ #include <asm/pdc.h> /* for iodc_call() proto and friends */
static DEFINE_SPINLOCK(pdc_console_lock); static DEFINE_SPINLOCK(pdc_console_lock);
@ -104,7 +105,7 @@ static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp)
static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp) static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp)
{ {
if (!tty->count) { if (tty->count == 1) {
del_timer_sync(&pdc_console_timer); del_timer_sync(&pdc_console_timer);
tty_port_tty_set(&tty_port, NULL); tty_port_tty_set(&tty_port, NULL);
} }


@ -29,6 +29,7 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/page.h>
#include <asm/param.h> #include <asm/param.h>
#include <asm/pdc.h> #include <asm/pdc.h>
#include <asm/led.h> #include <asm/led.h>


@ -584,23 +584,19 @@ _GLOBAL(ret_from_except_lite)
fast_exc_return_irq: fast_exc_return_irq:
restore: restore:
/* /*
* This is the main kernel exit path, we first check if we * This is the main kernel exit path. First we check if we
* have to change our interrupt state. * are about to re-enable interrupts
*/ */
ld r5,SOFTE(r1) ld r5,SOFTE(r1)
lbz r6,PACASOFTIRQEN(r13) lbz r6,PACASOFTIRQEN(r13)
cmpwi cr1,r5,0 cmpwi cr0,r5,0
cmpw cr0,r5,r6 beq restore_irq_off
beq cr0,4f
/* We do, handle disable first, which is easy */ /* We are enabling, were we already enabled ? Yes, just return */
bne cr1,3f; cmpwi cr0,r6,1
li r0,0 beq cr0,do_restore
stb r0,PACASOFTIRQEN(r13);
TRACE_DISABLE_INTS
b 4f
3: /* /*
* We are about to soft-enable interrupts (we are hard disabled * We are about to soft-enable interrupts (we are hard disabled
* at this point). We check if there's anything that needs to * at this point). We check if there's anything that needs to
* be replayed first. * be replayed first.
@ -622,7 +618,7 @@ restore_no_replay:
/* /*
* Final return path. BookE is handled in a different file * Final return path. BookE is handled in a different file
*/ */
4: do_restore:
#ifdef CONFIG_PPC_BOOK3E #ifdef CONFIG_PPC_BOOK3E
b .exception_return_book3e b .exception_return_book3e
#else #else
@ -695,6 +691,25 @@ fast_exception_return:
#endif /* CONFIG_PPC_BOOK3E */ #endif /* CONFIG_PPC_BOOK3E */
/*
* We are returning to a context with interrupts soft disabled.
*
* However, we may also about to hard enable, so we need to
* make sure that in this case, we also clear PACA_IRQ_HARD_DIS
* or that bit can get out of sync and bad things will happen
*/
restore_irq_off:
ld r3,_MSR(r1)
lbz r7,PACAIRQHAPPENED(r13)
andi. r0,r3,MSR_EE
beq 1f
rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
stb r7,PACAIRQHAPPENED(r13)
1: li r0,0
stb r0,PACASOFTIRQEN(r13);
TRACE_DISABLE_INTS
b do_restore
/* /*
* Something did happen, check if a re-emit is needed * Something did happen, check if a re-emit is needed
* (this also clears paca->irq_happened) * (this also clears paca->irq_happened)
@ -744,6 +759,9 @@ restore_check_irq_replay:
#endif /* CONFIG_PPC_BOOK3E */ #endif /* CONFIG_PPC_BOOK3E */
1: b .ret_from_except /* What else to do here ? */ 1: b .ret_from_except /* What else to do here ? */
3:
do_work: do_work:
#ifdef CONFIG_PREEMPT #ifdef CONFIG_PREEMPT
andi. r0,r3,MSR_PR /* Returning to user mode? */ andi. r0,r3,MSR_PR /* Returning to user mode? */


@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en)
*/ */
if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
__hard_irq_disable(); __hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAG
else {
/*
* We should already be hard disabled here. We had bugs
* where that wasn't the case so let's dbl check it and
* warn if we are wrong. Only do that when IRQ tracing
* is enabled as mfmsr() can be costly.
*/
if (WARN_ON(mfmsr() & MSR_EE))
__hard_irq_disable();
}
#endif /* CONFIG_TRACE_IRQFLAG */
set_soft_enabled(0); set_soft_enabled(0);
/* /*


@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
!(memslot->userspace_addr & (s - 1))) { !(memslot->userspace_addr & (s - 1))) {
start &= ~(s - 1); start &= ~(s - 1);
pgsize = s; pgsize = s;
get_page(hpage);
put_page(page);
page = hpage; page = hpage;
} }
} }
@ -281,11 +283,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
err = 0; err = 0;
out: out:
if (got) { if (got)
if (PageHuge(page))
page = compound_head(page);
put_page(page); put_page(page);
}
return err; return err;
up_err: up_err:
@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
SetPageDirty(page); SetPageDirty(page);
out_put: out_put:
if (page) if (page) {
put_page(page); /*
* We drop pages[0] here, not page because page might
* have been set to the head page of a compound, but
* we have to drop the reference on the correct tail
* page to match the get inside gup()
*/
put_page(pages[0]);
}
return ret; return ret;
out_unlock: out_unlock:
@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
pa = *physp; pa = *physp;
} }
page = pfn_to_page(pa >> PAGE_SHIFT); page = pfn_to_page(pa >> PAGE_SHIFT);
get_page(page);
} else { } else {
hva = gfn_to_hva_memslot(memslot, gfn); hva = gfn_to_hva_memslot(memslot, gfn);
npages = get_user_pages_fast(hva, 1, 1, pages); npages = get_user_pages_fast(hva, 1, 1, pages);
@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
page = compound_head(page); page = compound_head(page);
psize <<= compound_order(page); psize <<= compound_order(page);
} }
if (!kvm->arch.using_mmu_notifiers)
get_page(page);
offset = gpa & (psize - 1); offset = gpa & (psize - 1);
if (nb_ret) if (nb_ret)
*nb_ret = psize - offset; *nb_ret = psize - offset;
@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{ {
struct page *page = virt_to_page(va); struct page *page = virt_to_page(va);
page = compound_head(page);
put_page(page); put_page(page);
} }


@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id)
continue; continue;
pfn = physp[j] >> PAGE_SHIFT; pfn = physp[j] >> PAGE_SHIFT;
page = pfn_to_page(pfn); page = pfn_to_page(pfn);
if (PageHuge(page))
page = compound_head(page);
SetPageDirty(page); SetPageDirty(page);
put_page(page); put_page(page);
} }


@ -48,7 +48,13 @@
/* /*
* Assembly helpers from arch/powerpc/net/bpf_jit.S: * Assembly helpers from arch/powerpc/net/bpf_jit.S:
*/ */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; #define DECLARE_LOAD_FUNC(func) \
extern u8 func[], func##_negative_offset[], func##_positive_offset[]
DECLARE_LOAD_FUNC(sk_load_word);
DECLARE_LOAD_FUNC(sk_load_half);
DECLARE_LOAD_FUNC(sk_load_byte);
DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define FUNCTION_DESCR_SIZE 24 #define FUNCTION_DESCR_SIZE 24


@ -31,14 +31,13 @@
* then branch directly to slow_path_XXX if required. (In fact, could * then branch directly to slow_path_XXX if required. (In fact, could
* load a spare GPR with the address of slow_path_generic and pass size * load a spare GPR with the address of slow_path_generic and pass size
* as an argument, making the call site a mtlr, li and bllr.) * as an argument, making the call site a mtlr, li and bllr.)
*
* Technically, the "is addr < 0" check is unnecessary & slowing down
* the ABS path, as it's statically checked on generation.
*/ */
.globl sk_load_word .globl sk_load_word
sk_load_word: sk_load_word:
cmpdi r_addr, 0 cmpdi r_addr, 0
blt bpf_error blt bpf_slow_path_word_neg
.globl sk_load_word_positive_offset
sk_load_word_positive_offset:
/* Are we accessing past headlen? */ /* Are we accessing past headlen? */
subi r_scratch1, r_HL, 4 subi r_scratch1, r_HL, 4
cmpd r_scratch1, r_addr cmpd r_scratch1, r_addr
@ -51,7 +50,9 @@ sk_load_word:
.globl sk_load_half .globl sk_load_half
sk_load_half: sk_load_half:
cmpdi r_addr, 0 cmpdi r_addr, 0
blt bpf_error blt bpf_slow_path_half_neg
.globl sk_load_half_positive_offset
sk_load_half_positive_offset:
subi r_scratch1, r_HL, 2 subi r_scratch1, r_HL, 2
cmpd r_scratch1, r_addr cmpd r_scratch1, r_addr
blt bpf_slow_path_half blt bpf_slow_path_half
@ -61,7 +62,9 @@ sk_load_half:
.globl sk_load_byte .globl sk_load_byte
sk_load_byte: sk_load_byte:
cmpdi r_addr, 0 cmpdi r_addr, 0
blt bpf_error blt bpf_slow_path_byte_neg
.globl sk_load_byte_positive_offset
sk_load_byte_positive_offset:
cmpd r_HL, r_addr cmpd r_HL, r_addr
ble bpf_slow_path_byte ble bpf_slow_path_byte
lbzx r_A, r_D, r_addr lbzx r_A, r_D, r_addr
@ -69,22 +72,20 @@ sk_load_byte:
/* /*
* BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
* r_addr is the offset value, already known positive * r_addr is the offset value
*/ */
.globl sk_load_byte_msh .globl sk_load_byte_msh
sk_load_byte_msh: sk_load_byte_msh:
cmpdi r_addr, 0
blt bpf_slow_path_byte_msh_neg
.globl sk_load_byte_msh_positive_offset
sk_load_byte_msh_positive_offset:
cmpd r_HL, r_addr cmpd r_HL, r_addr
ble bpf_slow_path_byte_msh ble bpf_slow_path_byte_msh
lbzx r_X, r_D, r_addr lbzx r_X, r_D, r_addr
rlwinm r_X, r_X, 2, 32-4-2, 31-2 rlwinm r_X, r_X, 2, 32-4-2, 31-2
blr blr
bpf_error:
/* Entered with cr0 = lt */
li r3, 0
/* Generated code will 'blt epilogue', returning 0. */
blr
/* Call out to skb_copy_bits: /* Call out to skb_copy_bits:
* We'll need to back up our volatile regs first; we have * We'll need to back up our volatile regs first; we have
* local variable space at r1+(BPF_PPC_STACK_BASIC). * local variable space at r1+(BPF_PPC_STACK_BASIC).
@ -136,3 +137,84 @@ bpf_slow_path_byte_msh:
lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1) lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
rlwinm r_X, r_X, 2, 32-4-2, 31-2 rlwinm r_X, r_X, 2, 32-4-2, 31-2
blr blr
/* Call out to bpf_internal_load_pointer_neg_helper:
* We'll need to back up our volatile regs first; we have
* local variable space at r1+(BPF_PPC_STACK_BASIC).
* Allocate a new stack frame here to remain ABI-compliant in
* stashing LR.
*/
#define sk_negative_common(SIZE) \
mflr r0; \
std r0, 16(r1); \
/* R3 goes in parameter space of caller's frame */ \
std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
/* R3 = r_skb, as passed */ \
mr r4, r_addr; \
li r5, SIZE; \
bl bpf_internal_load_pointer_neg_helper; \
/* R3 != 0 on success */ \
addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
ld r0, 16(r1); \
ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
mtlr r0; \
cmpldi r3, 0; \
beq bpf_error_slow; /* cr0 = EQ */ \
mr r_addr, r3; \
ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
/* Great success! */
bpf_slow_path_word_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
cmpd r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_word_negative_offset
sk_load_word_negative_offset:
sk_negative_common(4)
lwz r_A, 0(r_addr)
blr
bpf_slow_path_half_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
cmpd r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_half_negative_offset
sk_load_half_negative_offset:
sk_negative_common(2)
lhz r_A, 0(r_addr)
blr
bpf_slow_path_byte_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
cmpd r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_byte_negative_offset
sk_load_byte_negative_offset:
sk_negative_common(1)
lbz r_A, 0(r_addr)
blr
bpf_slow_path_byte_msh_neg:
lis r_scratch1,-32 /* SKF_LL_OFF */
cmpd r_addr, r_scratch1 /* addr < SKF_* */
blt bpf_error /* cr0 = LT */
.globl sk_load_byte_msh_negative_offset
sk_load_byte_msh_negative_offset:
sk_negative_common(1)
lbz r_X, 0(r_addr)
rlwinm r_X, r_X, 2, 32-4-2, 31-2
blr
bpf_error_slow:
/* fabricate a cr0 = lt */
li r_scratch1, -1
cmpdi r_scratch1, 0
bpf_error:
/* Entered with cr0 = lt */
li r3, 0
/* Generated code will 'blt epilogue', returning 0. */
blr
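As a rough userspace sketch of the control flow the stubs above implement for a negative offset (the helper below is a stand-in for the kernel's bpf_internal_load_pointer_neg_helper(); SKF_LL_OFF is taken from the `lis r_scratch1,-32` above, i.e. -0x200000; names and byte handling are illustrative only, not the JIT's actual code):

    #include <stddef.h>
    #include <stdint.h>

    #define SKF_LL_OFF (-0x200000)   /* lis r_scratch1,-32 == -32 << 16 */

    /* stand-in for bpf_internal_load_pointer_neg_helper(); here it simply
     * refuses everything, which exercises the bpf_error_slow path */
    static const uint8_t *neg_load_helper(const void *skb, int offset,
                                          unsigned int size)
    {
            (void)skb; (void)offset; (void)size;
            return NULL;
    }

    /* model of sk_load_word for a negative offset k */
    static int load_word_neg(const void *skb, int k, uint32_t *out)
    {
            const uint8_t *p;

            if (k < SKF_LL_OFF)               /* blt bpf_error: filter returns 0 */
                    return -1;
            p = neg_load_helper(skb, k, 4);   /* sk_negative_common(4) */
            if (!p)                           /* beq bpf_error_slow */
                    return -1;
            *out = (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
                   (uint32_t)p[2] << 8  | (uint32_t)p[3];   /* lwz, big-endian */
            return 0;
    }

    int main(void)
    {
            uint32_t w;
            /* below SKF_LL_OFF: rejected up front, the filter would return 0 */
            return load_word_neg(NULL, -0x300000, &w) ? 1 : 0;
    }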

View File

@ -127,6 +127,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
PPC_BLR(); PPC_BLR();
} }
#define CHOOSE_LOAD_FUNC(K, func) \
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
/* Assemble the body code between the prologue & epilogue. */ /* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
struct codegen_context *ctx, struct codegen_context *ctx,
@ -391,21 +394,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
/*** Absolute loads from packet header/data ***/ /*** Absolute loads from packet header/data ***/
case BPF_S_LD_W_ABS: case BPF_S_LD_W_ABS:
func = sk_load_word; func = CHOOSE_LOAD_FUNC(K, sk_load_word);
goto common_load; goto common_load;
case BPF_S_LD_H_ABS: case BPF_S_LD_H_ABS:
func = sk_load_half; func = CHOOSE_LOAD_FUNC(K, sk_load_half);
goto common_load; goto common_load;
case BPF_S_LD_B_ABS: case BPF_S_LD_B_ABS:
func = sk_load_byte; func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
common_load: common_load:
/* /* Load from [K]. */
* Load from [K]. Reference with the (negative)
* SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
*/
ctx->seen |= SEEN_DATAREF; ctx->seen |= SEEN_DATAREF;
if ((int)K < 0)
return -ENOTSUPP;
PPC_LI64(r_scratch1, func); PPC_LI64(r_scratch1, func);
PPC_MTLR(r_scratch1); PPC_MTLR(r_scratch1);
PPC_LI32(r_addr, K); PPC_LI32(r_addr, K);
@ -429,7 +427,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
common_load_ind: common_load_ind:
/* /*
* Load from [X + K]. Negative offsets are tested for * Load from [X + K]. Negative offsets are tested for
* in the helper functions, and result in a 'ret 0'. * in the helper functions.
*/ */
ctx->seen |= SEEN_DATAREF | SEEN_XREG; ctx->seen |= SEEN_DATAREF | SEEN_XREG;
PPC_LI64(r_scratch1, func); PPC_LI64(r_scratch1, func);
@ -443,13 +441,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
break; break;
case BPF_S_LDX_B_MSH: case BPF_S_LDX_B_MSH:
/* func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
* x86 version drops packet (RET 0) when K<0, whereas
* interpreter does allow K<0 (__load_pointer, special
* ancillary data). common_load returns ENOTSUPP if K<0,
* so we fall back to interpreter & filter works.
*/
func = sk_load_byte_msh;
goto common_load; goto common_load;
break; break;
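A quick stand-alone illustration of which stub the new CHOOSE_LOAD_FUNC macro picks for a constant K (the three sk_load_word* functions here are dummies, not the real JIT entry points; SKF_LL_OFF is -0x200000 as in the helpers above):

    #include <stdio.h>

    #define SKF_LL_OFF (-0x200000)

    static void sk_load_word(void)                 { puts("generic stub (checks sign at run time)"); }
    static void sk_load_word_positive_offset(void) { puts("positive-offset fast path"); }
    static void sk_load_word_negative_offset(void) { puts("negative-offset slow path"); }

    #define CHOOSE_LOAD_FUNC(K, func) \
            ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

    int main(void)
    {
            CHOOSE_LOAD_FUNC(14, sk_load_word)();        /* constant, positive */
            CHOOSE_LOAD_FUNC(-0x1000, sk_load_word)();   /* negative, above SKF_LL_OFF */
            CHOOSE_LOAD_FUNC(-0x300000, sk_load_word)(); /* below SKF_LL_OFF */
            return 0;
    }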

View File

@ -269,4 +269,4 @@ static int __init sunfire_init(void)
return 0; return 0;
} }
subsys_initcall(sunfire_init); fs_initcall(sunfire_init);

View File

@ -495,11 +495,11 @@ xcall_fetch_glob_regs:
stx %o7, [%g1 + GR_SNAP_O7] stx %o7, [%g1 + GR_SNAP_O7]
stx %i7, [%g1 + GR_SNAP_I7] stx %i7, [%g1 + GR_SNAP_I7]
/* Don't try this at home kids... */ /* Don't try this at home kids... */
rdpr %cwp, %g2 rdpr %cwp, %g3
sub %g2, 1, %g7 sub %g3, 1, %g7
wrpr %g7, %cwp wrpr %g7, %cwp
mov %i7, %g7 mov %i7, %g7
wrpr %g2, %cwp wrpr %g3, %cwp
stx %g7, [%g1 + GR_SNAP_RPC] stx %g7, [%g1 + GR_SNAP_RPC]
sethi %hi(trap_block), %g7 sethi %hi(trap_block), %g7
or %g7, %lo(trap_block), %g7 or %g7, %lo(trap_block), %g7

View File

@ -81,7 +81,7 @@ config X86
select CLKEVT_I8253 select CLKEVT_I8253
select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_IOMAP select GENERIC_IOMAP
select DCACHE_WORD_ACCESS if !DEBUG_PAGEALLOC select DCACHE_WORD_ACCESS
config INSTRUCTION_DECODER config INSTRUCTION_DECODER
def_bool (KPROBES || PERF_EVENTS) def_bool (KPROBES || PERF_EVENTS)

View File

@ -403,13 +403,11 @@ static void print_absolute_symbols(void)
for (i = 0; i < ehdr.e_shnum; i++) { for (i = 0; i < ehdr.e_shnum; i++) {
struct section *sec = &secs[i]; struct section *sec = &secs[i];
char *sym_strtab; char *sym_strtab;
Elf32_Sym *sh_symtab;
int j; int j;
if (sec->shdr.sh_type != SHT_SYMTAB) { if (sec->shdr.sh_type != SHT_SYMTAB) {
continue; continue;
} }
sh_symtab = sec->symtab;
sym_strtab = sec->link->strtab; sym_strtab = sec->link->strtab;
for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) {
Elf32_Sym *sym; Elf32_Sym *sym;

View File

@ -294,8 +294,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
/* OK, This is the point of no return */ /* OK, This is the point of no return */
set_personality(PER_LINUX); set_personality(PER_LINUX);
set_thread_flag(TIF_IA32); set_personality_ia32(false);
current->mm->context.ia32_compat = 1;
setup_new_exec(bprm); setup_new_exec(bprm);

View File

@ -43,4 +43,37 @@ static inline unsigned long has_zero(unsigned long a)
return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80); return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80);
} }
/*
* Load an unaligned word from kernel space.
*
* In the (very unlikely) case of the word being a page-crosser
* and the next page not being mapped, take the exception and
* return zeroes in the non-existing part.
*/
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
unsigned long ret, dummy;
asm(
"1:\tmov %2,%0\n"
"2:\n"
".section .fixup,\"ax\"\n"
"3:\t"
"lea %2,%1\n\t"
"and %3,%1\n\t"
"mov (%1),%0\n\t"
"leal %2,%%ecx\n\t"
"andl %4,%%ecx\n\t"
"shll $3,%%ecx\n\t"
"shr %%cl,%0\n\t"
"jmp 2b\n"
".previous\n"
_ASM_EXTABLE(1b, 3b)
:"=&r" (ret),"=&c" (dummy)
:"m" (*(unsigned long *)addr),
"i" (-sizeof(unsigned long)),
"i" (sizeof(unsigned long)-1));
return ret;
}
#endif /* _ASM_WORD_AT_A_TIME_H */ #endif /* _ASM_WORD_AT_A_TIME_H */
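The .fixup path in the asm above can be hard to read; a portable model of what it computes is sketched below: read only the aligned word that contains addr, then shift so the bytes past the mapping read as zero. This is little-endian only and purely illustrative; the real function attempts the unaligned load first and only falls back to this arithmetic on a fault.

    #include <stdint.h>
    #include <string.h>

    static unsigned long zeropad_fixup(const void *addr)
    {
            uintptr_t a     = (uintptr_t)addr;
            uintptr_t align = a & ~(uintptr_t)(sizeof(unsigned long) - 1);
            unsigned int sh = (a & (sizeof(unsigned long) - 1)) * 8;
            unsigned long w;

            memcpy(&w, (const void *)align, sizeof(w));   /* only the mapped word */
            return w >> sh;            /* missing high bytes appear as zero (LE) */
    }

    int main(void)
    {
            unsigned long buf[2] = { 0, 0 };

            memcpy(buf, "kernel", 6);
            /* read at an unaligned offset inside the string */
            return (int)(zeropad_fixup((const char *)buf + 2) & 0xff);   /* 'r' */
    }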

View File

@ -580,6 +580,24 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
} }
} }
/* re-enable TopologyExtensions if switched off by BIOS */
if ((c->x86 == 0x15) &&
(c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
!cpu_has(c, X86_FEATURE_TOPOEXT)) {
u64 val;
if (!rdmsrl_amd_safe(0xc0011005, &val)) {
val |= 1ULL << 54;
wrmsrl_amd_safe(0xc0011005, val);
rdmsrl(0xc0011005, val);
if (val & (1ULL << 54)) {
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
printk(KERN_INFO FW_INFO "CPU: Re-enabling "
"disabled Topology Extensions Support\n");
}
}
}
cpu_detect_cache_sizes(c); cpu_detect_cache_sizes(c);
/* Multi core CPU? */ /* Multi core CPU? */

View File

@ -79,7 +79,6 @@ struct kvm_task_sleep_node {
u32 token; u32 token;
int cpu; int cpu;
bool halted; bool halted;
struct mm_struct *mm;
}; };
static struct kvm_task_sleep_head { static struct kvm_task_sleep_head {
@ -126,9 +125,7 @@ void kvm_async_pf_task_wait(u32 token)
n.token = token; n.token = token;
n.cpu = smp_processor_id(); n.cpu = smp_processor_id();
n.mm = current->active_mm;
n.halted = idle || preempt_count() > 1; n.halted = idle || preempt_count() > 1;
atomic_inc(&n.mm->mm_count);
init_waitqueue_head(&n.wq); init_waitqueue_head(&n.wq);
hlist_add_head(&n.link, &b->list); hlist_add_head(&n.link, &b->list);
spin_unlock(&b->lock); spin_unlock(&b->lock);
@ -161,9 +158,6 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
static void apf_task_wake_one(struct kvm_task_sleep_node *n) static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{ {
hlist_del_init(&n->link); hlist_del_init(&n->link);
if (!n->mm)
return;
mmdrop(n->mm);
if (n->halted) if (n->halted)
smp_send_reschedule(n->cpu); smp_send_reschedule(n->cpu);
else if (waitqueue_active(&n->wq)) else if (waitqueue_active(&n->wq))
@ -207,7 +201,7 @@ again:
* async PF was not yet handled. * async PF was not yet handled.
* Add dummy entry for the token. * Add dummy entry for the token.
*/ */
n = kmalloc(sizeof(*n), GFP_ATOMIC); n = kzalloc(sizeof(*n), GFP_ATOMIC);
if (!n) { if (!n) {
/* /*
* Allocation failed! Busy wait while other cpu * Allocation failed! Busy wait while other cpu
@ -219,7 +213,6 @@ again:
} }
n->token = token; n->token = token;
n->cpu = smp_processor_id(); n->cpu = smp_processor_id();
n->mm = NULL;
init_waitqueue_head(&n->wq); init_waitqueue_head(&n->wq);
hlist_add_head(&n->link, &b->list); hlist_add_head(&n->link, &b->list);
} else } else

View File

@ -423,6 +423,7 @@ void set_personality_ia32(bool x32)
current_thread_info()->status |= TS_COMPAT; current_thread_info()->status |= TS_COMPAT;
} }
} }
EXPORT_SYMBOL_GPL(set_personality_ia32);
unsigned long get_wchan(struct task_struct *p) unsigned long get_wchan(struct task_struct *p)
{ {

View File

@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void)
#endif #endif
rc = -EINVAL; rc = -EINVAL;
if (pcpu_chosen_fc != PCPU_FC_PAGE) { if (pcpu_chosen_fc != PCPU_FC_PAGE) {
const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
const size_t dyn_size = PERCPU_MODULE_RESERVE + const size_t dyn_size = PERCPU_MODULE_RESERVE +
PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE; PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
size_t atom_size;
/*
* On 64bit, use PMD_SIZE for atom_size so that embedded
* percpu areas are aligned to PMD. This, in the future,
* can also allow using PMD mappings in vmalloc area. Use
* PAGE_SIZE on 32bit as vmalloc space is highly contended
* and large vmalloc area allocs can easily fail.
*/
#ifdef CONFIG_X86_64
atom_size = PMD_SIZE;
#else
atom_size = PAGE_SIZE;
#endif
rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE, rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
dyn_size, atom_size, dyn_size, atom_size,
pcpu_cpu_distance, pcpu_cpu_distance,
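For orientation, the comment above trades PMD-aligned (hugepage-mappable) embedding on 64-bit against scarce vmalloc space on 32-bit; the two atom sizes, assuming 4 KiB base pages and the x86-64 PMD shift, work out as follows (constants are illustrative, not pulled from kernel headers):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PMD_SHIFT  21
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)   /* 4 KiB */
    #define PMD_SIZE   (1UL << PMD_SHIFT)    /* 2 MiB */

    int main(void)
    {
            printf("32-bit atom_size: %lu KiB\n", PAGE_SIZE >> 10);   /* 4    */
            printf("64-bit atom_size: %lu KiB\n", PMD_SIZE >> 10);    /* 2048 */
            return 0;
    }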

View File

@ -6581,6 +6581,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
kvm_inject_page_fault(vcpu, &fault); kvm_inject_page_fault(vcpu, &fault);
} }
vcpu->arch.apf.halted = false; vcpu->arch.apf.halted = false;
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
} }
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)

View File

@ -63,7 +63,7 @@ static struct gpio_led net5501_leds[] = {
.name = "net5501:1", .name = "net5501:1",
.gpio = 6, .gpio = 6,
.default_trigger = "default-on", .default_trigger = "default-on",
.active_low = 1, .active_low = 0,
}, },
}; };

View File

@ -63,6 +63,7 @@
#include <asm/stackprotector.h> #include <asm/stackprotector.h>
#include <asm/hypervisor.h> #include <asm/hypervisor.h>
#include <asm/mwait.h> #include <asm/mwait.h>
#include <asm/pci_x86.h>
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
#include <linux/acpi.h> #include <linux/acpi.h>
@ -809,9 +810,40 @@ static void xen_io_delay(void)
} }
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
static unsigned long xen_set_apic_id(unsigned int x)
{
WARN_ON(1);
return x;
}
static unsigned int xen_get_apic_id(unsigned long x)
{
return ((x)>>24) & 0xFFu;
}
static u32 xen_apic_read(u32 reg) static u32 xen_apic_read(u32 reg)
{ {
struct xen_platform_op op = {
.cmd = XENPF_get_cpuinfo,
.interface_version = XENPF_INTERFACE_VERSION,
.u.pcpu_info.xen_cpuid = 0,
};
int ret = 0;
/* Shouldn't need this as APIC is turned off for PV, and we only
* get called on the bootup processor. But just in case. */
if (!xen_initial_domain() || smp_processor_id())
return 0; return 0;
if (reg == APIC_LVR)
return 0x10;
if (reg != APIC_ID)
return 0;
ret = HYPERVISOR_dom0_op(&op);
if (ret)
return 0;
return op.u.pcpu_info.apic_id << 24;
} }
static void xen_apic_write(u32 reg, u32 val) static void xen_apic_write(u32 reg, u32 val)
@ -849,6 +881,8 @@ static void set_xen_basic_apic_ops(void)
apic->icr_write = xen_apic_icr_write; apic->icr_write = xen_apic_icr_write;
apic->wait_icr_idle = xen_apic_wait_icr_idle; apic->wait_icr_idle = xen_apic_wait_icr_idle;
apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle; apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
apic->set_apic_id = xen_set_apic_id;
apic->get_apic_id = xen_get_apic_id;
} }
#endif #endif
@ -1365,8 +1399,10 @@ asmlinkage void __init xen_start_kernel(void)
/* Make sure ACS will be enabled */ /* Make sure ACS will be enabled */
pci_request_acs(); pci_request_acs();
} }
#ifdef CONFIG_PCI
/* PCI BIOS service won't work from a PV guest. */
pci_probe &= ~PCI_PROBE_BIOS;
#endif
xen_raw_console_write("about to get started...\n"); xen_raw_console_write("about to get started...\n");
xen_setup_runstate_info(0); xen_setup_runstate_info(0);

View File

@ -353,8 +353,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
{ {
if (val & _PAGE_PRESENT) { if (val & _PAGE_PRESENT) {
unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
pteval_t flags = val & PTE_FLAGS_MASK; pteval_t flags = val & PTE_FLAGS_MASK;
val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags; if (unlikely(pfn == ~0))
val = flags & ~_PAGE_PRESENT;
else
val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
} }
return val; return val;
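To see the effect of the hunk above with concrete numbers, here is a stand-alone restatement: when the MFN has no PFN mapping (~0UL), the present bit is dropped instead of fabricating a bogus frame number. The mask values are the usual x86-64 ones and mfn_to_pfn() is stubbed, so this is a model, not the Xen code.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT     12
    #define PTE_PFN_MASK   0x000ffffffffff000ULL
    #define PTE_FLAGS_MASK (~PTE_PFN_MASK)
    #define _PAGE_PRESENT  0x001ULL

    /* stand-in for mfn_to_pfn(): pretend MFN 5 has no PFN mapping */
    static unsigned long mfn_to_pfn_stub(unsigned long mfn)
    {
            return mfn == 5 ? ~0UL : mfn + 100;
    }

    static uint64_t pte_mfn_to_pfn_model(uint64_t val)
    {
            if (val & _PAGE_PRESENT) {
                    unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
                    unsigned long pfn = mfn_to_pfn_stub(mfn);
                    uint64_t flags    = val & PTE_FLAGS_MASK;

                    if (pfn == ~0UL)
                            val = flags & ~_PAGE_PRESENT;   /* no backing page */
                    else
                            val = ((uint64_t)pfn << PAGE_SHIFT) | flags;
            }
            return val;
    }

    int main(void)
    {
            printf("mapped:   %#llx\n", (unsigned long long)
                   pte_mfn_to_pfn_model((2ULL << PAGE_SHIFT) | 0x63));
            printf("unmapped: %#llx\n", (unsigned long long)
                   pte_mfn_to_pfn_model((5ULL << PAGE_SHIFT) | 0x63));
            return 0;
    }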

View File

@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
* We know a device's inferred power state when all the resources * We know a device's inferred power state when all the resources
* required for a given D-state are 'on'. * required for a given D-state are 'on'.
*/ */
for (i = ACPI_STATE_D0; i < ACPI_STATE_D3; i++) { for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
list = &device->power.states[i].resources; list = &device->power.states[i].resources;
if (list->count < 1) if (list->count < 1)
continue; continue;

View File

@ -869,7 +869,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
/* /*
* Enumerate supported power management states * Enumerate supported power management states
*/ */
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) { for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
struct acpi_device_power_state *ps = &device->power.states[i]; struct acpi_device_power_state *ps = &device->power.states[i];
char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' }; char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' };
@ -884,21 +884,18 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
acpi_bus_add_power_resource(ps->resources.handles[j]); acpi_bus_add_power_resource(ps->resources.handles[j]);
} }
/* The exist of _PR3 indicates D3Cold support */
if (i == ACPI_STATE_D3) {
status = acpi_get_handle(device->handle, object_name, &handle);
if (ACPI_SUCCESS(status))
device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
}
/* Evaluate "_PSx" to see if we can do explicit sets */ /* Evaluate "_PSx" to see if we can do explicit sets */
object_name[2] = 'S'; object_name[2] = 'S';
status = acpi_get_handle(device->handle, object_name, &handle); status = acpi_get_handle(device->handle, object_name, &handle);
if (ACPI_SUCCESS(status)) if (ACPI_SUCCESS(status))
ps->flags.explicit_set = 1; ps->flags.explicit_set = 1;
/* State is valid if we have some power control */ /*
if (ps->resources.count || ps->flags.explicit_set) * State is valid if there are means to put the device into it.
* D3hot is only valid if _PR3 present.
*/
if (ps->resources.count ||
(ps->flags.explicit_set && i < ACPI_STATE_D3_HOT))
ps->flags.valid = 1; ps->flags.valid = 1;
ps->power = -1; /* Unknown - driver assigned */ ps->power = -1; /* Unknown - driver assigned */

View File

@ -394,6 +394,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs }, /* 88se9128 */ .driver_data = board_ahci_yes_fbs }, /* 88se9128 */
{ PCI_DEVICE(0x1b4b, 0x9125), { PCI_DEVICE(0x1b4b, 0x9125),
.driver_data = board_ahci_yes_fbs }, /* 88se9125 */ .driver_data = board_ahci_yes_fbs }, /* 88se9125 */
{ PCI_DEVICE(0x1b4b, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(0x1b4b, 0x91a3), { PCI_DEVICE(0x1b4b, 0x91a3),
.driver_data = board_ahci_yes_fbs }, .driver_data = board_ahci_yes_fbs },

View File

@ -280,6 +280,7 @@ static struct dev_pm_ops ahci_pm_ops = {
static const struct of_device_id ahci_of_match[] = { static const struct of_device_id ahci_of_match[] = {
{ .compatible = "calxeda,hb-ahci", }, { .compatible = "calxeda,hb-ahci", },
{ .compatible = "snps,spear-ahci", },
{}, {},
}; };
MODULE_DEVICE_TABLE(of, ahci_of_match); MODULE_DEVICE_TABLE(of, ahci_of_match);

View File

@ -95,7 +95,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev); static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev); static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
atomic_t ata_print_id = ATOMIC_INIT(1); atomic_t ata_print_id = ATOMIC_INIT(0);
struct ata_force_param { struct ata_force_param {
const char *name; const char *name;

View File

@ -3501,7 +3501,8 @@ static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg
u64 now = get_jiffies_64(); u64 now = get_jiffies_64();
int *trials = void_arg; int *trials = void_arg;
if (ent->timestamp < now - min(now, interval)) if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
(ent->timestamp < now - min(now, interval)))
return -1; return -1;
(*trials)++; (*trials)++;

View File

@ -3399,7 +3399,8 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
*/ */
shost->max_host_blocked = 1; shost->max_host_blocked = 1;
rc = scsi_add_host(ap->scsi_host, &ap->tdev); rc = scsi_add_host_with_dma(ap->scsi_host,
&ap->tdev, ap->host->dev);
if (rc) if (rc)
goto err_add; goto err_add;
} }
@ -3838,18 +3839,25 @@ void ata_sas_port_stop(struct ata_port *ap)
} }
EXPORT_SYMBOL_GPL(ata_sas_port_stop); EXPORT_SYMBOL_GPL(ata_sas_port_stop);
int ata_sas_async_port_init(struct ata_port *ap) /**
* ata_sas_async_probe - simply schedule probing and return
* @ap: Port to probe
*
* For batch scheduling of probe for sas attached ata devices, assumes
* the port has already been through ata_sas_port_init()
*/
void ata_sas_async_probe(struct ata_port *ap)
{ {
int rc = ap->ops->port_start(ap);
if (!rc) {
ap->print_id = atomic_inc_return(&ata_print_id);
__ata_port_probe(ap); __ata_port_probe(ap);
}
return rc;
} }
EXPORT_SYMBOL_GPL(ata_sas_async_port_init); EXPORT_SYMBOL_GPL(ata_sas_async_probe);
int ata_sas_sync_probe(struct ata_port *ap)
{
return ata_port_probe(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
/** /**
* ata_sas_port_init - Initialize a SATA device * ata_sas_port_init - Initialize a SATA device
@ -3866,12 +3874,10 @@ int ata_sas_port_init(struct ata_port *ap)
{ {
int rc = ap->ops->port_start(ap); int rc = ap->ops->port_start(ap);
if (!rc) { if (rc)
ap->print_id = atomic_inc_return(&ata_print_id);
rc = ata_port_probe(ap);
}
return rc; return rc;
ap->print_id = atomic_inc_return(&ata_print_id);
return 0;
} }
EXPORT_SYMBOL_GPL(ata_sas_port_init); EXPORT_SYMBOL_GPL(ata_sas_port_init);

View File

@ -943,9 +943,9 @@ static int arasan_cf_resume(struct device *dev)
return 0; return 0;
} }
#endif
static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume); static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);
#endif
static struct platform_driver arasan_cf_driver = { static struct platform_driver arasan_cf_driver = {
.probe = arasan_cf_probe, .probe = arasan_cf_probe,
@ -953,9 +953,7 @@ static struct platform_driver arasan_cf_driver = {
.driver = { .driver = {
.name = DRIVER_NAME, .name = DRIVER_NAME,
.owner = THIS_MODULE, .owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &arasan_cf_pm_ops, .pm = &arasan_cf_pm_ops,
#endif
}, },
}; };

View File

@ -775,9 +775,11 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
map->format.parse_val(val + i); map->format.parse_val(val + i);
} else { } else {
for (i = 0; i < val_count; i++) { for (i = 0; i < val_count; i++) {
ret = regmap_read(map, reg + i, val + (i * val_bytes)); unsigned int ival;
ret = regmap_read(map, reg + i, &ival);
if (ret != 0) if (ret != 0)
return ret; return ret;
memcpy(val + (i * val_bytes), &ival, val_bytes);
} }
} }
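The hunk above matters because regmap_read() always writes a full unsigned int; when the register width (val_bytes) is smaller, reading straight into val + (i * val_bytes) would clobber neighbouring slots in the caller's buffer. A minimal model of the corrected pattern, with the read stubbed out and endianness glossed over:

    #include <stdint.h>
    #include <string.h>

    /* stands in for regmap_read(): always fills a whole unsigned int */
    static int read_reg_stub(unsigned int reg, unsigned int *out)
    {
            *out = 0xA0 + reg;              /* pretend 8-bit register contents */
            return 0;
    }

    /* pack val_count registers of val_bytes each into the caller's buffer */
    static int bulk_read_model(unsigned int reg, void *val,
                               size_t val_bytes, size_t val_count)
    {
            size_t i;

            for (i = 0; i < val_count; i++) {
                    unsigned int ival;
                    int ret = read_reg_stub(reg + i, &ival);

                    if (ret)
                            return ret;
                    /* bounce through ival so only val_bytes land in the buffer */
                    memcpy((uint8_t *)val + i * val_bytes, &ival, val_bytes);
            }
            return 0;
    }

    int main(void)
    {
            uint8_t buf[4] = { 0 };

            bulk_read_model(0, buf, 1, 4);   /* four 1-byte "registers" */
            return buf[3];                   /* 0xA3 on little-endian hosts */
    }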

View File

@ -75,6 +75,8 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0CF3, 0x311D) }, { USB_DEVICE(0x0CF3, 0x311D) },
{ USB_DEVICE(0x13d3, 0x3375) }, { USB_DEVICE(0x13d3, 0x3375) },
{ USB_DEVICE(0x04CA, 0x3005) }, { USB_DEVICE(0x04CA, 0x3005) },
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x0CF3, 0xE004) },
/* Atheros AR5BBU12 with sflash firmware */ /* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) }, { USB_DEVICE(0x0489, 0xE02C) },
@ -94,6 +96,8 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ } /* Terminating entry */ { } /* Terminating entry */
}; };

View File

@ -101,12 +101,16 @@ static struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x0c10, 0x0000) }, { USB_DEVICE(0x0c10, 0x0000) },
/* Broadcom BCM20702A0 */ /* Broadcom BCM20702A0 */
{ USB_DEVICE(0x0489, 0xe042) },
{ USB_DEVICE(0x0a5c, 0x21e3) }, { USB_DEVICE(0x0a5c, 0x21e3) },
{ USB_DEVICE(0x0a5c, 0x21e6) }, { USB_DEVICE(0x0a5c, 0x21e6) },
{ USB_DEVICE(0x0a5c, 0x21e8) }, { USB_DEVICE(0x0a5c, 0x21e8) },
{ USB_DEVICE(0x0a5c, 0x21f3) }, { USB_DEVICE(0x0a5c, 0x21f3) },
{ USB_DEVICE(0x413c, 0x8197) }, { USB_DEVICE(0x413c, 0x8197) },
/* Foxconn - Hon Hai */
{ USB_DEVICE(0x0489, 0xe033) },
{ } /* Terminating entry */ { } /* Terminating entry */
}; };
@ -133,6 +137,8 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */ /* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },

View File

@ -191,6 +191,190 @@ utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
} }
} }
static bool
validate_device_path(struct efi_variable *var, int match, u8 *buffer,
unsigned long len)
{
struct efi_generic_dev_path *node;
int offset = 0;
node = (struct efi_generic_dev_path *)buffer;
if (len < sizeof(*node))
return false;
while (offset <= len - sizeof(*node) &&
node->length >= sizeof(*node) &&
node->length <= len - offset) {
offset += node->length;
if ((node->type == EFI_DEV_END_PATH ||
node->type == EFI_DEV_END_PATH2) &&
node->sub_type == EFI_DEV_END_ENTIRE)
return true;
node = (struct efi_generic_dev_path *)(buffer + offset);
}
/*
* If we're here then either node->length pointed past the end
* of the buffer or we reached the end of the buffer without
* finding a device path end node.
*/
return false;
}
static bool
validate_boot_order(struct efi_variable *var, int match, u8 *buffer,
unsigned long len)
{
/* An array of 16-bit integers */
if ((len % 2) != 0)
return false;
return true;
}
static bool
validate_load_option(struct efi_variable *var, int match, u8 *buffer,
unsigned long len)
{
u16 filepathlength;
int i, desclength = 0, namelen;
namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
/* Either "Boot" or "Driver" followed by four digits of hex */
for (i = match; i < match+4; i++) {
if (var->VariableName[i] > 127 ||
hex_to_bin(var->VariableName[i] & 0xff) < 0)
return true;
}
/* Reject it if there's 4 digits of hex and then further content */
if (namelen > match + 4)
return false;
/* A valid entry must be at least 8 bytes */
if (len < 8)
return false;
filepathlength = buffer[4] | buffer[5] << 8;
/*
* There's no stored length for the description, so it has to be
* found by hand
*/
desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
/* Each boot entry must have a descriptor */
if (!desclength)
return false;
/*
* If the sum of the length of the description, the claimed filepath
* length and the original header are greater than the length of the
* variable, it's malformed
*/
if ((desclength + filepathlength + 6) > len)
return false;
/*
* And, finally, check the filepath
*/
return validate_device_path(var, match, buffer + desclength + 6,
filepathlength);
}
static bool
validate_uint16(struct efi_variable *var, int match, u8 *buffer,
unsigned long len)
{
/* A single 16-bit integer */
if (len != 2)
return false;
return true;
}
static bool
validate_ascii_string(struct efi_variable *var, int match, u8 *buffer,
unsigned long len)
{
int i;
for (i = 0; i < len; i++) {
if (buffer[i] > 127)
return false;
if (buffer[i] == 0)
return true;
}
return false;
}
struct variable_validate {
char *name;
bool (*validate)(struct efi_variable *var, int match, u8 *data,
unsigned long len);
};
static const struct variable_validate variable_validate[] = {
{ "BootNext", validate_uint16 },
{ "BootOrder", validate_boot_order },
{ "DriverOrder", validate_boot_order },
{ "Boot*", validate_load_option },
{ "Driver*", validate_load_option },
{ "ConIn", validate_device_path },
{ "ConInDev", validate_device_path },
{ "ConOut", validate_device_path },
{ "ConOutDev", validate_device_path },
{ "ErrOut", validate_device_path },
{ "ErrOutDev", validate_device_path },
{ "Timeout", validate_uint16 },
{ "Lang", validate_ascii_string },
{ "PlatformLang", validate_ascii_string },
{ "", NULL },
};
static bool
validate_var(struct efi_variable *var, u8 *data, unsigned long len)
{
int i;
u16 *unicode_name = var->VariableName;
for (i = 0; variable_validate[i].validate != NULL; i++) {
const char *name = variable_validate[i].name;
int match;
for (match = 0; ; match++) {
char c = name[match];
u16 u = unicode_name[match];
/* All special variables are plain ascii */
if (u > 127)
return true;
/* Wildcard in the matching name means we've matched */
if (c == '*')
return variable_validate[i].validate(var,
match, data, len);
/* Case sensitive match */
if (c != u)
break;
/* Reached the end of the string while matching */
if (!c)
return variable_validate[i].validate(var,
match, data, len);
}
}
return true;
}
static efi_status_t static efi_status_t
get_var_data_locked(struct efivars *efivars, struct efi_variable *var) get_var_data_locked(struct efivars *efivars, struct efi_variable *var)
{ {
@ -324,6 +508,12 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
return -EINVAL; return -EINVAL;
} }
if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
printk(KERN_ERR "efivars: Malformed variable content\n");
return -EINVAL;
}
spin_lock(&efivars->lock); spin_lock(&efivars->lock);
status = efivars->ops->set_variable(new_var->VariableName, status = efivars->ops->set_variable(new_var->VariableName,
&new_var->VendorGuid, &new_var->VendorGuid,
@ -626,6 +816,12 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -EACCES; return -EACCES;
if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 ||
validate_var(new_var, new_var->Data, new_var->DataSize) == false) {
printk(KERN_ERR "efivars: Malformed variable content\n");
return -EINVAL;
}
spin_lock(&efivars->lock); spin_lock(&efivars->lock);
/* /*
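The per-name dispatch in validate_var() above pairs each well-known variable with a validator and treats a trailing '*' as "match this prefix, then hand the validator the offset where the payload (e.g. the Boot#### digits) starts". A trimmed userspace model of that walk, using ASCII names instead of UTF-16 and dummy validators:

    #include <stdbool.h>
    #include <stdio.h>

    struct rule {
            const char *name;
            bool (*validate)(int match, const char *var);
    };

    static bool always_ok(int match, const char *var)
    {
            printf("%s: payload starts at offset %d\n", var, match);
            return true;
    }

    static const struct rule rules[] = {
            { "BootOrder", always_ok },
            { "Boot*",     always_ok },     /* Boot0000, Boot0001, ... */
            { "",          NULL },
    };

    static bool check(const char *var)
    {
            for (int i = 0; rules[i].validate; i++) {
                    const char *name = rules[i].name;

                    for (int match = 0; ; match++) {
                            char c = name[match], u = var[match];

                            if (c == '*')           /* wildcard: rest is payload */
                                    return rules[i].validate(match, var);
                            if (c != u)
                                    break;          /* try the next rule */
                            if (!c)                 /* exact match */
                                    return rules[i].validate(match, var);
                    }
            }
            return true;                            /* unknown names pass through */
    }

    int main(void)
    {
            check("BootOrder");     /* exact rule */
            check("Boot0001");      /* wildcard, validator sees match == 4 */
            check("MyVendorVar");   /* no rule: allowed, nothing printed */
            return 0;
    }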

View File

@ -1224,6 +1224,9 @@ static int i915_emon_status(struct seq_file *m, void *unused)
unsigned long temp, chipset, gfx; unsigned long temp, chipset, gfx;
int ret; int ret;
if (!IS_GEN5(dev))
return -ENODEV;
ret = mutex_lock_interruptible(&dev->struct_mutex); ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret) if (ret)
return ret; return ret;

View File

@ -1701,6 +1701,9 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv)
unsigned long diffms; unsigned long diffms;
u32 count; u32 count;
if (dev_priv->info->gen != 5)
return;
getrawmonotonic(&now); getrawmonotonic(&now);
diff1 = timespec_sub(now, dev_priv->last_time2); diff1 = timespec_sub(now, dev_priv->last_time2);
@ -2121,12 +2124,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
(unsigned long) dev); (unsigned long) dev);
if (IS_GEN5(dev)) {
spin_lock(&mchdev_lock); spin_lock(&mchdev_lock);
i915_mch_dev = dev_priv; i915_mch_dev = dev_priv;
dev_priv->mchdev_lock = &mchdev_lock; dev_priv->mchdev_lock = &mchdev_lock;
spin_unlock(&mchdev_lock); spin_unlock(&mchdev_lock);
ips_ping_for_i915_load(); ips_ping_for_i915_load();
}
return 0; return 0;

View File

@ -7072,9 +7072,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
int dpll = I915_READ(dpll_reg);
if (HAS_PCH_SPLIT(dev)) if (HAS_PCH_SPLIT(dev))
return; return;
@ -7087,10 +7084,15 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
* the manual case. * the manual case.
*/ */
if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
int pipe = intel_crtc->pipe;
int dpll_reg = DPLL(pipe);
u32 dpll;
DRM_DEBUG_DRIVER("downclocking LVDS\n"); DRM_DEBUG_DRIVER("downclocking LVDS\n");
assert_panel_unlocked(dev_priv, pipe); assert_panel_unlocked(dev_priv, pipe);
dpll = I915_READ(dpll_reg);
dpll |= DISPLAY_RATE_SELECT_FPA1; dpll |= DISPLAY_RATE_SELECT_FPA1;
I915_WRITE(dpll_reg, dpll); I915_WRITE(dpll_reg, dpll);
intel_wait_for_vblank(dev, pipe); intel_wait_for_vblank(dev, pipe);
@ -7098,7 +7100,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
} }
} }
/** /**

View File

@ -136,7 +136,7 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder,
val &= ~VIDEO_DIP_SELECT_MASK; val &= ~VIDEO_DIP_SELECT_MASK;
I915_WRITE(VIDEO_DIP_CTL, val | port | flags); I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags);
for (i = 0; i < len; i += 4) { for (i = 0; i < len; i += 4) {
I915_WRITE(VIDEO_DIP_DATA, *data); I915_WRITE(VIDEO_DIP_DATA, *data);

View File

@ -750,7 +750,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
.ident = "Hewlett-Packard t5745", .ident = "Hewlett-Packard t5745",
.matches = { .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_BOARD_NAME, "hp t5745"), DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"),
}, },
}, },
{ {
@ -758,7 +758,7 @@ static const struct dmi_system_id intel_no_lvds[] = {
.ident = "Hewlett-Packard st5747", .ident = "Hewlett-Packard st5747",
.matches = { .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_BOARD_NAME, "hp st5747"), DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"),
}, },
}, },
{ {

View File

@ -398,10 +398,8 @@ static int init_render_ring(struct intel_ring_buffer *ring)
return ret; return ret;
} }
if (INTEL_INFO(dev)->gen >= 6) {
I915_WRITE(INSTPM,
INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
if (IS_GEN6(dev)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24: /* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement * "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement * policy. [...] This bit must be reset. LRA replacement
@ -411,6 +409,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT); CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
} }
if (INTEL_INFO(dev)->gen >= 6) {
I915_WRITE(INSTPM,
INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
}
return ret; return ret;
} }

View File

@ -1220,8 +1220,14 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in
static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo)
{ {
struct drm_device *dev = intel_sdvo->base.base.dev;
u8 response[2]; u8 response[2];
/* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise
* on the line. */
if (IS_I945G(dev) || IS_I945GM(dev))
return false;
return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
&response, 2) && response[0]; &response, 2) && response[0];
} }

View File

@ -270,7 +270,7 @@ static bool nouveau_dsm_detect(void)
struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
struct pci_dev *pdev = NULL; struct pci_dev *pdev = NULL;
int has_dsm = 0; int has_dsm = 0;
int has_optimus; int has_optimus = 0;
int vga_count = 0; int vga_count = 0;
bool guid_valid; bool guid_valid;
int retval; int retval;

View File

@ -6156,11 +6156,15 @@ dcb_fake_connectors(struct nvbios *bios)
/* heuristic: if we ever get a non-zero connector field, assume /* heuristic: if we ever get a non-zero connector field, assume
* that all the indices are valid and we don't need fake them. * that all the indices are valid and we don't need fake them.
*
* and, as usual, a blacklist of boards with bad bios data..
*/ */
if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) {
for (i = 0; i < dcbt->entries; i++) { for (i = 0; i < dcbt->entries; i++) {
if (dcbt->entry[i].connector) if (dcbt->entry[i].connector)
return; return;
} }
}
/* no useful connector info available, we need to make it up /* no useful connector info available, we need to make it up
* ourselves. the rule here is: anything on the same i2c bus * ourselves. the rule here is: anything on the same i2c bus

View File

@ -32,7 +32,9 @@ static bool
hdmi_sor(struct drm_encoder *encoder) hdmi_sor(struct drm_encoder *encoder)
{ {
struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
if (dev_priv->chipset < 0xa3) if (dev_priv->chipset < 0xa3 ||
dev_priv->chipset == 0xaa ||
dev_priv->chipset == 0xac)
return false; return false;
return true; return true;
} }

View File

@ -29,10 +29,6 @@
#include "nouveau_i2c.h" #include "nouveau_i2c.h"
#include "nouveau_hw.h" #include "nouveau_hw.h"
#define T_TIMEOUT 2200000
#define T_RISEFALL 1000
#define T_HOLD 5000
static void static void
i2c_drive_scl(void *data, int state) i2c_drive_scl(void *data, int state)
{ {
@ -113,175 +109,6 @@ i2c_sense_sda(void *data)
return 0; return 0;
} }
static void
i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
{
udelay((nsec + 500) / 1000);
}
static bool
i2c_raise_scl(struct nouveau_i2c_chan *port)
{
u32 timeout = T_TIMEOUT / T_RISEFALL;
i2c_drive_scl(port, 1);
do {
i2c_delay(port, T_RISEFALL);
} while (!i2c_sense_scl(port) && --timeout);
return timeout != 0;
}
static int
i2c_start(struct nouveau_i2c_chan *port)
{
int ret = 0;
port->state = i2c_sense_scl(port);
port->state |= i2c_sense_sda(port) << 1;
if (port->state != 3) {
i2c_drive_scl(port, 0);
i2c_drive_sda(port, 1);
if (!i2c_raise_scl(port))
ret = -EBUSY;
}
i2c_drive_sda(port, 0);
i2c_delay(port, T_HOLD);
i2c_drive_scl(port, 0);
i2c_delay(port, T_HOLD);
return ret;
}
static void
i2c_stop(struct nouveau_i2c_chan *port)
{
i2c_drive_scl(port, 0);
i2c_drive_sda(port, 0);
i2c_delay(port, T_RISEFALL);
i2c_drive_scl(port, 1);
i2c_delay(port, T_HOLD);
i2c_drive_sda(port, 1);
i2c_delay(port, T_HOLD);
}
static int
i2c_bitw(struct nouveau_i2c_chan *port, int sda)
{
i2c_drive_sda(port, sda);
i2c_delay(port, T_RISEFALL);
if (!i2c_raise_scl(port))
return -ETIMEDOUT;
i2c_delay(port, T_HOLD);
i2c_drive_scl(port, 0);
i2c_delay(port, T_HOLD);
return 0;
}
static int
i2c_bitr(struct nouveau_i2c_chan *port)
{
int sda;
i2c_drive_sda(port, 1);
i2c_delay(port, T_RISEFALL);
if (!i2c_raise_scl(port))
return -ETIMEDOUT;
i2c_delay(port, T_HOLD);
sda = i2c_sense_sda(port);
i2c_drive_scl(port, 0);
i2c_delay(port, T_HOLD);
return sda;
}
static int
i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
{
int i, bit;
*byte = 0;
for (i = 7; i >= 0; i--) {
bit = i2c_bitr(port);
if (bit < 0)
return bit;
*byte |= bit << i;
}
return i2c_bitw(port, last ? 1 : 0);
}
static int
i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
{
int i, ret;
for (i = 7; i >= 0; i--) {
ret = i2c_bitw(port, !!(byte & (1 << i)));
if (ret < 0)
return ret;
}
ret = i2c_bitr(port);
if (ret == 1) /* nack */
ret = -EIO;
return ret;
}
static int
i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
{
u32 addr = msg->addr << 1;
if (msg->flags & I2C_M_RD)
addr |= 1;
return i2c_put_byte(port, addr);
}
static int
i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
struct i2c_msg *msg = msgs;
int ret = 0, mcnt = num;
while (!ret && mcnt--) {
u8 remaining = msg->len;
u8 *ptr = msg->buf;
ret = i2c_start(port);
if (ret == 0)
ret = i2c_addr(port, msg);
if (msg->flags & I2C_M_RD) {
while (!ret && remaining--)
ret = i2c_get_byte(port, ptr++, !remaining);
} else {
while (!ret && remaining--)
ret = i2c_put_byte(port, *ptr++);
}
msg++;
}
i2c_stop(port);
return (ret < 0) ? ret : num;
}
static u32
i2c_bit_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
const struct i2c_algorithm nouveau_i2c_bit_algo = {
.master_xfer = i2c_bit_xfer,
.functionality = i2c_bit_func
};
static const uint32_t nv50_i2c_port[] = { static const uint32_t nv50_i2c_port[] = {
0x00e138, 0x00e150, 0x00e168, 0x00e180, 0x00e138, 0x00e150, 0x00e168, 0x00e180,
0x00e254, 0x00e274, 0x00e764, 0x00e780, 0x00e254, 0x00e274, 0x00e764, 0x00e780,
@ -384,12 +211,10 @@ nouveau_i2c_init(struct drm_device *dev)
case 0: /* NV04:NV50 */ case 0: /* NV04:NV50 */
port->drive = entry[0]; port->drive = entry[0];
port->sense = entry[1]; port->sense = entry[1];
port->adapter.algo = &nouveau_i2c_bit_algo;
break; break;
case 4: /* NV4E */ case 4: /* NV4E */
port->drive = 0x600800 + entry[1]; port->drive = 0x600800 + entry[1];
port->sense = port->drive; port->sense = port->drive;
port->adapter.algo = &nouveau_i2c_bit_algo;
break; break;
case 5: /* NV50- */ case 5: /* NV50- */
port->drive = entry[0] & 0x0f; port->drive = entry[0] & 0x0f;
@ -402,7 +227,6 @@ nouveau_i2c_init(struct drm_device *dev)
port->drive = 0x00d014 + (port->drive * 0x20); port->drive = 0x00d014 + (port->drive * 0x20);
port->sense = port->drive; port->sense = port->drive;
} }
port->adapter.algo = &nouveau_i2c_bit_algo;
break; break;
case 6: /* NV50- DP AUX */ case 6: /* NV50- DP AUX */
port->drive = entry[0]; port->drive = entry[0];
@ -413,7 +237,7 @@ nouveau_i2c_init(struct drm_device *dev)
break; break;
} }
if (!port->adapter.algo) { if (!port->adapter.algo && !port->drive) {
NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n", NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
i, port->type, port->drive, port->sense); i, port->type, port->drive, port->sense);
kfree(port); kfree(port);
@ -429,7 +253,26 @@ nouveau_i2c_init(struct drm_device *dev)
port->dcb = ROM32(entry[0]); port->dcb = ROM32(entry[0]);
i2c_set_adapdata(&port->adapter, i2c); i2c_set_adapdata(&port->adapter, i2c);
if (port->adapter.algo != &nouveau_dp_i2c_algo) {
port->adapter.algo_data = &port->bit;
port->bit.udelay = 10;
port->bit.timeout = usecs_to_jiffies(2200);
port->bit.data = port;
port->bit.setsda = i2c_drive_sda;
port->bit.setscl = i2c_drive_scl;
port->bit.getsda = i2c_sense_sda;
port->bit.getscl = i2c_sense_scl;
i2c_drive_scl(port, 0);
i2c_drive_sda(port, 1);
i2c_drive_scl(port, 1);
ret = i2c_bit_add_bus(&port->adapter);
} else {
port->adapter.algo = &nouveau_dp_i2c_algo;
ret = i2c_add_adapter(&port->adapter); ret = i2c_add_adapter(&port->adapter);
}
if (ret) { if (ret) {
NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret); NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
kfree(port); kfree(port);

View File

@ -34,6 +34,7 @@
struct nouveau_i2c_chan { struct nouveau_i2c_chan {
struct i2c_adapter adapter; struct i2c_adapter adapter;
struct drm_device *dev; struct drm_device *dev;
struct i2c_algo_bit_data bit;
struct list_head head; struct list_head head;
u8 index; u8 index;
u8 type; u8 type;

View File

@ -65,7 +65,7 @@ nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
if (line < 10) { if (line < 10) {
line = (line - 2) * 4; line = (line - 2) * 4;
reg = NV_PCRTC_GPIO_EXT; reg = NV_PCRTC_GPIO_EXT;
mask = 0x00000003 << ((line - 2) * 4); mask = 0x00000003;
data = (dir << 1) | out; data = (dir << 1) | out;
} else } else
if (line < 14) { if (line < 14) {

View File

@ -54,6 +54,11 @@ nvc0_mfb_isr(struct drm_device *dev)
nvc0_mfb_subp_isr(dev, unit, subp); nvc0_mfb_subp_isr(dev, unit, subp);
units &= ~(1 << unit); units &= ~(1 << unit);
} }
/* we do something horribly wrong and upset PMFB a lot, so mask off
* interrupts from it after the first one until it's fixed
*/
nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
} }
static void static void

View File

@ -241,8 +241,8 @@ int radeon_wb_init(struct radeon_device *rdev)
rdev->wb.use_event = true; rdev->wb.use_event = true;
} }
} }
/* always use writeback/events on NI */ /* always use writeback/events on NI, APUs */
if (ASIC_IS_DCE5(rdev)) { if (rdev->family >= CHIP_PALM) {
rdev->wb.enabled = true; rdev->wb.enabled = true;
rdev->wb.use_event = true; rdev->wb.use_event = true;
} }

View File

@ -52,7 +52,7 @@ module_param_named(tjmax, force_tjmax, int, 0444);
MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */
#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ #define NUM_REAL_CORES 32 /* Number of Real cores per cpu */
#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */
#define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
@ -709,6 +709,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
indx = TO_ATTR_NO(cpu); indx = TO_ATTR_NO(cpu);
/* The core id is too big, just return */
if (indx > MAX_CORE_DATA - 1)
return;
if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu) if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu)
coretemp_remove_core(pdata, &pdev->dev, indx); coretemp_remove_core(pdata, &pdev->dev, indx);

Some files were not shown because too many files have changed in this diff.