dect / linux-2.6

Merge master.kernel.org:/home/rmk/linux-2.6-arm

* master.kernel.org:/home/rmk/linux-2.6-arm:
  ARM: 5974/1: arm/mach-at91 Makefile: remove two blanks.
  ARM: 6052/1: kdump: make kexec work in interrupt context
  ARM: 6051/1: VFP: preserve the HW context when calling signal handlers
  ARM: 6050/1: VFP: fix the SMP versions of vfp_{sync,flush}_hwstate
  ARM: 6007/1: fix highmem with VIPT cache and DMA
  ARM: 5975/1: AT91 slow-clock suspend: don't wait when turning PLLs off
Linus Torvalds committed 2010-04-19 07:26:21 -07:00
commit eb3e5cce2b
13 changed files with 251 additions and 68 deletions

arch/arm/include/asm/highmem.h

@@ -11,7 +11,11 @@
#define kmap_prot PAGE_KERNEL
#define flush_cache_kmaps() flush_cache_all()
#define flush_cache_kmaps() \
do { \
if (cache_is_vivt()) \
flush_cache_all(); \
} while (0)
extern pte_t *pkmap_page_table;
@@ -21,11 +25,20 @@ extern void *kmap_high(struct page *page);
extern void *kmap_high_get(struct page *page);
extern void kunmap_high(struct page *page);
extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
/*
* The following functions are already defined by <linux/highmem.h>
* when CONFIG_HIGHMEM is not set.
*/
#ifdef CONFIG_HIGHMEM
extern void *kmap(struct page *page);
extern void kunmap(struct page *page);
extern void *kmap_atomic(struct page *page, enum km_type type);
extern void kunmap_atomic(void *kvaddr, enum km_type type);
extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
extern struct page *kmap_atomic_to_page(const void *ptr);
#endif
#endif
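
The kmap prototypes above are unchanged; what this series changes is when the cache gets flushed for these mappings. A minimal usage sketch, not part of this commit (the helper name is illustrative): after the change, kunmap_atomic() flushes the fixmap slot only when the cache is VIVT, while VIPT maintenance is done explicitly by the DMA and flush_dcache_page() paths changed further down.

#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch only: copy data into a highmem page through an atomic kmap. */
static void copy_into_highmem_page(struct page *page, const void *src,
				   size_t len)
{
	void *dst = kmap_atomic(page, KM_USER0);	/* per-CPU fixmap slot */

	memcpy(dst, src, len);
	kunmap_atomic(dst, KM_USER0);	/* flushes the slot only if cache_is_vivt() */
}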

arch/arm/include/asm/kmap_types.h

@@ -18,6 +18,7 @@ enum km_type {
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_L1_CACHE,
KM_L2_CACHE,
KM_TYPE_NR
};
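
The new KM_L1_CACHE entry gives the VIPT cache-maintenance helpers added below their own per-CPU fixmap slot, alongside the existing KM_L2_CACHE slot used by the L2 cache code. A sketch of how a (km_type, cpu) pair maps to a fixmap virtual address, mirroring the computation in the arch/arm/mm/highmem.c hunk further down (the helper name is illustrative):

#include <asm/fixmap.h>
#include <asm/kmap_types.h>

/* Illustrative only: each CPU owns KM_TYPE_NR consecutive fixmap slots. */
static unsigned long km_slot_vaddr(enum km_type type, unsigned int cpu)
{
	unsigned int idx = type + KM_TYPE_NR * cpu;

	return __fix_to_virt(FIX_KMAP_BEGIN + idx);
}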

arch/arm/include/asm/ucontext.h

@@ -59,23 +59,22 @@ struct iwmmxt_sigframe {
#endif /* CONFIG_IWMMXT */
#ifdef CONFIG_VFP
#if __LINUX_ARM_ARCH__ < 6
/* For ARM pre-v6, we use fstmiax and fldmiax. This adds one extra
* word after the registers, and a word of padding at the end for
* alignment. */
#define VFP_MAGIC 0x56465001
#define VFP_STORAGE_SIZE 152
#else
#define VFP_MAGIC 0x56465002
#define VFP_STORAGE_SIZE 144
#endif
struct vfp_sigframe
{
unsigned long magic;
unsigned long size;
union vfp_state storage;
};
struct user_vfp ufp;
struct user_vfp_exc ufp_exc;
} __attribute__((__aligned__(8)));
/*
* 8 bytes for magic and size, 264 bytes for ufp, 12 bytes for ufp_exc,
* 4 bytes padding.
*/
#define VFP_STORAGE_SIZE sizeof(struct vfp_sigframe)
#endif /* CONFIG_VFP */
/*
@@ -91,7 +90,7 @@ struct aux_sigframe {
#ifdef CONFIG_IWMMXT
struct iwmmxt_sigframe iwmmxt;
#endif
#if 0 && defined CONFIG_VFP /* Not yet saved. */
#ifdef CONFIG_VFP
struct vfp_sigframe vfp;
#endif
/* Something that isn't a valid magic number for any coprocessor. */
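
For reference, the size comment in the vfp_sigframe hunk above adds up as follows: sizeof(struct user_vfp) is 32 * 8 + 4 = 260 bytes, padded to 264 by the 8-byte alignment of its long long array; with the 8-byte magic/size header and the 12-byte user_vfp_exc that gives 284, which the __aligned__(8) attribute rounds up to 288. A hypothetical compile-time check of that layout, not in the commit and assuming EABI alignment rules and CONFIG_VFP:

#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <asm/ucontext.h>
#include <asm/user.h>

static inline void vfp_sigframe_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct user_vfp) != 264);		/* 260 rounded up to 8 */
	BUILD_BUG_ON(sizeof(struct user_vfp_exc) != 12);	/* three unsigned longs */
	BUILD_BUG_ON(sizeof(struct vfp_sigframe) != 288);	/* 8 + 264 + 12 + 4 pad */
}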

arch/arm/include/asm/user.h

@@ -83,11 +83,21 @@ struct user{
/*
* User specific VFP registers. If only VFPv2 is present, registers 16 to 31
* are ignored by the ptrace system call.
* are ignored by the ptrace system call and the signal handler.
*/
struct user_vfp {
unsigned long long fpregs[32];
unsigned long fpscr;
};
/*
* VFP exception registers exposed to user space during signal delivery.
* Fields not relevant to the current VFP architecture are ignored.
*/
struct user_vfp_exc {
unsigned long fpexc;
unsigned long fpinst;
unsigned long fpinst2;
};
#endif /* _ARM_USER_H */
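
With these definitions a signal handler in user space (or a debugger) can read the VFP state the kernel now saves. A user-space sketch, assuming glibc's ARM ucontext_t exposes the uc_regspace area that the kernel fills with magic/size-prefixed coprocessor blocks; the *_user names and VFP_MAGIC_USER are illustrative mirrors of the kernel definitions:

#include <signal.h>
#include <ucontext.h>

#define VFP_MAGIC_USER	0x56465001UL	/* mirrors the kernel's VFP_MAGIC */

struct user_vfp_user {
	unsigned long long fpregs[32];
	unsigned long fpscr;
};

struct user_vfp_exc_user {
	unsigned long fpexc, fpinst, fpinst2;
};

struct vfp_block_user {			/* mirrors struct vfp_sigframe */
	unsigned long magic;
	unsigned long size;
	struct user_vfp_user ufp;
	struct user_vfp_exc_user ufp_exc;
};

static volatile unsigned long fpscr_at_signal;

static void handler(int sig, siginfo_t *info, void *uc_void)
{
	ucontext_t *uc = uc_void;
	unsigned long *p = (unsigned long *)uc->uc_regspace;
	unsigned long *end = p + sizeof(uc->uc_regspace) / sizeof(*p);

	/* Coprocessor blocks are (magic, size) prefixed; magic 0 terminates. */
	while (p + 2 <= end && p[0] != 0 && p[1] != 0) {
		if (p[0] == VFP_MAGIC_USER) {
			struct vfp_block_user *vfp = (struct vfp_block_user *)p;

			fpscr_at_signal = vfp->ufp.fpscr;
			break;
		}
		p += p[1] / sizeof(*p);		/* size field is in bytes */
	}
}

The handler would be installed with sigaction() and SA_SIGINFO so that the third argument carries the ucontext.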

arch/arm/kernel/signal.c

@@ -18,6 +18,7 @@
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
#include "ptrace.h"
#include "signal.h"
@@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
#endif
#ifdef CONFIG_VFP
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *h = &thread->vfpstate.hard;
const unsigned long magic = VFP_MAGIC;
const unsigned long size = VFP_STORAGE_SIZE;
int err = 0;
vfp_sync_hwstate(thread);
__put_user_error(magic, &frame->magic, err);
__put_user_error(size, &frame->size, err);
/*
* Copy the floating point registers. There can be unused
* registers; see asm/hwcap.h for details.
*/
err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs,
sizeof(h->fpregs));
/*
* Copy the status and control register.
*/
__put_user_error(h->fpscr, &frame->ufp.fpscr, err);
/*
* Copy the exception registers.
*/
__put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err);
__put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
__put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
return err ? -EFAULT : 0;
}
static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *h = &thread->vfpstate.hard;
unsigned long magic;
unsigned long size;
unsigned long fpexc;
int err = 0;
__get_user_error(magic, &frame->magic, err);
__get_user_error(size, &frame->size, err);
if (err)
return -EFAULT;
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL;
/*
* Copy the floating point registers. There can be unused
* registers; see asm/hwcap.h for details.
*/
err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
sizeof(h->fpregs));
/*
* Copy the status and control register.
*/
__get_user_error(h->fpscr, &frame->ufp.fpscr, err);
/*
* Sanitise and restore the exception registers.
*/
__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);
/* Ensure the VFP is enabled. */
fpexc |= FPEXC_EN;
/* Ensure FPINST2 is invalid and the exception flag is cleared. */
fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
h->fpexc = fpexc;
__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
if (!err)
vfp_flush_hwstate(thread);
return err ? -EFAULT : 0;
}
#endif
/*
* Do a signal return; undo the signal stack. These are aligned to 64-bit.
*/
@@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
// if (err == 0)
// err |= vfp_restore_state(&sf->aux.vfp);
if (err == 0)
err |= restore_vfp_context(&aux->vfp);
#endif
return err;
@@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
// if (err == 0)
// err |= vfp_save_state(&sf->aux.vfp);
if (err == 0)
err |= preserve_vfp_context(&aux->vfp);
#endif
__put_user_error(0, &aux->end_magic, err);

arch/arm/mach-at91/Makefile

@@ -16,8 +16,8 @@ obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_d
obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9RL) += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91CAP9) += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT572D940HF) += at572d940hf.o at91sam926x_time.o at572d940hf_devices.o sam9_smc.o
obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o

arch/arm/mach-at91/pm_slowclock.S

@@ -175,8 +175,6 @@ ENTRY(at91_slow_clock)
orr r3, r3, #(1 << 29) /* bit 29 always set */
str r3, [r1, #(AT91_CKGR_PLLAR - AT91_PMC)]
wait_pllalock
/* Save PLLB setting and disable it */
ldr r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
str r3, .saved_pllbr
@@ -184,8 +182,6 @@ ENTRY(at91_slow_clock)
mov r3, #AT91_PMC_PLLCOUNT
str r3, [r1, #(AT91_CKGR_PLLBR - AT91_PMC)]
wait_pllblock
/* Turn off the main oscillator */
ldr r3, [r1, #(AT91_CKGR_MOR - AT91_PMC)]
bic r3, r3, #AT91_PMC_MOSCEN

arch/arm/mm/copypage-v6.c

@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
kfrom = kmap_atomic(from, KM_USER0);
kto = kmap_atomic(to, KM_USER1);
copy_page(kto, kfrom);
#ifdef CONFIG_HIGHMEM
/*
* kmap_atomic() doesn't set the page virtual address, and
* kunmap_atomic() takes care of cache flushing already.
*/
if (page_address(to) != NULL)
#endif
__cpuc_flush_dcache_area(kto, PAGE_SIZE);
__cpuc_flush_dcache_area(kto, PAGE_SIZE);
kunmap_atomic(kto, KM_USER1);
kunmap_atomic(kfrom, KM_USER0);
}

arch/arm/mm/dma-mapping.c

@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
vaddr += offset;
op(vaddr, len, dir);
kunmap_high(page);
} else if (cache_is_vipt()) {
pte_t saved_pte;
vaddr = kmap_high_l1_vipt(page, &saved_pte);
op(vaddr + offset, len, dir);
kunmap_high_l1_vipt(page, saved_pte);
}
} else {
vaddr = page_address(page) + offset;

arch/arm/mm/flush.c

@@ -13,6 +13,7 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
void *addr = page_address(page);
/*
* Writeback any data associated with the kernel mapping of this
* page. This ensures that data in the physical page is mutually
* coherent with the kernel's mapping.
*/
#ifdef CONFIG_HIGHMEM
/*
* kmap_atomic() doesn't set the page virtual address, and
* kunmap_atomic() takes care of cache flushing already.
*/
if (addr)
#endif
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
if (!PageHighMem(page)) {
__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
} else {
void *addr = kmap_high_get(page);
if (addr) {
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_high(page);
} else if (cache_is_vipt()) {
pte_t saved_pte;
addr = kmap_high_l1_vipt(page, &saved_pte);
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_high_l1_vipt(page, saved_pte);
}
}
/*
* If this is a page cache page, and we have an aliasing VIPT cache,

arch/arm/mm/highmem.c

@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
if (kvaddr >= (void *)FIXADDR_START) {
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
pte = TOP_PTE(vaddr);
return pte_page(*pte);
}
#ifdef CONFIG_CPU_CACHE_VIPT
#include <linux/percpu.h>
/*
* The VIVT cache of a highmem page is always flushed before the page
* is unmapped. Hence unmapped highmem pages need no cache maintenance
* in that case.
*
* However unmapped pages may still be cached with a VIPT cache, and
* it is not possible to perform cache maintenance on them using physical
* addresses unfortunately. So we have no choice but to set up a temporary
* virtual mapping for that purpose.
*
* Yet this VIPT cache maintenance may be triggered from DMA support
* functions which are possibly called from interrupt context. As we don't
* want to keep interrupts disabled all the time when such maintenance is
* taking place, we therefore allow for some reentrancy by preserving and
* restoring the previous fixmap entry before the interrupted context is
* resumed. If the reentrancy depth is 0 then there is no need to restore
* the previous fixmap, and leaving the current one in place allows it to
* be reused the next time without a TLB flush (common with DMA).
*/
static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
unsigned int idx, cpu = smp_processor_id();
int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
unsigned long vaddr, flags;
pte_t pte, *ptep;
idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
ptep = TOP_PTE(vaddr);
pte = mk_pte(page, kmap_prot);
if (!in_interrupt())
preempt_disable();
raw_local_irq_save(flags);
(*depth)++;
if (pte_val(*ptep) == pte_val(pte)) {
*saved_pte = pte;
} else {
*saved_pte = *ptep;
set_pte_ext(ptep, pte, 0);
local_flush_tlb_kernel_page(vaddr);
}
raw_local_irq_restore(flags);
return (void *)vaddr;
}
void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
unsigned int idx, cpu = smp_processor_id();
int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
unsigned long vaddr, flags;
pte_t pte, *ptep;
idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
ptep = TOP_PTE(vaddr);
pte = mk_pte(page, kmap_prot);
BUG_ON(pte_val(*ptep) != pte_val(pte));
BUG_ON(*depth <= 0);
raw_local_irq_save(flags);
(*depth)--;
if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
set_pte_ext(ptep, saved_pte, 0);
local_flush_tlb_kernel_page(vaddr);
}
raw_local_irq_restore(flags);
if (!in_interrupt())
preempt_enable();
}
#endif /* CONFIG_CPU_CACHE_VIPT */
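
The expected calling pattern for this pair is the one used in the dma-mapping.c and flush.c hunks above: the caller keeps the saved PTE on its own stack so that a nested, interrupt-context user of the same per-CPU slot can transparently restore the mapping it interrupted. A condensed sketch of such a caller (the function name is illustrative, not from the kernel):

#include <linux/highmem.h>

/*
 * Illustrative caller, modelled on the dma_cache_maint_page() change
 * above: cache maintenance on a highmem page with no kernel mapping,
 * on a VIPT cache, possibly from interrupt context.
 */
static void vipt_maint_unmapped_page(struct page *page, unsigned long offset,
				     size_t len,
				     void (*op)(const void *, size_t, int),
				     int dir)
{
	pte_t saved_pte;
	void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

	op(vaddr + offset, len, dir);
	kunmap_high_l1_vipt(page, saved_pte);	/* restores any interrupted mapping */
}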

arch/arm/mm/mmu.c

@@ -1054,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
pgd_t *pgd;
int i;
if (current->mm && current->mm->pgd)
pgd = current->mm->pgd;
else
pgd = init_mm.pgd;
/*
* We need access to user-mode page tables here. For kernel threads
* we don't have any user-mode mappings so we use the context that we
* "borrowed".
*/
pgd = current->active_mm->pgd;
base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())

arch/arm/vfp/vfpmodule.c

@@ -428,26 +428,6 @@ static void vfp_pm_init(void)
static inline void vfp_pm_init(void) { }
#endif /* CONFIG_PM */
/*
* Synchronise the hardware VFP state of a thread other than current with the
* saved one. This function is used by the ptrace mechanism.
*/
#ifdef CONFIG_SMP
void vfp_sync_hwstate(struct thread_info *thread)
{
}
void vfp_flush_hwstate(struct thread_info *thread)
{
/*
* On SMP systems, the VFP state is automatically saved at every
* context switch. We mark the thread VFP state as belonging to a
* non-existent CPU so that the saved one will be reloaded when
* needed.
*/
thread->vfpstate.hard.cpu = NR_CPUS;
}
#else
void vfp_sync_hwstate(struct thread_info *thread)
{
unsigned int cpu = get_cpu();
@@ -490,9 +470,18 @@ void vfp_flush_hwstate(struct thread_info *thread)
last_VFP_context[cpu] = NULL;
}
#ifdef CONFIG_SMP
/*
* For SMP we still have to take care of the case where the thread
* migrates to another CPU and then back to the original CPU on which
* the last VFP user is still the same thread. Mark the thread VFP
* state as belonging to a non-existent CPU so that the saved one will
* be reloaded in the above case.
*/
thread->vfpstate.hard.cpu = NR_CPUS;
#endif
put_cpu();
}
#endif
#include <linux/smp.h>
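
The signal code above relies on this pair being usable on the current thread in any configuration: vfp_sync_hwstate() brings thread->vfpstate up to date with the hardware before it is read, and vfp_flush_hwstate() invalidates the hardware copy after the saved state has been rewritten, so the next VFP use reloads it (on SMP by tagging the state with a non-existent CPU). A sketch of that calling convention, with an illustrative helper and callback that are not part of the commit:

#include <asm/thread_info.h>
#include <asm/fpstate.h>

static void edit_saved_vfp_state(struct thread_info *thread,
				 void (*edit)(struct vfp_hard_struct *))
{
	vfp_sync_hwstate(thread);	/* hardware registers -> thread->vfpstate */
	edit(&thread->vfpstate.hard);	/* modify the in-memory copy */
	vfp_flush_hwstate(thread);	/* force a reload from memory on next VFP use */
}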