
Add new common lazy FPU state saving option for ARMv7-M. Not yet verified

Gregory Nutt 2015-03-06 08:26:43 -06:00
parent 18c66fad13
commit a2380d0903
17 changed files with 461 additions and 21 deletions

View File

@ -161,6 +161,7 @@ config ARCH_CHIP_SAMD
config ARCH_CHIP_SAM34
bool "Atmel SAM3/SAM4"
select ARCH_HAVE_CMNVECTOR
select ARCH_HAVE_MPU
select ARCH_HAVE_RAMFUNCS
---help---
@ -168,6 +169,8 @@ config ARCH_CHIP_SAM34
config ARCH_CHIP_SAMV7
bool "Atmel SAMV7"
select ARCH_HAVE_CMNVECTOR
select ARMV7M_CMNVECTOR
select ARCH_HAVE_MPU
select ARCH_HAVE_RAMFUNCS
---help---
@ -306,6 +309,40 @@ config ARMV7M_CMNVECTOR
logic or the common vector logic. This applies only to ARMv7-M
architectures.
config ARMV7M_LAZYFPU
bool "Lazy FPU storage"
default n
depends on ARCH_HAVE_CMNVECTOR
---help---
There are two forms of the common vector logic. There are pros and
cons to each option:

1) The standard common vector logic exploits features of the ARMv7-M
architecture to save all of the floating point registers on entry into
each interrupt and to restore them when the interrupt returns. The
primary advantage of this approach is that floating point operations
are available in interrupt handling logic. Since the volatile
registers are preserved, operations on the floating point registers
by interrupt handling logic have no ill effect. The downside is, of
course, that more stack operations are required on each interrupt to
save and restore the floating point registers. Because of some
special features of the ARMv7-M, this is not as much overhead as you
might expect, but it is overhead nonetheless.

2) The lazy FPU common vector logic does not save or restore floating
point registers on entry to and exit from the interrupt handler.
Rather, the floating point registers are not restored until it is
absolutely necessary to do so: when a context switch occurs and the
interrupt handler will be returning to a different floating point
context. Since the floating point registers are not protected,
floating point operations must not be performed in interrupt handling
logic. Better interrupt performance can be expected, however. (A
sketch contrasting the two strategies follows this file excerpt.)

By default, the "standard" common vector logic is built. This option
selects the alternate lazy FPU common vector logic.
config ARCH_HAVE_FPU
bool
default n
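
The trade-off described in the ARMV7M_LAZYFPU help text above can be sketched in a few lines of C. This is illustrative pseudologic only, not NuttX code: fpu_save(), fpu_restore(), and dispatch_irq() are hypothetical stand-ins for the real register save and IRQ dispatch machinery.

#include <stdint.h>

struct context
{
  uint32_t fpregs[33];                  /* S0-S31 plus FPSCR */
};

/* Hypothetical helpers standing in for the real save/restore logic */

static void fpu_save(struct context *c)    { (void)c; }
static void fpu_restore(struct context *c) { (void)c; }
static struct context *dispatch_irq(int irq, struct context *cur)
{
  (void)irq;
  return cur;                           /* Same context: no switch */
}

/* Standard logic: FPU state is saved and restored on EVERY interrupt,
 * so interrupt handlers may safely use floating point.
 */

struct context *irq_standard(int irq, struct context *cur)
{
  struct context *next;

  fpu_save(cur);                        /* Unconditional cost on entry */
  next = dispatch_irq(irq, cur);
  fpu_restore(next);                    /* Unconditional cost on exit */
  return next;
}

/* Lazy logic: FPU state is touched only if the handler causes a context
 * switch; handlers therefore must NOT execute floating point instructions.
 */

struct context *irq_lazy(int irq, struct context *cur)
{
  struct context *next = dispatch_irq(irq, cur);

  if (next != cur)                      /* Pay the cost only on a switch */
    {
      fpu_save(cur);
      fpu_restore(next);
    }

  return next;
}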

View File

@ -54,7 +54,7 @@
/* Included implementation-dependent register save structure layouts */
#ifdef CONFIG_ARMV7M_CMNVECTOR
#if defined(CONFIG_ARMV7M_CMNVECTOR) && !defined(CONFIG_ARMV7M_LAZYFPU)
# include <arch/armv7-m/irq_cmnvector.h>
#else
# include <arch/armv7-m/irq_lazyfpu.h>

View File

@ -93,7 +93,7 @@
* state from the main stack. Execution uses MSP after return.
*/
#if defined(CONFIG_ARMV7M_CMNVECTOR) && defined(CONFIG_ARCH_FPU)
#if defined(CONFIG_ARMV7M_CMNVECTOR) && !defined(CONFIG_ARMV7M_LAZYFPU) && defined(CONFIG_ARCH_FPU)
# define EXC_RETURN_PRIVTHR (EXC_RETURN_BASE | EXC_RETURN_THREAD_MODE)
#else
# define EXC_RETURN_PRIVTHR (EXC_RETURN_BASE | EXC_RETURN_STD_CONTEXT | \
@ -104,7 +104,7 @@
* state from the process stack. Execution uses PSP after return.
*/
#if defined(CONFIG_ARMV7M_CMNVECTOR) && defined(CONFIG_ARCH_FPU)
#if defined(CONFIG_ARMV7M_CMNVECTOR) && !defined(CONFIG_ARMV7M_LAZYFPU) && defined(CONFIG_ARCH_FPU)
# define EXC_RETURN_UNPRIVTHR (EXC_RETURN_BASE | EXC_RETURN_THREAD_MODE | \
EXC_RETURN_PROCESS_STACK)
#else
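
The value chosen here is what tells the hardware, on exception return, whether to unstack a standard 8-word frame or an extended frame that also holds S0-S15 and the FPSCR (EXC_RETURN bit 4). A standalone sketch of the bit arithmetic follows; the mask values are the architectural ARMv7-M encodings and are assumed to match NuttX's exc_return.h:

#include <stdint.h>
#include <stdio.h>

#define EXC_RETURN_BASE          0xffffffe1u
#define EXC_RETURN_PROCESS_STACK (1u << 2)  /* 1: Return uses the PSP */
#define EXC_RETURN_THREAD_MODE   (1u << 3)  /* 1: Return to thread mode */
#define EXC_RETURN_STD_CONTEXT   (1u << 4)  /* 1: Standard (no FP) frame */

int main(void)
{
  /* Standard common vectors with the FPU: bit 4 is left clear so that
   * the hardware stacks and unstacks S0-S15/FPSCR automatically.
   */

  printf("extended frame: 0x%08x\n",
         EXC_RETURN_BASE | EXC_RETURN_THREAD_MODE);          /* 0xffffffe9 */

  /* Lazy FPU (or no FPU): bit 4 is set so only the 8-word frame is used */

  printf("standard frame: 0x%08x\n",
         EXC_RETURN_BASE | EXC_RETURN_STD_CONTEXT | EXC_RETURN_THREAD_MODE);
  return 0;                                                  /* 0xfffffff9 */
}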

View File

@ -45,7 +45,8 @@
#include "up_internal.h"
#if defined(CONFIG_ARCH_FPU) && !defined(CONFIG_ARMV7M_CMNVECTOR)
#if defined(CONFIG_ARCH_FPU) && \
(!defined(CONFIG_ARMV7M_CMNVECTOR) || defined(CONFIG_ARMV7M_LAZYFPU))
/****************************************************************************
* Pre-processor Definitions
@ -114,4 +115,4 @@ void up_copyarmstate(uint32_t *dest, uint32_t *src)
}
}
#endif /* CONFIG_ARCH_FPU && !CONFIG_ARMV7M_CMNVECTOR */
#endif /* CONFIG_ARCH_FPU && (!CONFIG_ARMV7M_CMNVECTOR || CONFIG_ARMV7M_LAZYFPU) */

View File

@ -132,7 +132,8 @@ void up_initial_state(struct tcb_s *tcb)
#endif
#endif /* CONFIG_PIC */
#if defined(CONFIG_ARMV7M_CMNVECTOR) || defined(CONFIG_BUILD_PROTECTED)
#if (defined(CONFIG_ARMV7M_CMNVECTOR) && !defined(CONFIG_ARMV7M_LAZYFPU)) || \
defined(CONFIG_BUILD_PROTECTED)
/* All tasks start via a stub function in kernel space. So all
* tasks must start in privileged thread mode. If CONFIG_BUILD_PROTECTED
* is defined, then that stub function will switch to unprivileged
@ -141,14 +142,15 @@ void up_initial_state(struct tcb_s *tcb)
xcp->regs[REG_EXC_RETURN] = EXC_RETURN_PRIVTHR;
#endif /* CONFIG_ARMV7M_CMNVECTOR || CONFIG_BUILD_PROTECTED */
#endif /* (CONFIG_ARMV7M_CMNVECTOR && !CONFIG_ARMV7M_LAZYFPU) || CONFIG_BUILD_PROTECTED */
#if defined(CONFIG_ARMV7M_CMNVECTOR) && defined(CONFIG_ARCH_FPU)
#if defined(CONFIG_ARMV7M_CMNVECTOR) && !defined(CONFIG_ARMV7M_LAZYFPU) && \
defined(CONFIG_ARCH_FPU)
xcp->regs[REG_FPSCR] = 0; // XXX initial FPSCR should be configurable
xcp->regs[REG_FPReserved] = 0;
#endif /* CONFIG_ARMV7M_CMNVECTOR && CONFIG_ARCH_FPU */
#endif /* CONFIG_ARMV7M_CMNVECTOR && !CONFIG_ARMV7M_LAZYFPU && CONFIG_ARCH_FPU */
/* Enable or disable interrupts, based on user configuration */

View File

@ -0,0 +1,358 @@
/************************************************************************************************
* arch/arm/src/armv7-m/up_lazyexception.S
*
* Copyright (C) 2009-2010, 2013-2015 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name NuttX nor the names of its contributors may be
* used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
************************************************************************************************/
/************************************************************************************************
* Included Files
************************************************************************************************/
#include <nuttx/config.h>
#include <arch/irq.h>
#include "exc_return.h"
#include "chip.h"
/************************************************************************************************
* Preprocessor Definitions
************************************************************************************************/
/* Configuration ********************************************************************************/
#ifdef CONFIG_ARCH_HIPRI_INTERRUPT
/* In kernel mode without an interrupt stack, this interrupt handler will set the MSP to the
* stack pointer of the interrupted thread. If the interrupted thread was a privileged
* thread, that will be the MSP otherwise it will be the PSP. If the PSP is used, then the
* value of the MSP will be invalid when the interrupt handler returns because it will be a
* pointer to an old position in the unprivileged stack. Then when the high priority
* interrupt occurs and uses this stale MSP, there will most likely be a system failure.
*
* If the interrupt stack is selected, on the other hand, then the interrupt handler will
* always set the MSP to the interrupt stack. So when the high priority interrupt occurs,
* it will either use the MSP of the last privileged thread to run or, in the case of the
* nested interrupt, the interrupt stack if no privileged task has run.
*/
# if defined(CONFIG_BUILD_PROTECTED) && CONFIG_ARCH_INTERRUPTSTACK < 4
# error Interrupt stack must be used with high priority interrupts in kernel mode
# endif
/* Use of the BASEPRI register to control interrupts is required if nested, high
* priority interrupts are supported.
*/
# ifndef CONFIG_ARMV7M_USEBASEPRI
# error CONFIG_ARMV7M_USEBASEPRI must be used with CONFIG_ARCH_HIPRI_INTERRUPT
# endif
#endif
/************************************************************************************************
* Global Symbols
************************************************************************************************/
.globl exception_common
.syntax unified
.thumb
.file "up_lazyexception.S"
/************************************************************************************************
* .text
************************************************************************************************/
/* Common IRQ handling logic. On entry here, the return stack is on either
* the PSP or the MSP and looks like the following:
*
* REG_XPSR
* REG_R15
* REG_R14
* REG_R12
* REG_R3
* REG_R2
* REG_R1
* MSP->REG_R0
*
* And
* IPSR contains the IRQ number
* R14 Contains the EXC_RETURN value
* We are in handler mode and the current SP is the MSP
*/
.text
.type exception_common, function
exception_common:
/* Get the IRQ number from the IPSR */
mrs r0, ipsr /* R0=exception number */
/* Complete the context save */
#ifdef CONFIG_BUILD_PROTECTED
/* The EXC_RETURN value will be 0xfffffff9 (privileged thread) or 0xfffffff1
* (handler mode) if the stack is on the MSP. It can only be on the PSP if
* EXC_RETURN is 0xfffffffd (unprivileged thread)
*/
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 1f /* Branch if context already on the MSP */
mrs r1, psp /* R1=The process stack pointer (PSP) */
mov sp, r1 /* Set the MSP to the PSP */
1:
#endif
/* The SP, as it stands now, holds the stack pointer value from AFTER the
* exception entry logic pushed the hardware frame. Get r2 = the value of
* the stack pointer BEFORE the interrupt modified it.
*/
mov r2, sp /* R2=Copy of the main/process stack pointer */
add r2, #HW_XCPT_SIZE /* R2=MSP/PSP before the interrupt was taken */
#ifdef CONFIG_ARMV7M_USEBASEPRI
mrs r3, basepri /* R3=Current BASEPRI setting */
#else
mrs r3, primask /* R3=Current PRIMASK setting */
#endif
#ifdef CONFIG_ARCH_FPU
/* Skip over the block of memory reserved for floating point register save.
* Lazy FPU register saving is used. FPU registers will be saved in this
* block only if a context switch occurs (this means, of course, that the FPU
* cannot be used in interrupt processing).
*/
sub sp, #(4*SW_FPU_REGS)
#endif
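/* At this point the frame on the stack is, sketched from low to high
 * addresses (assuming the irq_lazyfpu.h layout):
 *
 *   SP -> [SW_FPU_REGS words reserved for the lazy S0-S31/FPSCR save]
 *         [hardware-stacked frame: R0-R3, R12, LR, PC, xPSR]
 *
 * The software-saved registers (R2=SP, R3=PRIMASK/BASEPRI, R4-R11 and,
 * in protected builds, R14) are pushed below this next.
 */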
/* Save the remaining registers on the stack after the registers pushed
* by the exception handling logic. r2=SP and r3=primask or basepri, r4-r11,
* r14=register values.
*/
#ifdef CONFIG_BUILD_PROTECTED
stmdb sp!, {r2-r11,r14} /* Save the remaining registers plus the SP value */
#else
stmdb sp!, {r2-r11} /* Save the remaining registers plus the SP value */
#endif
#ifndef CONFIG_ARCH_HIPRI_INTERRUPT
/* Disable interrupts, select the stack to use for interrupt handling
* and call up_doirq to handle the interrupt
*/
cpsid i /* Disable further interrupts */
#else
/* Set the BASEPRI register so that further normal interrupts will be
* masked. Nested, high priority interrupts may still occur, however.
*/
mov r2, #NVIC_SYSH_DISABLE_PRIORITY
msr basepri, r2 /* Set the BASEPRI */
#endif
/* There are two arguments to up_doirq:
*
* R0 = The IRQ number
* R1 = The top of the stack points to the saved state
*/
mov r1, sp
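/* Judging from the register usage here and the comments below, up_doirq's
 * C contract is effectively uint32_t *up_doirq(int irq, uint32_t *regs):
 * it returns either regs (no context switch) or the register save area of
 * the task to resume.
 */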
#if CONFIG_ARCH_INTERRUPTSTACK > 3
/* If CONFIG_ARCH_INTERRUPTSTACK is defined, we will set the MSP to use
* a special interrupt stack pointer. The way that this is done
* here prohibits nested interrupts without some additional logic!
*/
ldr sp, =g_intstackbase
str r1, [sp, #-4]! /* Save the MSP on the interrupt stack */
bl up_doirq /* R0=IRQ, R1=register save (msp) */
ldr r1, [sp], #+4 /* Recover R1=main stack pointer */
#else
/* Otherwise, we will re-use the interrupted thread's stack. That may
* mean using either MSP or PSP stack for interrupt level processing (in
* kernel mode).
*/
bl up_doirq /* R0=IRQ, R1=register save (msp) */
mov r1, sp /* Recover R1=main stack pointer */
#endif
/* On return from up_doirq, R0 will hold a pointer to the register context
* array to use for the interrupt return. If that return value is the same
* as current stack pointer, then things are relatively easy.
*/
cmp r0, r1 /* Context switch? */
beq 2f /* Branch if no context switch */
/* We are returning with a pending context switch.
*
* If the FPU is enabled, then we will need to restore FPU registers.
* This is not done in normal interrupt save/restore because the cost
* is prohibitive. This is only done when switching contexts. A
* consequence of this is that floating point operations may not be
* performed in interrupt handling logic.
*
* Here:
* r0 = Address of the register save area
* NOTE: It is a requirement that up_restorefpu() preserve the value of
* r0!
*/
#ifdef CONFIG_ARCH_FPU
bl up_restorefpu /* Restore the FPU registers */
#endif
/* We are returning with a pending context switch. This case is different
* because the register save structure does not lie on the stack but,
* rather, within a TCB structure. We'll have to copy some
* values to the stack.
*/
add r1, r0, #SW_XCPT_SIZE /* R1=Address of HW save area in reg array */
ldmia r1, {r4-r11} /* Fetch eight registers in HW save area */
ldr r1, [r0, #(4*REG_SP)] /* R1=Value of SP before interrupt */
stmdb r1!, {r4-r11} /* Store eight registers in HW save area */
#ifdef CONFIG_BUILD_PROTECTED
ldmia r0, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
#else
ldmia r0, {r2-r11} /* Recover R4-R11 + 2 temp values */
#endif
b 3f /* Re-join common logic */
/* We are returning with no context switch. We simply need to "unwind"
* the same stack frame that we created
*
* Here:
* r1 = Address of the return stack (same as r0)
*/
2:
#ifdef CONFIG_BUILD_PROTECTED
ldmia r1!, {r2-r11,r14} /* Recover R4-R11, r14 + 2 temp values */
#else
ldmia r1!, {r2-r11} /* Recover R4-R11 + 2 temp values */
#endif
#ifdef CONFIG_ARCH_FPU
/* Skip over the block of memory reserved for floating point register
* save. Then R1 is the address of the HW save area
*/
add r1, #(4*SW_FPU_REGS)
#endif
/* Set up to return from the exception
*
* Here:
* r1 = Address on the target thread's stack position at the start of
* the registers saved by hardware
* r3 = primask or basepri
* r4-r11 = restored register values
*/
3:
#ifdef CONFIG_BUILD_PROTECTED
/* The EXC_RETURN value will be 0xfffffff9 (privileged thread) or 0xfffffff1
* (handler mode) if the stack is on the MSP. It can only be on the PSP if
* EXC_RETURN is 0xfffffffd (unprivileged thread)
*/
mrs r2, control /* R2=Contents of the control register */
tst r14, #EXC_RETURN_PROCESS_STACK /* nonzero if context on process stack */
beq 4f /* Branch if privileged */
orr r2, r2, #1 /* Unprivileged mode */
msr psp, r1 /* R1=The process stack pointer */
b 5f
4:
bic r2, r2, #1 /* Privileged mode */
msr msp, r1 /* R1=The main stack pointer */
5:
msr control, r2 /* Save the updated control register */
#else
msr msp, r1 /* Recover the return MSP value */
/* Preload r14 with the special return value first (so that the return
* actually occurs with interrupts still disabled).
*/
ldr r14, =EXC_RETURN_PRIVTHR /* Load the special value */
#endif
/* Restore the interrupt state */
#ifdef CONFIG_ARMV7M_USEBASEPRI
msr basepri, r3 /* Restore interrupts priority masking */
#ifndef CONFIG_ARCH_HIPRI_INTERRUPT
cpsie i /* Re-enable interrupts */
#endif
#else
msr primask, r3 /* Restore interrupts */
#endif
/* Always return with R14 containing the special value that will: (1)
* return to thread mode, and (2) continue to use the MSP
*/
bx r14 /* And return */
.size exception_common, .-exception_common
/************************************************************************************************
* Name: g_intstackalloc/g_intstackbase
*
* Description:
* Storage for the interrupt stack, allocated only if
* CONFIG_ARCH_INTERRUPTSTACK > 3
*
************************************************************************************************/
#if CONFIG_ARCH_INTERRUPTSTACK > 3
.bss
.global g_intstackalloc
.global g_intstackbase
.align 4
g_intstackalloc:
.skip (CONFIG_ARCH_INTERRUPTSTACK & ~3)
g_intstackbase:
.size g_intstackalloc, .-g_intstackalloc
#endif
.end

View File

@ -1,7 +1,7 @@
/****************************************************************************
* arch/arm/src/armv7-m/up_svcall.c
*
* Copyright (C) 2009, 2011-2014 Gregory Nutt. All rights reserved.
* Copyright (C) 2009, 2011-2015 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -206,7 +206,8 @@ int up_svcall(int irq, FAR void *context)
{
DEBUGASSERT(regs[REG_R1] != 0);
memcpy((uint32_t*)regs[REG_R1], regs, XCPTCONTEXT_SIZE);
#if defined(CONFIG_ARCH_FPU) && !defined(CONFIG_ARMV7M_CMNVECTOR)
#if defined(CONFIG_ARCH_FPU) && \
(!defined(CONFIG_ARMV7M_CMNVECTOR) || defined(CONFIG_ARMV7M_LAZYFPU))
up_savefpu((uint32_t*)regs[REG_R1]);
#endif
}
@ -254,7 +255,8 @@ int up_svcall(int irq, FAR void *context)
{
DEBUGASSERT(regs[REG_R1] != 0 && regs[REG_R2] != 0);
memcpy((uint32_t*)regs[REG_R1], regs, XCPTCONTEXT_SIZE);
#if defined(CONFIG_ARCH_FPU) && !defined(CONFIG_ARMV7M_CMNVECTOR)
#if defined(CONFIG_ARCH_FPU) && \
(!defined(CONFIG_ARMV7M_CMNVECTOR) || defined(CONFIG_ARMV7M_LAZYFPU))
up_savefpu((uint32_t*)regs[REG_R1]);
#endif
current_regs = (uint32_t*)regs[REG_R2];

View File

@ -51,7 +51,11 @@ CMN_CSRCS += up_sigdeliver.c up_stackframe.c up_svcall.c up_systemreset.c
CMN_CSRCS += up_udelay.c up_unblocktask.c up_usestack.c up_vfork.c
ifeq ($(CONFIG_ARMV7M_CMNVECTOR),y)
ifeq ($(CONFIG_ARMV7M_LAZYFPU),y)
CMN_ASRCS += up_lazyexception.S
else
CMN_ASRCS += up_exception.S
endif
CMN_CSRCS += up_vectors.c
endif
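
Together with the new Kconfig option, a board would opt into the lazy logic with a configuration fragment along these lines (illustrative only; ARCH_HAVE_CMNVECTOR is selected by the chip, not set by hand):

CONFIG_ARMV7M_CMNVECTOR=y
CONFIG_ARMV7M_LAZYFPU=y
CONFIG_ARCH_FPU=y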

View File

@ -1,7 +1,7 @@
/****************************************************************************
* arch/arm/src/efm32/efm32_start.c
*
* Copyright (C) 2014 Gregory Nutt. All rights reserved.
* Copyright (C) 2014-2015 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -121,7 +121,7 @@ static void go_os_start(void *pv, unsigned int nbytes)
****************************************************************************/
#ifdef CONFIG_ARCH_FPU
#ifdef CONFIG_ARMV7M_CMNVECTOR
#if defined(CONFIG_ARMV7M_CMNVECTOR) && !defined(CONFIG_ARMV7M_LAZYFPU)
static inline void efm32_fpuconfig(void)
{
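
Whichever variant is selected, the chip fpuconfig() functions must at minimum grant access to the CP10/CP11 coprocessors before any floating point instruction executes. A minimal, generic ARMv7-M sketch of that one step (not the EFM32 implementation; the CPACR address is per the ARMv7-M architecture manual):

#include <stdint.h>

#define CPACR (*(volatile uint32_t *)0xe000ed88) /* Coprocessor Access Control */

static inline void fpu_enable(void)
{
  CPACR |= (0xfu << 20);           /* Full access for CP10 and CP11 (the FPU) */
  __asm__ volatile ("dsb\n\tisb"); /* Make the access change take effect */
}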

View File

@ -58,7 +58,11 @@ CMN_CSRCS += up_sigdeliver.c up_stackframe.c up_unblocktask.c up_usestack.c
CMN_CSRCS += up_doirq.c up_hardfault.c up_svcall.c up_checkstack.c up_vfork.c
ifeq ($(CONFIG_ARMV7M_CMNVECTOR),y)
ifeq ($(CONFIG_ARMV7M_LAZYFPU),y)
CMN_ASRCS += up_lazyexception.S
else
CMN_ASRCS += up_exception.S
endif
CMN_CSRCS += up_vectors.c
endif

View File

@ -2,7 +2,7 @@
* arch/arm/src/lpc17xx/lpc17_start.c
* arch/arm/src/chip/lpc17_start.c
*
* Copyright (C) 2010, 2012-2013 Gregory Nutt. All rights reserved.
* Copyright (C) 2010, 2012-2013, 2015 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -112,7 +112,7 @@
****************************************************************************/
#ifdef CONFIG_ARCH_FPU
#ifdef CONFIG_ARMV7M_CMNVECTOR
#if defined(CONFIG_ARMV7M_CMNVECTOR) && !defined(CONFIG_ARMV7M_LAZYFPU)
static inline void lpc17_fpuconfig(void)
{

View File

@ -48,7 +48,11 @@ CMN_CSRCS += up_unblocktask.c up_usestack.c up_doirq.c up_hardfault.c
CMN_CSRCS += up_svcall.c up_vfork.c
ifeq ($(CONFIG_ARMV7M_CMNVECTOR),y)
ifeq ($(CONFIG_ARMV7M_LAZYFPU),y)
CMN_ASRCS += up_lazyexception.S
else
CMN_ASRCS += up_exception.S
endif
CMN_CSRCS += up_vectors.c
endif

View File

@ -2,7 +2,7 @@
* arch/arm/src/lpc43xx/lpc43_start.c
* arch/arm/src/chip/lpc43_start.c
*
* Copyright (C) 2012 Gregory Nutt. All rights reserved.
* Copyright (C) 2012, 2015 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -187,7 +187,7 @@ static inline void lpc43_enabuffering(void)
****************************************************************************/
#ifdef CONFIG_ARCH_FPU
#ifdef CONFIG_ARMV7M_CMNVECTOR
#if defined(CONFIG_ARMV7M_CMNVECTOR) && !defined(CONFIG_ARMV7M_LAZYFPU)
static inline void lpc43_fpuconfig(void)
{

View File

@ -35,7 +35,11 @@
# The start-up, "head", file
ifeq ($(CONFIG_ARMV7M_CMNVECTOR),y)
HEAD_ASRC =
else
HEAD_ASRC = sam_vectors.S
endif
# Common ARM and Cortex-M3 files
@ -54,6 +58,15 @@ CMN_CSRCS += up_doirq.c up_hardfault.c up_svcall.c up_vfork.c
# Configuration-dependent common files
ifeq ($(CONFIG_ARMV7M_CMNVECTOR),y)
ifeq ($(CONFIG_ARMV7M_LAZYFPU),y)
CMN_ASRCS += up_lazyexception.S
else
CMN_ASRCS += up_exception.S
endif
CMN_CSRCS += up_vectors.c
endif
ifeq ($(CONFIG_ARCH_RAMVECTORS),y)
CMN_CSRCS += up_ramvec_initialize.c up_ramvec_attach.c
endif
@ -74,6 +87,13 @@ ifeq ($(CONFIG_ELF),y)
CMN_CSRCS += up_elf.c
endif
ifeq ($(CONFIG_ARCH_FPU),y)
CMN_ASRCS += up_fpu.S
ifneq ($(CONFIG_ARMV7M_CMNVECTOR),y)
CMN_CSRCS += up_copyarmstate.c
endif
endif
ifeq ($(CONFIG_STACK_COLORATION),y)
CMN_CSRCS += up_checkstack.c
endif
@ -86,6 +106,10 @@ CHIP_CSRCS += sam_serial.c sam_start.c
# Configuration-dependent SAM3/4 files
ifeq ($(CONFIG_ARMV7M_CMNVECTOR),y)
CHIP_ASRCS += sam_vectors.S
endif
ifneq ($(CONFIG_SCHED_TICKLESS),y)
CHIP_CSRCS += sam_timerisr.c
endif

View File

@ -118,7 +118,7 @@ void __start(void) __attribute__ ((no_instrument_function));
****************************************************************************/
#ifdef CONFIG_ARCH_FPU
#ifdef CONFIG_ARMV7M_CMNVECTOR
#if defined(CONFIG_ARMV7M_CMNVECTOR) && !defined(CONFIG_ARMV7M_LAZYFPU)
static inline void sam_fpuconfig(void)
{

View File

@ -59,7 +59,11 @@ CMN_CSRCS += up_stackcheck.c
endif
ifeq ($(CONFIG_ARMV7M_CMNVECTOR),y)
ifeq ($(CONFIG_ARMV7M_LAZYFPU),y)
CMN_ASRCS += up_lazyexception.S
else
CMN_ASRCS += up_exception.S
endif
CMN_CSRCS += up_vectors.c
endif

View File

@ -2,7 +2,7 @@
* arch/arm/src/stm32/stm32_start.c
* arch/arm/src/chip/stm32_start.c
*
* Copyright (C) 2009, 2011-2014 Gregory Nutt. All rights reserved.
* Copyright (C) 2009, 2011-2015 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -122,7 +122,7 @@ void __start(void) __attribute__ ((no_instrument_function));
****************************************************************************/
#ifdef CONFIG_ARCH_FPU
#ifdef CONFIG_ARMV7M_CMNVECTOR
#if defined(CONFIG_ARMV7M_CMNVECTOR) && !defined(CONFIG_ARMV7M_LAZYFPU)
static inline void stm32_fpuconfig(void)
{