[Stylecheck] Correct style in the CM3 addition

This commit is contained in:
BuFran 2013-09-12 10:08:18 +02:00 committed by Piotr Esden-Tempski
parent 8e96592f55
commit aa9b94ee1d
1 changed file with 90 additions and 90 deletions

View File

@ -1,77 +1,77 @@
/** @defgroup CM3_cortex_defines Cortex Core Defines
*
* @brief <b>libopencm3 Defined Constants and Types for the Cortex Core </b>
*
* @ingroup CM3_defines
*
* @version 1.0.0
*
* LGPL License Terms @ref lgpl_license
/** @defgroup CM3_cortex_defines Cortex Core Defines
*
* @brief <b>libopencm3 Defined Constants and Types for the Cortex Core </b>
*
* @ingroup CM3_defines
*
* @version 1.0.0
*
* LGPL License Terms @ref lgpl_license
*/
/*
* This file is part of the libopencm3 project.
*
/*
* This file is part of the libopencm3 project.
*
* Copyright (C) 2013 Ben Gamari <bgamari@gmail.com>
* Copyright (C) 2013 Frantisek Burian <BuFran@seznam.cz>
*
* This library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef LIBOPENCM3_CORTEX_H
* Copyright (C) 2013 Frantisek Burian <BuFran@seznam.cz>
*
* This library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef LIBOPENCM3_CORTEX_H
#define LIBOPENCM3_CORTEX_H
/**@{*/
/*---------------------------------------------------------------------------*/
/** @brief Cortex M Enable interrupts
*
* Disable the interrupt mask and enable interrupts globally
*/
static inline void cm_enable_interrupts(void)
{
/* CPSIE I clears the PRIMASK register, unmasking (enabling) all
 * configurable interrupts globally. */
__asm__("CPSIE I\n");
}
*/
static inline void cm_enable_interrupts(void)
{
/* CPSIE I clears PRIMASK so all configurable interrupts are taken again. */
__asm__("CPSIE I\n");
}
/*---------------------------------------------------------------------------*/
/** @brief Cortex M Disable interrupts
*
* Mask all interrupts globally
*/
static inline void cm_disable_interrupts(void)
{
__asm__("CPSID I\n");
*/
static inline void cm_disable_interrupts(void)
{
/* CPSID I sets the PRIMASK register, masking (disabling) all
 * configurable interrupts globally. */
__asm__("CPSID I\n");
}
/*---------------------------------------------------------------------------*/
/** @brief Cortex M Enable faults
*
* Disable the HardFault mask and enable fault interrupt globally
*/
static inline void cm_enable_faults(void)
{
/* CPSIE F clears the FAULTMASK register, re-enabling fault
 * (HardFault) exception entry globally. */
__asm__("CPSIE F\n");
}
*/
static inline void cm_enable_faults(void)
{
/* CPSIE F clears FAULTMASK so fault exceptions are taken again. */
__asm__("CPSIE F\n");
}
/*---------------------------------------------------------------------------*/
/** @brief Cortex M Disable faults
*
* Mask the HardFault interrupt globally
*/
static inline void cm_disable_faults(void)
{
/* CPSID F sets the FAULTMASK register, masking the HardFault
 * exception globally. */
__asm__("CPSID F\n");
}
*/
static inline void cm_disable_faults(void)
{
/* CPSID F sets FAULTMASK so fault exceptions are masked. */
__asm__("CPSID F\n");
}
/*---------------------------------------------------------------------------*/
/** @brief Cortex M Check if interrupts are masked
@ -79,13 +79,13 @@ static inline void cm_disable_faults(void)
* Checks whether interrupts are masked (disabled).
*
* @returns true, if interrupts are disabled.
*/
__attribute__(( always_inline ))
static inline bool cm_is_masked_interrupts(void)
{
register uint32_t result;
__asm__ ("MRS %0, PRIMASK" : "=r" (result) );
return (result);
*/
__attribute__((always_inline))
static inline bool cm_is_masked_interrupts(void)
{
register uint32_t result;
/* Read the PRIMASK special register; a nonzero value means
 * interrupts are currently masked. */
__asm__ ("MRS %0, PRIMASK" : "=r" (result));
/* Implicit uint32_t -> bool conversion: any nonzero PRIMASK => true. */
return result;
}
/*---------------------------------------------------------------------------*/
@ -95,12 +95,12 @@ static inline bool cm_is_masked_interrupts(void)
*
* @returns bool true, if HardFault interrupt is disabled.
*/
__attribute__(( always_inline ))
static inline bool cm_is_masked_faults(void)
{
register uint32_t result;
__asm__ ("MRS %0, FAULTMASK" : "=r" (result) );
return (result);
__attribute__((always_inline))
static inline bool cm_is_masked_faults(void)
{
register uint32_t result;
/* Read the FAULTMASK special register; a nonzero value means the
 * HardFault exception is currently masked. */
__asm__ ("MRS %0, FAULTMASK" : "=r" (result));
/* Implicit uint32_t -> bool conversion: any nonzero FAULTMASK => true. */
return result;
}
/*---------------------------------------------------------------------------*/
@ -112,16 +112,16 @@ static inline bool cm_is_masked_faults(void)
*
* @param[in] mask bool New state of the interrupt mask
* @returns bool old state of the interrupt mask
*/
__attribute__(( always_inline ))
static inline bool cm_mask_interrupts(bool mask)
{
register bool old;
*/
__attribute__((always_inline))
static inline bool cm_mask_interrupts(bool mask)
{
register bool old;
/* Capture the current PRIMASK state before modifying it. */
__asm__ __volatile__("MRS %0, PRIMASK" : "=r" (old));
/* Compiler memory barrier: keep memory accesses from being reordered
 * across the mask change by the optimizer. */
__asm__ __volatile__("" ::: "memory");
/* Write the requested mask state into PRIMASK. */
__asm__ __volatile__("MSR PRIMASK, %0" : : "r" (mask));
/* Return the previous state so the caller can restore it later. */
return old;
}
__asm__ __volatile__("" : : : "memory");
__asm__ __volatile__("MSR PRIMASK, %0" : : "r" (mask));
return old;
}
/*---------------------------------------------------------------------------*/
/** @brief Cortex M Mask HardFault interrupt
@ -133,23 +133,23 @@ static inline bool cm_mask_interrupts(bool mask)
* @param[in] mask bool New state of the HardFault interrupt mask
* @returns bool old state of the HardFault interrupt mask
*/
__attribute__(( always_inline ))
static inline bool cm_mask_faults(bool mask)
{
register bool old;
__attribute__((always_inline))
static inline bool cm_mask_faults(bool mask)
{
register bool old;
/* Capture the current FAULTMASK state before modifying it. */
__asm__ __volatile__ ("MRS %0, FAULTMASK" : "=r" (old));
/* Compiler memory barrier: keep memory accesses from being reordered
 * across the mask change by the optimizer. */
__asm__ __volatile__ ("" ::: "memory");
/* Write the requested mask state, then return the previous state so
 * the caller can restore it later. */
__asm__ __volatile__ ("MSR FAULTMASK, %0" : : "r" (mask));
return old;
/* NOTE(review): the three statements below are unreachable (after the
 * return above) and appear to be residue of a diff render fusing the
 * old and new versions of this function — confirm against the real
 * source file and remove the duplicates. */
__asm__ __volatile__ ("" : : : "memory");
__asm__ __volatile__ ("MSR FAULTMASK, %0" : : "r" (mask));
return old;
}
/**@}*/
/*===========================================================================*/
/** @defgroup CM3_cortex_atomic_defines Cortex Core Atomic support Defines
*
* @brief Atomic operation support
*
/** @defgroup CM3_cortex_atomic_defines Cortex Core Atomic support Defines
*
* @brief Atomic operation support
*
* @ingroup CM3_cortex_defines
*/
/**@{*/
@ -214,7 +214,7 @@ static inline bool __cm_atomic_set(bool* val)
#define CM_ATOMIC_BLOCK()
#else /* defined(__DOXYGEN__) */
#define CM_ATOMIC_BLOCK() \
for (bool ___CM_SAVER(true), __My = true; __My; __My = false)
for (bool ___CM_SAVER(true), __my = true; __my; __my = false)
#endif /* defined(__DOXYGEN__) */
/*---------------------------------------------------------------------------*/
@ -241,7 +241,7 @@ static inline bool __cm_atomic_set(bool* val)
* ...
*
* for (int i=0;i < 100; i++) {
* CM_ATOMIC_CONTEXT(); // interrupts are masked in this block
* CM_ATOMIC_CONTEXT(); // interrupts are masked in this block
* value += 100; // access value as atomic
* if ((value % 16) == 0) {
* break; // restore interrupts and break cycle
@ -258,7 +258,7 @@ static inline bool __cm_atomic_set(bool* val)
*
* uint64_t getnextval(void)
* {
* CM_ATOMIC_CONTEXT(); // interrupts are masked in this block
* CM_ATOMIC_CONTEXT(); // interrupts are masked in this block
* value = value + 3; // do long atomic operation
* return value; // interrupts are restored automatically
* }
@ -268,10 +268,10 @@ static inline bool __cm_atomic_set(bool* val)
#define CM_ATOMIC_CONTEXT()
#else /* defined(__DOXYGEN__) */
#define CM_ATOMIC_CONTEXT() bool __CM_SAVER(true)
#endif /* defined(__DOXYGEN__) */
#endif /* defined(__DOXYGEN__) */
/**@}*/
#endif
#endif