
Rename _TCB to struct tcb_s

git-svn-id: svn://svn.code.sf.net/p/nuttx/code/trunk@5610 42af7a65-404d-4744-a932-0658087f49c3
patacongo 2013-02-04 18:46:28 +00:00
parent 09afe06037
commit 7071ca9d21
296 changed files with 841 additions and 842 deletions

View File

@ -513,4 +513,6 @@
* Type of argv has changed from const char ** to char * const *
* apps/nshlib/nsh_parse.c: Fix memory leak: Need to detach after
creating a pthread.
* apps/examples and nshlib: Change name of _TCB to struct tcb_s to
match NuttX name change.

View File

@ -1,7 +1,7 @@
/****************************************************************************
* examples/thttpd/tasks/tasks.c
*
* Copyright (C) 2009, 2011 Gregory Nutt. All rights reserved.
* Copyright (C) 2009, 2011, 2013 Gregory Nutt. All rights reserved.
* Author: Gregory Nutt <gnutt@nuttx.org>
*
* Redistribution and use in source and binary forms, with or without
@ -103,7 +103,7 @@ static const char *g_ttypenames[4] =
* dangerous to reference global variables in the callback function.
*/
/* static */ void show_task(FAR _TCB *tcb, FAR void *arg)
/* static */ void show_task(FAR struct tcb_s *tcb, FAR void *arg)
{
int i;

View File

@ -110,7 +110,7 @@ static const char *g_ttypenames[4] =
****************************************************************************/
#ifndef CONFIG_NSH_DISABLE_PS
static void ps_task(FAR _TCB *tcb, FAR void *arg)
static void ps_task(FAR struct tcb_s *tcb, FAR void *arg)
{
struct nsh_vtbl_s *vtbl = (struct nsh_vtbl_s*)arg;
#if CONFIG_MAX_TASK_ARGS > 2

View File

@ -4121,4 +4121,7 @@
the task group structures. Now all message queues opened by
members of the group are closed when the last member of the group
exits.
* includes/nuttx/sched.h and Lots of files: Change name of _TCB to
struct tcb_s so that (1) it is consistent with other NuttX naming and
so that (2) the naming can handle some upcoming changes.
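
In practice the rename is purely mechanical. A minimal sketch of an affected callback, assuming it is registered with sched_foreach() as the nshlib and examples code changed in this commit is; the pid field access is illustrative only:

#include <stdio.h>
#include <nuttx/sched.h>

/* Before: static void show_task(FAR _TCB *tcb, FAR void *arg)
 * After:  only the type name changes; the TCB fields are unchanged.
 */

static void show_task(FAR struct tcb_s *tcb, FAR void *arg)
{
  /* Print the PID of each task passed in by sched_foreach() */

  printf("pid=%d\n", tcb->pid);
}

/* Usage: sched_foreach(show_task, NULL); */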

View File

@ -8,7 +8,7 @@
<tr align="center" bgcolor="#e4e4e4">
<td>
<h1><big><font color="#3c34ec"><i>On-Demand Paging</i></font></big></h1>
<p>Last Updated: August 12, 2010</p>
<p>Last Updated: February 4, 2010</p>
</td>
</tr>
</table>
@ -306,7 +306,7 @@
</p>
<p>
The page fill worker thread will maintain a static variable called <code>_TCB *g_pftcb</code>.
The page fill worker thread will maintain a static variable called <code>struct tcb_s *g_pftcb</code>.
If no fill is in progress, <code>g_pftcb</code> will be NULL.
Otherwise, it will point to the TCB of the task which is receiving the fill that is in progress.
</p>
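<p>
A minimal sketch of that state variable and the test described above, assuming it lives in the page fill worker thread's source file (everything except the g_pftcb name is illustrative):
</p>
<pre>
static FAR struct tcb_s *g_pftcb;  /* Task receiving the fill, or NULL */

static void pg_worker_check(void)
{
  if (g_pftcb == NULL)
    {
      /* No fill is in progress; a new fill may be started */
    }
  else
    {
      /* g_pftcb identifies the task whose fill is still in progress */
    }
}
</pre>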
@ -619,7 +619,7 @@
</p>
<ul><dl>
<dt>
<code>void up_block_task(FAR _TCB *tcb, tstate_t task_state);</code>
<code>void up_block_task(FAR struct tcb_s *tcb, tstate_t task_state);</code>
</dt>
<dd>
The currently executing task at the head of the ready to run list must be stopped.
@ -628,7 +628,7 @@
page fill, and to
</dd>
<dt>
<code>void up_unblock_task(FAR _TCB *tcb);</code>
<code>void up_unblock_task(FAR struct tcb_s *tcb);</code>
</dt>
<dd>
A task is currently in an inactive task list but has been prepped to execute.
@ -643,7 +643,7 @@
<ul><dl>
<dt>
<code>int up_checkmapping(FAR _TCB *tcb);</code>
<code>int up_checkmapping(FAR struct tcb_s *tcb);</code>
</dt>
<dd>
The function <code>up_checkmapping()</code> returns an indication of whether or not the page fill still needs to be performed.
@ -651,7 +651,7 @@
This function will prevent the same page from being filled multiple times.
</dd>
<dt>
<code>int up_allocpage(FAR _TCB *tcb, FAR void *vpage);</code>
<code>int up_allocpage(FAR struct tcb_s *tcb, FAR void *vpage);</code>
</dt>
<dd>
This architecture-specific function will set aside a page in memory and map it to its correct virtual address.
@ -661,7 +661,7 @@
NOTE: This function must <i>always</i> return a page allocation.
If all available pages are in-use (the typical case), then this function will select a page in-use, un-map it, and make it available.
</dd>
<dt><code>int up_fillpage(FAR _TCB *tcb, FAR const void *vpage, void (*pg_callback)(FAR _TCB *tcb, int result));</code>
<dt><code>int up_fillpage(FAR struct tcb_s *tcb, FAR const void *vpage, void (*pg_callback)(FAR struct tcb_s *tcb, int result));</code>
</dt>
The actual filling of the page with data from non-volatile storage must be performed by a separate call to the architecture-specific function, <code>up_fillpage()</code>.
This will start an asynchronous page fill.
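A rough skeleton of that asynchronous form under the renamed type is sketched below. Only the <code>up_fillpage()</code> signature comes from the prototype above; <code>start_dma_read()</code> and <code>fill_done_isr()</code> are hypothetical placeholders for whatever the architecture's driver provides.
<pre>
static void (*g_pg_callback)(FAR struct tcb_s *tcb, int result);
static FAR struct tcb_s *g_pg_tcb;

static void fill_done_isr(int result)
{
  /* The transfer finished: notify the page fill worker thread */

  g_pg_callback(g_pg_tcb, result);
}

int up_fillpage(FAR struct tcb_s *tcb, FAR const void *vpage,
                void (*pg_callback)(FAR struct tcb_s *tcb, int result))
{
  /* Remember who to notify, then start the transfer from backing
   * store into the physical page mapped at vpage.  start_dma_read()
   * stands in for the architecture-specific driver call.
   */

  g_pg_tcb      = tcb;
  g_pg_callback = pg_callback;

  return start_dma_read(vpage, fill_done_isr);
}
</pre>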

View File

@ -12,7 +12,7 @@
<h1><big><font color="#3c34ec">
<i>NuttX RTOS Porting Guide</i>
</font></big></h1>
<p>Last Updated: January 23, 2013</p>
<p>Last Updated: February 4, 2013</p>
</td>
</tr>
</table>
@ -1590,7 +1590,7 @@ The system can be re-made subsequently by just typing <code>make</code>.
</p>
<h3><a name="upinitialstate">4.1.3 <code>up_initial_state()</code></a></h3>
<p><b>Prototype</b>: <code>void up_initial_state(FAR _TCB *tcb);</code></p>
<p><b>Prototype</b>: <code>void up_initial_state(FAR struct tcb_s *tcb);</code></p>
<p><b>Description</b>.
A new thread is being started and a new TCB has been created.
@ -1613,7 +1613,7 @@ The system can be re-made subsequently by just typing <code>make</code>.
</p>
<h3><a name="upcreatestack">4.1.4 <code>up_create_stack()</code></a></h3>
<p><b>Prototype</b>: <code>STATUS up_create_stack(FAR _TCB *tcb, size_t stack_size);</code></p>
<p><b>Prototype</b>: <code>STATUS up_create_stack(FAR struct tcb_s *tcb, size_t stack_size);</code></p>
<p><b>Description</b>.
Allocate a stack for a new thread and setup
@ -1648,7 +1648,7 @@ The system can be re-made subsequently by just typing <code>make</code>.
<h3><a name="upusestack">4.1.5 <code>up_use_stack()</code></a></h3>
<p><b>Prototype</b>:
<code>STATUS up_use_stack(FAR _TCB *tcb, FAR void *stack, size_t stack_size);</code>
<code>STATUS up_use_stack(FAR struct tcb_s *tcb, FAR void *stack, size_t stack_size);</code>
</p>
<p><b>Description</b>.
@ -1682,7 +1682,7 @@ The system can be re-made subsequently by just typing <code>make</code>.
</ul>
<h3><a name="upreleasestack">4.1.6 <code>up_release_stack()</code></a></h3>
<p><b>Prototype</b>: <code>void up_release_stack(FAR _TCB *dtcb);</code></p>
<p><b>Prototype</b>: <code>void up_release_stack(FAR struct tcb_s *dtcb);</code></p>
<p><b>Description</b>.
A task has been stopped. Free all stack
@ -1694,7 +1694,7 @@ The system can be re-made subsequently by just typing <code>make</code>.
</p>
<h3><a name="upunblocktask">4.1.7 <code>up_unblock_task()</code></a></h3>
<p><b>Prototype</b>: <code>void up_unblock_task(FAR _TCB *tcb);</code></p>
<p><b>Prototype</b>: <code>void up_unblock_task(FAR struct tcb_s *tcb);</code></p>
<p><b>Description</b>.
A task is currently in an inactive task list
@ -1717,7 +1717,7 @@ The system can be re-made subsequently by just typing <code>make</code>.
</ul>
<h3><a name="upblocktask">4.1.8 <code>up_block_task()</code></a></h3>
<p><b>Prototype</b>: <code>void up_block_task(FAR _TCB *tcb, tstate_t task_state);</code></p>
<p><b>Prototype</b>: <code>void up_block_task(FAR struct tcb_s *tcb, tstate_t task_state);</code></p>
<p><b>Description</b>.
The currently executing task at the head of
@ -1760,7 +1760,7 @@ The system can be re-made subsequently by just typing <code>make</code>.
</p>
<h3><a name="upreprioritizertr">4.1.10 <code>up_reprioritize_rtr()</code></a></h3>
<p><b>Prototype</b>: <code>void up_reprioritize_rtr(FAR _TCB *tcb, uint8_t priority);</code></p>
<p><b>Prototype</b>: <code>void up_reprioritize_rtr(FAR struct tcb_s *tcb, uint8_t priority);</code></p>
<p><b>Description</b>.
Called when the priority of a running or
@ -1821,7 +1821,7 @@ The system can be re-made subsequently by just typing <code>make</code>.
<h3><a name="upschedulesigaction">4.1.13 <code>up_schedule_sigaction()</code></a></h3>
<p><b>Prototype</b>:
<code>void up_schedule_sigaction(FAR _TCB *tcb, sig_deliver_t sigdeliver);</code>
<code>void up_schedule_sigaction(FAR struct tcb_s *tcb, sig_deliver_t sigdeliver);</code>
</p>
<p><b>Description</b>.
@ -2297,7 +2297,7 @@ else
<h4><a name="up_addrenv_assign">4.1.21.6 <code>up_addrenv_assign()</code></a></h4>
<p><b>Prototype</b>:<p>
<ul>
<code>int up_addrenv_assign(task_addrenv_t addrenv, FAR _TCB *tcb);</code>
<code>int up_addrenv_assign(task_addrenv_t addrenv, FAR struct tcb_s *tcb);</code>
</ul>
<p><b>Description</b>:</p>
<ul>
@ -2316,7 +2316,7 @@ else
<h4><a name="up_addrenv_share">4.1.21.7 <code>up_addrenv_share()</code></a></h4>
<p><b>Prototype</b>:<p>
<ul>
<code>int up_addrenv_share(FAR const _TCB *ptcb, FAR _TCB *ctcb);</code>
<code>int up_addrenv_share(FAR const struct tcb_s *ptcb, FAR struct tcb_s *ctcb);</code>
</ul>
<p><b>Description</b>:</p>
<ul>
@ -2336,7 +2336,7 @@ else
<h4><a name="up_addrenv_release">4.1.21.8 <code>up_addrenv_release()</code></a></h4>
<p><b>Prototype</b>:<p>
<ul>
<code>int up_addrenv_release(FAR _TCB *tcb);</code>
<code>int up_addrenv_release(FAR struct tcb_s *tcb);</code>
</ul>
<p><b>Description</b>:</p>
<ul>

View File

@ -13,7 +13,7 @@
<h1><big><font color="#3c34ec"><i>NuttX Operating System<p>User's Manual</i></font></big></h1>
<p><small>by</small></p>
<p>Gregory Nutt<p>
<p>Last Updated: February 2, 2013</p>
<p>Last Updated: February 4, 2013</p>
</td>
</tr>
</table>
@ -342,7 +342,7 @@ VxWorks provides the following similar interface:
<b>Function Prototype:</b>
<pre>
#include &lt;sched.h&gt;
int task_init(_TCB *tcb, char *name, int priority, uint32_t *stack, uint32_t stack_size,
int task_init(struct tcb_s *tcb, char *name, int priority, uint32_t *stack, uint32_t stack_size,
maint_t entry, char * const argv[]);
</pre>
@ -414,7 +414,7 @@ VxWorks provides the following similar interface:
<b>Function Prototype:</b>
<pre>
#include &lt;sched.h&gt;
int task_activate(_TCB *tcb);
int task_activate(struct tcb_s *tcb);
</pre>
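<p>
A hedged usage sketch of the two interfaces above follows. The TCB, stack, priority, and task name are illustrative; the entry-point type appears as <code>maint_t</code> in the prototype above and as <code>main_t</code> in the NuttX headers.
</p>
<pre>
#include &lt;sched.h&gt;
#include &lt;stdint.h&gt;

static int hello_main(int argc, char *argv[])
{
  return 0;
}

static struct tcb_s g_hello_tcb;
static uint32_t     g_hello_stack[256];

int start_hello(void)
{
  int ret = task_init(&g_hello_tcb, "hello", 100, g_hello_stack,
                      sizeof(g_hello_stack), hello_main, NULL);
  if (ret == OK)
    {
      /* The task is initialized but does not run until activated */

      ret = task_activate(&g_hello_tcb);
    }

  return ret;
}
</pre>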
<p>
@ -9188,7 +9188,7 @@ From the standpoint of the application, these structures (and
structure pointers) should be treated as simple handles to reference
OS resources. These hidden structures include:
<ul>
<li>_TCB
<li>struct tcb_s
<li>mqd_t
<li>sem_t
<li>WDOG_ID

View File

@ -198,8 +198,8 @@ o Task/Scheduler (sched/)
bug-for-bug compatibility, the same errno should be shared by
the task and each thread that it creates. It is *very* easy
to make this change: Just move the pterrno field from
_TCB to struct task_group_s. However, I am still not sure
if this should be done or not.
struct tcb_s to struct task_group_s. However, I am still not
sure if this should be done or not.
Status: Closed. The existing solution is better (although its
incompatibilities could show up in porting some code).
Priority: Low

View File

@ -70,7 +70,7 @@ static void _up_assert(int errorcode)
{
/* Are we in an interrupt handler or the idle task? */
if (g_irqtos || ((FAR _TCB*)g_readytorun.head)->pid == 0)
if (g_irqtos || ((FAR struct tcb_s*)g_readytorun.head)->pid == 0)
{
(void)irqsave();
for(;;)
@ -100,7 +100,7 @@ static void _up_assert(int errorcode)
void up_assert(const uint8_t *filename, int lineno)
{
#if CONFIG_TASK_NAME_SIZE > 0
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);
@ -124,7 +124,7 @@ void up_assert(const uint8_t *filename, int lineno)
void up_assert_code(const uint8_t *filename, int lineno, int errorcode)
{
#if CONFIG_TASK_NAME_SIZE > 0
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);

View File

@ -83,7 +83,7 @@
*
************************************************************************/
void up_block_task(FAR _TCB *tcb, tstate_t task_state)
void up_block_task(FAR struct tcb_s *tcb, tstate_t task_state)
{
/* Verify that the context switch can be performed */
@ -94,7 +94,7 @@ void up_block_task(FAR _TCB *tcb, tstate_t task_state)
}
else
{
FAR _TCB *rtcb = (FAR _TCB*)g_readytorun.head;
FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
bool switch_needed;
dbg("Blocking TCB=%p\n", tcb);
@ -139,7 +139,7 @@ void up_block_task(FAR _TCB *tcb, tstate_t task_state)
* of the g_readytorun task list.
*/
rtcb = (FAR _TCB*)g_readytorun.head;
rtcb = (FAR struct tcb_s*)g_readytorun.head;
dbg("New Active Task TCB=%p\n", rtcb);
/* Then setup so that the context will be performed on exit
@ -160,7 +160,7 @@ void up_block_task(FAR _TCB *tcb, tstate_t task_state)
* of the g_readytorun task list.
*/
rtcb = (FAR _TCB*)g_readytorun.head;
rtcb = (FAR struct tcb_s*)g_readytorun.head;
dbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */

View File

@ -77,7 +77,7 @@
void _exit(int status)
{
FAR _TCB* tcb;
FAR struct tcb_s* tcb;
dbg("TCB=%p exitting\n", tcb);
@ -95,7 +95,7 @@ void _exit(int status)
* head of the list.
*/
tcb = (FAR _TCB*)g_readytorun.head;
tcb = (FAR struct tcb_s*)g_readytorun.head;
dbg("New Active Task TCB=%p\n", tcb);
/* Then switch contexts */

View File

@ -68,7 +68,7 @@ volatile uint8_t g_irqtos;
* during the interrupt handling, these registers will be
* copied into the TCB again (NOTE: We could save a copy
* if the interrupt handling logic saved the registers
* directly into (_TCB*)g_readytorun.head->xcp.regs).
* directly into (struct tcb_s*)g_readytorun.head->xcp.regs).
*/
uint8_t g_irqregs[REGS_SIZE];

View File

@ -74,7 +74,7 @@
*
************************************************************************/
void up_initial_state(FAR _TCB *tcb)
void up_initial_state(FAR struct tcb_s *tcb)
{
FAR uint8_t *frame = tcb->xcp.stack;
FAR uint8_t *regs = tcb->xcp.regs;

View File

@ -79,7 +79,7 @@ extern volatile uint8_t g_irqtos;
* during the interrupt handling, these registers will be
* copied into the TCB again (NOTE: We could save a copy
* if the interrupt handling logic saved the registers
* directly into (_TCB*)g_readytorun.head->xcp.regs).
* directly into (struct tcb_s*)g_readytorun.head->xcp.regs).
*/
extern uint8_t g_irqregs[REGS_SIZE];

View File

@ -76,7 +76,7 @@
void up_release_pending(void)
{
FAR _TCB *rtcb = (FAR _TCB*)g_readytorun.head;
FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
dbg("From TCB=%p\n", rtcb);
@ -102,7 +102,7 @@ void up_release_pending(void)
* of the g_readytorun task list.
*/
rtcb = (FAR _TCB*)g_readytorun.head;
rtcb = (FAR struct tcb_s*)g_readytorun.head;
dbg("New Active Task TCB=%p\n", rtcb);
/* Then setup so that the context will be performed on exit
@ -124,7 +124,7 @@ void up_release_pending(void)
* of the g_readytorun task list.
*/
rtcb = (FAR _TCB*)g_readytorun.head;
rtcb = (FAR struct tcb_s*)g_readytorun.head;
dbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */

View File

@ -85,7 +85,7 @@
*
****************************************************************************/
void up_reprioritize_rtr(FAR _TCB *tcb, uint8_t priority)
void up_reprioritize_rtr(FAR struct tcb_s *tcb, uint8_t priority)
{
/* Verify that the caller is sane */
@ -103,7 +103,7 @@ void up_reprioritize_rtr(FAR _TCB *tcb, uint8_t priority)
}
else
{
FAR _TCB *rtcb = (FAR _TCB*)g_readytorun.head;
FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
bool switch_needed;
dbg("TCB=%p PRI=%d\n", tcb, priority);
@ -157,7 +157,7 @@ void up_reprioritize_rtr(FAR _TCB *tcb, uint8_t priority)
* of the g_readytorun task list.
*/
rtcb = (FAR _TCB*)g_readytorun.head;
rtcb = (FAR struct tcb_s*)g_readytorun.head;
dbg("New Active Task TCB=%p\n", rtcb);
/* Then setup so that the context will be performed on exit
@ -178,7 +178,7 @@ void up_reprioritize_rtr(FAR _TCB *tcb, uint8_t priority)
* of the g_readytorun task list.
*/
rtcb = (FAR _TCB*)g_readytorun.head;
rtcb = (FAR struct tcb_s*)g_readytorun.head;
dbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */

View File

@ -80,7 +80,7 @@
*
************************************************************************/
void up_unblock_task(FAR _TCB *tcb)
void up_unblock_task(FAR struct tcb_s *tcb)
{
/* Verify that the context switch can be performed */
@ -91,7 +91,7 @@ void up_unblock_task(FAR _TCB *tcb)
}
else
{
FAR _TCB *rtcb = (FAR _TCB*)g_readytorun.head;
FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
dbg("Unblocking TCB=%p\n", tcb);
@ -131,7 +131,7 @@ void up_unblock_task(FAR _TCB *tcb)
* of the g_readytorun task list.
*/
rtcb = (FAR _TCB*)g_readytorun.head;
rtcb = (FAR struct tcb_s*)g_readytorun.head;
dbg("New Active Task TCB=%p\n", rtcb);
/* Then setup so that the context will be performed on exit
@ -154,7 +154,7 @@ void up_unblock_task(FAR _TCB *tcb)
* g_readytorun task list.
*/
rtcb = (FAR _TCB*)g_readytorun.head;
rtcb = (FAR struct tcb_s*)g_readytorun.head;
dbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */

View File

@ -165,7 +165,7 @@ static bool g_pgwrap;
*
****************************************************************************/
int up_allocpage(FAR _TCB *tcb, FAR void **vpage)
int up_allocpage(FAR struct tcb_s *tcb, FAR void **vpage)
{
uintptr_t vaddr;
uintptr_t paddr;

View File

@ -161,7 +161,7 @@ static inline void up_registerdump(void)
#ifdef CONFIG_ARCH_STACKDUMP
static void up_dumpstate(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
uint32_t sp = up_getsp();
uint32_t ustackbase;
uint32_t ustacksize;
@ -257,7 +257,7 @@ static void _up_assert(int errorcode)
{
/* Are we in an interrupt handler or the idle task? */
if (current_regs || ((_TCB*)g_readytorun.head)->pid == 0)
if (current_regs || ((struct tcb_s*)g_readytorun.head)->pid == 0)
{
(void)irqsave();
for(;;)
@ -287,7 +287,7 @@ static void _up_assert(int errorcode)
void up_assert(const uint8_t *filename, int lineno)
{
#ifdef CONFIG_PRINT_TASKNAME
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);
@ -309,7 +309,7 @@ void up_assert(const uint8_t *filename, int lineno)
void up_assert_code(const uint8_t *filename, int lineno, int errorcode)
{
#ifdef CONFIG_PRINT_TASKNAME
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);

View File

@ -84,7 +84,7 @@
*
****************************************************************************/
void up_block_task(_TCB *tcb, tstate_t task_state)
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
/* Verify that the context switch can be performed */
@ -95,7 +95,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
bool switch_needed;
/* Remove the tcb task from the ready-to-run list. If we
@ -138,7 +138,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -156,7 +156,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */

View File

@ -97,7 +97,7 @@
*
****************************************************************************/
bool up_checkmapping(FAR _TCB *tcb)
bool up_checkmapping(FAR struct tcb_s *tcb)
{
uintptr_t vaddr;
uint32_t *pte;

View File

@ -102,9 +102,9 @@
#ifdef CONFIG_PAGING
void up_dataabort(uint32_t *regs, uint32_t far, uint32_t fsr)
{
FAR _TCB *tcb = (FAR _TCB *)g_readytorun.head;
FAR struct tcb_s *tcb = (FAR struct tcb_s *)g_readytorun.head;
#ifdef CONFIG_PAGING
uint32_t *savestate;
uint32_t *savestate;
/* Save the saved processor context in current_regs where it can be accessed
* for register dumps and possibly context switching.

View File

@ -78,7 +78,7 @@
*
****************************************************************************/
void up_initial_state(_TCB *tcb)
void up_initial_state(struct tcb_s *tcb)
{
struct xcptcontext *xcp = &tcb->xcp;
uint32_t cpsr;

View File

@ -121,7 +121,7 @@ void up_prefetchabort(uint32_t *regs)
* prefetch and data aborts.
*/
FAR _TCB *tcb = (FAR _TCB *)g_readytorun.head;
FAR struct tcb_s *tcb = (FAR struct tcb_s *)g_readytorun.head;
tcb->xcp.far = regs[REG_R15];
/* Call pg_miss() to schedule the page fill. A consequences of this

View File

@ -75,7 +75,7 @@
void up_release_pending(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("From TCB=%p\n", rtcb);
@ -101,7 +101,7 @@ void up_release_pending(void)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */
@ -121,7 +121,7 @@ void up_release_pending(void)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */

View File

@ -84,7 +84,7 @@
*
****************************************************************************/
void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
/* Verify that the caller is sane */
@ -102,7 +102,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
bool switch_needed;
slldbg("TCB=%p PRI=%d\n", tcb, priority);
@ -156,7 +156,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */
@ -175,7 +175,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */

View File

@ -101,7 +101,7 @@
*
****************************************************************************/
void up_schedule_sigaction(_TCB *tcb, sig_deliver_t sigdeliver)
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
/* Refuse to handle nested signal actions */
@ -121,7 +121,7 @@ void up_schedule_sigaction(_TCB *tcb, sig_deliver_t sigdeliver)
sdbg("rtcb=0x%p current_regs=0x%p\n", g_readytorun.head, current_regs);
if (tcb == (_TCB*)g_readytorun.head)
if (tcb == (struct tcb_s*)g_readytorun.head)
{
/* CASE 1: We are not in an interrupt handler and
* a task is signalling itself for some reason.

View File

@ -81,7 +81,7 @@
void up_sigdeliver(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
uint32_t regs[XCPTCONTEXT_REGS];
sig_deliver_t sigdeliver;

View File

@ -79,7 +79,7 @@
*
****************************************************************************/
void up_unblock_task(_TCB *tcb)
void up_unblock_task(struct tcb_s *tcb)
{
/* Verify that the context switch can be performed */
@ -90,7 +90,7 @@ void up_unblock_task(_TCB *tcb)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
/* Remove the task from the blocked task list */
@ -128,7 +128,7 @@ void up_unblock_task(_TCB *tcb)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -148,7 +148,7 @@ void up_unblock_task(_TCB *tcb)
* g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */

View File

@ -167,8 +167,8 @@ static inline void up_registerdump(void)
#ifdef CONFIG_ARCH_STACKDUMP
static void up_dumpstate(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
uint32_t sp = up_getsp();
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
uint32_t sp = up_getsp();
uint32_t ustackbase;
uint32_t ustacksize;
#if CONFIG_ARCH_INTERRUPTSTACK > 3
@ -272,7 +272,7 @@ static void _up_assert(int errorcode)
{
/* Are we in an interrupt handler or the idle task? */
if (current_regs || ((_TCB*)g_readytorun.head)->pid == 0)
if (current_regs || ((struct tcb_s*)g_readytorun.head)->pid == 0)
{
(void)irqsave();
for(;;)
@ -302,7 +302,7 @@ static void _up_assert(int errorcode)
void up_assert(const uint8_t *filename, int lineno)
{
#ifdef CONFIG_PRINT_TASKNAME
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);
@ -324,7 +324,7 @@ void up_assert(const uint8_t *filename, int lineno)
void up_assert_code(const uint8_t *filename, int lineno, int errorcode)
{
#ifdef CONFIG_PRINT_TASKNAME
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);

View File

@ -84,7 +84,7 @@
*
****************************************************************************/
void up_block_task(_TCB *tcb, tstate_t task_state)
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
/* Verify that the context switch can be performed */
@ -95,7 +95,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
bool switch_needed;
/* Remove the tcb task from the ready-to-run list. If we
@ -138,7 +138,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -153,7 +153,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -81,7 +81,7 @@
*
****************************************************************************/
void up_initial_state(_TCB *tcb)
void up_initial_state(struct tcb_s *tcb)
{
struct xcptcontext *xcp = &tcb->xcp;

View File

@ -75,7 +75,7 @@
void up_release_pending(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("From TCB=%p\n", rtcb);
@ -100,7 +100,7 @@ void up_release_pending(void)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */
@ -116,7 +116,7 @@ void up_release_pending(void)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -84,7 +84,7 @@
*
****************************************************************************/
void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
/* Verify that the caller is sane */
@ -102,7 +102,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
bool switch_needed;
slldbg("TCB=%p PRI=%d\n", tcb, priority);
@ -157,7 +157,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */
@ -173,7 +173,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -101,7 +101,7 @@
*
****************************************************************************/
void up_schedule_sigaction(_TCB *tcb, sig_deliver_t sigdeliver)
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
/* Refuse to handle nested signal actions */
@ -121,7 +121,7 @@ void up_schedule_sigaction(_TCB *tcb, sig_deliver_t sigdeliver)
sdbg("rtcb=0x%p current_regs=0x%p\n", g_readytorun.head, current_regs);
if (tcb == (_TCB*)g_readytorun.head)
if (tcb == (struct tcb_s*)g_readytorun.head)
{
/* CASE 1: We are not in an interrupt handler and
* a task is signalling itself for some reason.

View File

@ -81,7 +81,7 @@
void up_sigdeliver(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
uint32_t regs[XCPTCONTEXT_REGS];
sig_deliver_t sigdeliver;

View File

@ -109,9 +109,9 @@
#ifdef CONFIG_NUTTX_KERNEL
static inline void dispatch_syscall(uint32_t *regs)
{
uint32_t cmd = regs[REG_R0];
FAR _TCB *rtcb = sched_self();
uintptr_t ret = (uintptr_t)ERROR;
uint32_t cmd = regs[REG_R0];
FAR struct tcb_s *rtcb = sched_self();
uintptr_t ret = (uintptr_t)ERROR;
/* Verify that the SYS call number is within range */

View File

@ -79,7 +79,7 @@
*
****************************************************************************/
void up_unblock_task(_TCB *tcb)
void up_unblock_task(struct tcb_s *tcb)
{
/* Verify that the context switch can be performed */
@ -90,7 +90,7 @@ void up_unblock_task(_TCB *tcb)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
/* Remove the task from the blocked task list */
@ -128,7 +128,7 @@ void up_unblock_task(_TCB *tcb)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -143,7 +143,7 @@ void up_unblock_task(_TCB *tcb)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -78,7 +78,7 @@
*
****************************************************************************/
size_t up_check_tcbstack(FAR _TCB *tcb)
size_t up_check_tcbstack(FAR struct tcb_s *tcb)
{
FAR uint32_t *ptr;
size_t mark;
@ -136,12 +136,12 @@ size_t up_check_tcbstack(FAR _TCB *tcb)
size_t up_check_stack(void)
{
return up_check_tcbstack((FAR _TCB*)g_readytorun.head);
return up_check_tcbstack((FAR struct tcb_s*)g_readytorun.head);
}
size_t up_check_stack_remain(void)
{
return ((FAR _TCB*)g_readytorun.head)->adj_stack_size - up_check_tcbstack((FAR _TCB*)g_readytorun.head);
return ((FAR struct tcb_s*)g_readytorun.head)->adj_stack_size - up_check_tcbstack((FAR struct tcb_s*)g_readytorun.head);
}
#endif /* CONFIG_DEBUG && CONFIG_DEBUG_STACK */

View File

@ -128,7 +128,7 @@ static void *memset32(void *s, uint32_t c, size_t n)
* must be allocated.
****************************************************************************/
int up_create_stack(_TCB *tcb, size_t stack_size)
int up_create_stack(struct tcb_s *tcb, size_t stack_size)
{
if (tcb->stack_alloc_ptr &&
tcb->adj_stack_size != stack_size)
@ -174,7 +174,7 @@ int up_create_stack(_TCB *tcb, size_t stack_size)
size_of_stack = top_of_stack - (uint32_t)tcb->stack_alloc_ptr + 4;
/* Save the adjusted stack values in the _TCB */
/* Save the adjusted stack values in the struct tcb_s */
tcb->adj_stack_ptr = (uint32_t*)top_of_stack;
tcb->adj_stack_size = size_of_stack;

View File

@ -73,7 +73,7 @@
****************************************************************************/
#if defined(CONFIG_DUMP_ON_EXIT) && defined(CONFIG_DEBUG)
static void _up_dumponexit(FAR _TCB *tcb, FAR void *arg)
static void _up_dumponexit(FAR struct tcb_s *tcb, FAR void *arg)
{
#if CONFIG_NFILE_DESCRIPTORS > 0
FAR struct filelist *filelist;
@ -136,7 +136,7 @@ static void _up_dumponexit(FAR _TCB *tcb, FAR void *arg)
void _exit(int status)
{
_TCB* tcb;
struct tcb_s* tcb;
/* Disable interrupts. They will be restored when the next
* task is started.
@ -159,7 +159,7 @@ void _exit(int status)
* head of the list.
*/
tcb = (_TCB*)g_readytorun.head;
tcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */

View File

@ -410,8 +410,8 @@ extern void up_rnginitialize(void);
#if defined(CONFIG_DEBUG) && defined(CONFIG_DEBUG_STACK)
extern size_t up_check_stack(void);
extern size_t up_check_tcbstack(FAR _TCB);
extern size_t up_check_tcbstack_remain(FAR _TCB);
extern size_t up_check_tcbstack(FAR struct tcb_s);
extern size_t up_check_tcbstack_remain(FAR struct tcb_s);
#endif
#endif /* __ASSEMBLY__ */

View File

@ -67,7 +67,7 @@
*
****************************************************************************/
void up_release_stack(_TCB *dtcb)
void up_release_stack(struct tcb_s *dtcb)
{
if (dtcb->stack_alloc_ptr)
{

View File

@ -109,7 +109,7 @@
*
****************************************************************************/
int up_use_stack(_TCB *tcb, void *stack, size_t stack_size)
int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)
{
size_t top_of_stack;
size_t size_of_stack;
@ -146,7 +146,7 @@ int up_use_stack(_TCB *tcb, void *stack, size_t stack_size)
size_of_stack = top_of_stack - (uint32_t)tcb->stack_alloc_ptr + 4;
/* Save the adjusted stack values in the _TCB */
/* Save the adjusted stack values in the struct tcb_s */
tcb->adj_stack_ptr = (uint32_t*)top_of_stack;
tcb->adj_stack_size = size_of_stack;

View File

@ -126,8 +126,8 @@
pid_t up_vfork(const struct vfork_s *context)
{
_TCB *parent = (FAR _TCB *)g_readytorun.head;
_TCB *child;
struct tcb_s *parent = (FAR struct tcb_s *)g_readytorun.head;
struct tcb_s *child;
size_t stacksize;
uint32_t newsp;
uint32_t newfp;

View File

@ -84,7 +84,7 @@
*
****************************************************************************/
void up_block_task(_TCB *tcb, tstate_t task_state)
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
/* Verify that the context switch can be performed */
@ -95,7 +95,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
bool switch_needed;
/* Remove the tcb task from the ready-to-run list. If we
@ -138,7 +138,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -153,7 +153,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -78,7 +78,7 @@
*
****************************************************************************/
size_t up_check_tcbstack(FAR _TCB *tcb)
size_t up_check_tcbstack(FAR struct tcb_s *tcb)
{
FAR uint8_t *ptr;
size_t mark;
@ -135,7 +135,7 @@ size_t up_check_tcbstack(FAR _TCB *tcb)
size_t up_check_stack(void)
{
return up_check_tcbstack((FAR _TCB*)g_readytorun.head);
return up_check_tcbstack((FAR struct tcb_s*)g_readytorun.head);
}
#endif /* CONFIG_DEBUG && CONFIG_DEBUG_STACK */

View File

@ -87,7 +87,7 @@
*
****************************************************************************/
int up_create_stack(_TCB *tcb, size_t stack_size)
int up_create_stack(struct tcb_s *tcb, size_t stack_size)
{
/* Is there already a stack allocated of a different size? */
@ -137,7 +137,7 @@ int up_create_stack(_TCB *tcb, size_t stack_size)
top_of_stack = (size_t)tcb->stack_alloc_ptr + stack_size - 1;
/* Save the adjusted stack values in the _TCB */
/* Save the adjusted stack values in the struct tcb_s */
tcb->adj_stack_ptr = (FAR void *)top_of_stack;
tcb->adj_stack_size = stack_size;

View File

@ -168,8 +168,8 @@ static inline void up_registerdump(void)
void up_dumpstate(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
uint16_t sp = up_getsp();
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
uint16_t sp = up_getsp();
uint16_t ustackbase;
uint16_t ustacksize;
#if CONFIG_ARCH_INTERRUPTSTACK > 0

View File

@ -79,7 +79,7 @@
*
****************************************************************************/
void up_initial_state(_TCB *tcb)
void up_initial_state(struct tcb_s *tcb)
{
struct xcptcontext *xcp = &tcb->xcp;

View File

@ -75,7 +75,7 @@
void up_release_pending(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("From TCB=%p\n", rtcb);
@ -101,7 +101,7 @@ void up_release_pending(void)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */
@ -117,7 +117,7 @@ void up_release_pending(void)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -84,7 +84,7 @@
*
****************************************************************************/
void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
/* Verify that the caller is sane */
@ -102,7 +102,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
bool switch_needed;
slldbg("TCB=%p PRI=%d\n", tcb, priority);
@ -156,7 +156,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */
@ -172,7 +172,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -101,7 +101,7 @@
*
****************************************************************************/
void up_schedule_sigaction(_TCB *tcb, sig_deliver_t sigdeliver)
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
/* Refuse to handle nested signal actions */
@ -121,7 +121,7 @@ void up_schedule_sigaction(_TCB *tcb, sig_deliver_t sigdeliver)
sdbg("rtcb=0x%p current_regs=0x%p\n", g_readytorun.head, current_regs);
if (tcb == (_TCB*)g_readytorun.head)
if (tcb == (struct tcb_s*)g_readytorun.head)
{
/* CASE 1: We are not in an interrupt handler and
* a task is signalling itself for some reason.

View File

@ -81,7 +81,7 @@
void up_sigdeliver(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
uint8_t regs[XCPTCONTEXT_REGS];
sig_deliver_t sigdeliver;

View File

@ -79,7 +79,7 @@
*
****************************************************************************/
void up_unblock_task(_TCB *tcb)
void up_unblock_task(struct tcb_s *tcb)
{
/* Verify that the context switch can be performed */
@ -90,7 +90,7 @@ void up_unblock_task(_TCB *tcb)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
/* Remove the task from the blocked task list */
@ -128,7 +128,7 @@ void up_unblock_task(_TCB *tcb)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -143,7 +143,7 @@ void up_unblock_task(_TCB *tcb)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -82,7 +82,7 @@
*
****************************************************************************/
int up_use_stack(_TCB *tcb, void *stack, size_t stack_size)
int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)
{
size_t top_of_stack;
@ -115,7 +115,7 @@ int up_use_stack(_TCB *tcb, void *stack, size_t stack_size)
top_of_stack = (size_t)tcb->stack_alloc_ptr + stack_size - 1;
/* Save the adjusted stack values in the _TCB */
/* Save the adjusted stack values in the struct tcb_s */
tcb->adj_stack_ptr = (FAR void *)top_of_stack;
tcb->adj_stack_size = stack_size;

View File

@ -84,7 +84,7 @@
*
****************************************************************************/
void up_block_task(_TCB *tcb, tstate_t task_state)
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
/* Verify that the context switch can be performed */
@ -95,7 +95,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
bool switch_needed;
/* Remove the tcb task from the ready-to-run list. If we
@ -138,7 +138,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -153,7 +153,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -86,7 +86,7 @@
*
****************************************************************************/
int up_create_stack(_TCB *tcb, size_t stack_size)
int up_create_stack(struct tcb_s *tcb, size_t stack_size)
{
if (tcb->stack_alloc_ptr &&
tcb->adj_stack_size != stack_size)
@ -124,7 +124,7 @@ int up_create_stack(_TCB *tcb, size_t stack_size)
top_of_stack &= ~3;
size_of_stack = top_of_stack - (size_t)tcb->stack_alloc_ptr + 4;
/* Save the adjusted stack values in the _TCB */
/* Save the adjusted stack values in the struct tcb_s */
tcb->adj_stack_ptr = (FAR void *)top_of_stack;
tcb->adj_stack_size = size_of_stack;
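
A worked example of the adjustment above, assuming (as in the other up_create_stack variants in this commit) that the preceding line computed top_of_stack = (size_t)tcb->stack_alloc_ptr + stack_size - 1, with a hypothetical allocation at 0x20001000 and stack_size = 1024:

  top_of_stack  = 0x20001000 + 1024 - 1          = 0x200013ff
  top_of_stack &= ~3                             = 0x200013fc  (word aligned)
  size_of_stack = 0x200013fc - 0x20001000 + 4    = 1024

So adj_stack_ptr holds the word-aligned top of the stack and adj_stack_size still reports the full 1024 usable bytes.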

View File

@ -145,8 +145,8 @@ static inline void up_registerdump(void)
void up_dumpstate(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
uint32_t sp = up_getsp();
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
uint32_t sp = up_getsp();
uint32_t ustackbase;
uint32_t ustacksize;
#if CONFIG_ARCH_INTERRUPTSTACK > 3

View File

@ -77,7 +77,7 @@
*
****************************************************************************/
void up_initial_state(_TCB *tcb)
void up_initial_state(struct tcb_s *tcb)
{
struct xcptcontext *xcp = &tcb->xcp;

View File

@ -75,7 +75,7 @@
void up_release_pending(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("From TCB=%p\n", rtcb);
@ -101,7 +101,7 @@ void up_release_pending(void)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */
@ -117,7 +117,7 @@ void up_release_pending(void)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -84,7 +84,7 @@
*
****************************************************************************/
void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
/* Verify that the caller is sane */
@ -102,7 +102,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
bool switch_needed;
slldbg("TCB=%p PRI=%d\n", tcb, priority);
@ -156,7 +156,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */
@ -172,7 +172,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -101,7 +101,7 @@
*
****************************************************************************/
void up_schedule_sigaction(_TCB *tcb, sig_deliver_t sigdeliver)
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
/* Refuse to handle nested signal actions */
@ -121,7 +121,7 @@ void up_schedule_sigaction(_TCB *tcb, sig_deliver_t sigdeliver)
sdbg("rtcb=0x%p current_regs=0x%p\n", g_readytorun.head, current_regs);
if (tcb == (_TCB*)g_readytorun.head)
if (tcb == (struct tcb_s*)g_readytorun.head)
{
/* CASE 1: We are not in an interrupt handler and
* a task is signalling itself for some reason.

View File

@ -81,7 +81,7 @@
void up_sigdeliver(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#if 0
uint32_t regs[XCPTCONTEXT_REGS+3]; /* Why +3? See below */
#else

View File

@ -79,7 +79,7 @@
*
****************************************************************************/
void up_unblock_task(_TCB *tcb)
void up_unblock_task(struct tcb_s *tcb)
{
/* Verify that the context switch can be performed */
@ -90,7 +90,7 @@ void up_unblock_task(_TCB *tcb)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
/* Remove the task from the blocked task list */
@ -128,7 +128,7 @@ void up_unblock_task(_TCB *tcb)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -143,7 +143,7 @@ void up_unblock_task(_TCB *tcb)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -82,7 +82,7 @@
*
****************************************************************************/
int up_use_stack(_TCB *tcb, void *stack, size_t stack_size)
int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)
{
size_t top_of_stack;
size_t size_of_stack;
@ -113,7 +113,7 @@ int up_use_stack(_TCB *tcb, void *stack, size_t stack_size)
top_of_stack &= ~3;
size_of_stack = top_of_stack - (size_t)tcb->stack_alloc_ptr + 4;
/* Save the adjusted stack values in the _TCB */
/* Save the adjusted stack values in the struct tcb_s */
tcb->adj_stack_ptr = (FAR void *)top_of_stack;
tcb->adj_stack_size = size_of_stack;

View File

@ -95,7 +95,7 @@ static void _up_assert(int errorcode)
{
/* Are we in an interrupt handler or the idle task? */
if (current_regs || ((_TCB*)g_readytorun.head)->pid == 0)
if (current_regs || ((struct tcb_s*)g_readytorun.head)->pid == 0)
{
(void)irqsave();
for(;;)
@ -125,7 +125,7 @@ static void _up_assert(int errorcode)
void up_assert(const uint8_t *filename, int lineno)
{
#ifdef CONFIG_PRINT_TASKNAME
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);
@ -147,7 +147,7 @@ void up_assert(const uint8_t *filename, int lineno)
void up_assert_code(const uint8_t *filename, int lineno, int errorcode)
{
#ifdef CONFIG_PRINT_TASKNAME
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);

View File

@ -73,7 +73,7 @@
****************************************************************************/
#if defined(CONFIG_DUMP_ON_EXIT) && defined(CONFIG_DEBUG)
static void _up_dumponexit(FAR _TCB *tcb, FAR void *arg)
static void _up_dumponexit(FAR struct tcb_s *tcb, FAR void *arg)
{
#if CONFIG_NFILE_DESCRIPTORS > 0
FAR struct filelist *filelist;
@ -136,7 +136,7 @@ static void _up_dumponexit(FAR _TCB *tcb, FAR void *arg)
void _exit(int status)
{
_TCB* tcb;
struct tcb_s* tcb;
/* Disable interrupts. They will be restored when the next
* task is started.
@ -159,7 +159,7 @@ void _exit(int status)
* head of the list.
*/
tcb = (_TCB*)g_readytorun.head;
tcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */

View File

@ -67,7 +67,7 @@
*
****************************************************************************/
void up_release_stack(_TCB *dtcb)
void up_release_stack(struct tcb_s *dtcb)
{
if (dtcb->stack_alloc_ptr)
{

View File

@ -84,7 +84,7 @@
*
****************************************************************************/
void up_block_task(_TCB *tcb, tstate_t task_state)
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
/* Verify that the context switch can be performed */
@ -95,7 +95,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
bool switch_needed;
/* Remove the tcb task from the ready-to-run list. If we
@ -138,7 +138,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -156,7 +156,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */

View File

@ -83,7 +83,7 @@
*
****************************************************************************/
int up_create_stack(_TCB *tcb, size_t stack_size)
int up_create_stack(struct tcb_s *tcb, size_t stack_size)
{
if (tcb->stack_alloc_ptr && tcb->adj_stack_size != stack_size)
{
@ -118,7 +118,7 @@ int up_create_stack(_TCB *tcb, size_t stack_size)
top_of_stack &= ~1;
size_of_stack = top_of_stack - (size_t)tcb->stack_alloc_ptr;
/* Save the adjusted stack values in the _TCB */
/* Save the adjusted stack values in the struct tcb_s */
tcb->adj_stack_ptr = (uint32_t*)top_of_stack;
tcb->adj_stack_size = size_of_stack;

View File

@ -73,7 +73,7 @@
****************************************************************************/
#if defined(CONFIG_DUMP_ON_EXIT) && defined(CONFIG_DEBUG)
static void _up_dumponexit(FAR _TCB *tcb, FAR void *arg)
static void _up_dumponexit(FAR struct tcb_s *tcb, FAR void *arg)
{
#if CONFIG_NFILE_DESCRIPTORS > 0
FAR struct filelist *filelist;
@ -136,7 +136,7 @@ static void _up_dumponexit(FAR _TCB *tcb, FAR void *arg)
void _exit(int status)
{
_TCB* tcb;
struct tcb_s* tcb;
/* Disable interrupts. They will be restored when the next
* task is started.
@ -159,7 +159,7 @@ void _exit(int status)
* head of the list.
*/
tcb = (_TCB*)g_readytorun.head;
tcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */

View File

@ -74,7 +74,7 @@
void up_release_pending(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("From TCB=%p\n", rtcb);
@ -100,7 +100,7 @@ void up_release_pending(void)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */
@ -120,7 +120,7 @@ void up_release_pending(void)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */

View File

@ -68,7 +68,7 @@
*
****************************************************************************/
void up_release_stack(_TCB *dtcb)
void up_release_stack(struct tcb_s *dtcb)
{
if (dtcb->stack_alloc_ptr)
{

View File

@ -84,7 +84,7 @@
*
****************************************************************************/
void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
/* Verify that the caller is sane */
@ -102,7 +102,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
bool switch_needed;
slldbg("TCB=%p PRI=%d\n", tcb, priority);
@ -156,7 +156,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */
@ -175,7 +175,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */

View File

@ -79,7 +79,7 @@
*
****************************************************************************/
void up_unblock_task(_TCB *tcb)
void up_unblock_task(struct tcb_s *tcb)
{
/* Verify that the context switch can be performed */
@ -90,7 +90,7 @@ void up_unblock_task(_TCB *tcb)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
/* Remove the task from the blocked task list */
@ -128,7 +128,7 @@ void up_unblock_task(_TCB *tcb)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -148,7 +148,7 @@ void up_unblock_task(_TCB *tcb)
* g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */

View File

@ -80,7 +80,7 @@
*
****************************************************************************/
int up_use_stack(_TCB *tcb, void *stack, size_t stack_size)
int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)
{
size_t top_of_stack;
size_t size_of_stack;
@ -112,7 +112,7 @@ int up_use_stack(_TCB *tcb, void *stack, size_t stack_size)
top_of_stack &= ~1;
size_of_stack = top_of_stack - (size_t)tcb->stack_alloc_ptr;
/* Save the adjusted stack values in the _TCB */
/* Save the adjusted stack values in the struct tcb_s */
tcb->adj_stack_ptr = (uint32_t*)top_of_stack;
tcb->adj_stack_size = size_of_stack;

View File

@ -156,8 +156,8 @@ static inline void up_registerdump(void)
#ifdef CONFIG_ARCH_STACKDUMP
static void up_dumpstate(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
uint16_t sp = up_getsp();
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
uint16_t sp = up_getsp();
uint16_t ustackbase;
uint16_t ustacksize;
#if CONFIG_ARCH_INTERRUPTSTACK > 3
@ -252,7 +252,7 @@ static void _up_assert(int errorcode)
{
/* Are we in an interrupt handler or the idle task? */
if (current_regs || ((_TCB*)g_readytorun.head)->pid == 0)
if (current_regs || ((struct tcb_s*)g_readytorun.head)->pid == 0)
{
(void)irqsave();
for(;;)
@ -282,7 +282,7 @@ static void _up_assert(int errorcode)
void up_assert(const uint8_t *filename, int lineno)
{
#ifdef CONFIG_PRINT_TASKNAME
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);
@ -304,7 +304,7 @@ void up_assert(const uint8_t *filename, int lineno)
void up_assert_code(const uint8_t *filename, int lineno, int errorcode)
{
#ifdef CONFIG_PRINT_TASKNAME
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);

View File

@ -76,7 +76,7 @@
*
****************************************************************************/
void up_initial_state(_TCB *tcb)
void up_initial_state(struct tcb_s *tcb)
{
struct xcptcontext *xcp = &tcb->xcp;

View File

@ -84,7 +84,7 @@
* must be allocated.
****************************************************************************/
int up_create_stack(_TCB *tcb, size_t stack_size)
int up_create_stack(struct tcb_s *tcb, size_t stack_size)
{
if (tcb->stack_alloc_ptr &&
tcb->adj_stack_size != stack_size)
@ -124,7 +124,7 @@ int up_create_stack(_TCB *tcb, size_t stack_size)
top_of_stack &= ~3;
size_of_stack = top_of_stack - (uint32_t)tcb->stack_alloc_ptr + 4;
/* Save the adjusted stack values in the _TCB */
/* Save the adjusted stack values in the struct tcb_s */
tcb->adj_stack_ptr = (uint32_t*)top_of_stack;
tcb->adj_stack_size = size_of_stack;

View File

@ -75,7 +75,7 @@
****************************************************************************/
#if defined(CONFIG_DUMP_ON_EXIT) && defined(CONFIG_DEBUG)
static void _up_dumponexit(FAR _TCB *tcb, FAR void *arg)
static void _up_dumponexit(FAR struct tcb_s *tcb, FAR void *arg)
{
#if CONFIG_NFILE_DESCRIPTORS > 0
FAR struct filelist *filelist;
@ -138,7 +138,7 @@ static void _up_dumponexit(FAR _TCB *tcb, FAR void *arg)
void _exit(int status)
{
_TCB* tcb;
struct tcb_s* tcb;
/* Disable interrupts. They will be restored when the next
* task is started.
@ -161,7 +161,7 @@ void _exit(int status)
* head of the list.
*/
tcb = (_TCB*)g_readytorun.head;
tcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */

View File

@ -67,7 +67,7 @@
*
****************************************************************************/
void up_release_stack(_TCB *dtcb)
void up_release_stack(struct tcb_s *dtcb)
{
if (dtcb->stack_alloc_ptr)
{

View File

@ -82,7 +82,7 @@
*
****************************************************************************/
int up_use_stack(_TCB *tcb, void *stack, size_t stack_size)
int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)
{
size_t top_of_stack;
size_t size_of_stack;
@ -113,7 +113,7 @@ int up_use_stack(_TCB *tcb, void *stack, size_t stack_size)
top_of_stack &= ~3;
size_of_stack = top_of_stack - (uint32_t)tcb->stack_alloc_ptr + 4;
/* Save the adjusted stack values in the _TCB */
/* Save the adjusted stack values in the struct tcb_s */
tcb->adj_stack_ptr = (uint32_t*)top_of_stack;
tcb->adj_stack_size = size_of_stack;

View File

@ -95,7 +95,7 @@ static void _up_assert(int errorcode)
{
/* Are we in an interrupt handler or the idle task? */
if (current_regs || ((_TCB*)g_readytorun.head)->pid == 0)
if (current_regs || ((struct tcb_s*)g_readytorun.head)->pid == 0)
{
(void)irqsave();
for(;;)
@ -125,7 +125,7 @@ static void _up_assert(int errorcode)
void up_assert(const uint8_t *filename, int lineno)
{
#ifdef CONFIG_PRINT_TASKNAME
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);
@ -147,7 +147,7 @@ void up_assert(const uint8_t *filename, int lineno)
void up_assert_code(const uint8_t *filename, int lineno, int errorcode)
{
#ifdef CONFIG_PRINT_TASKNAME
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);

View File

@ -85,7 +85,7 @@
*
****************************************************************************/
void up_block_task(_TCB *tcb, tstate_t task_state)
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
/* Verify that the context switch can be performed */
@ -96,7 +96,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
bool switch_needed;
/* Remove the tcb task from the ready-to-run list. If we
@ -139,7 +139,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -154,7 +154,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -154,8 +154,8 @@ static inline void up_registerdump(void)
void up_dumpstate(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
uint32_t sp = up_getsp();
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
uint32_t sp = up_getsp();
uint32_t ustackbase;
uint32_t ustacksize;
#if CONFIG_ARCH_INTERRUPTSTACK > 3

View File

@ -80,7 +80,7 @@
*
****************************************************************************/
void up_initial_state(_TCB *tcb)
void up_initial_state(struct tcb_s *tcb)
{
struct xcptcontext *xcp = &tcb->xcp;
uint32_t regval;

View File

@ -77,7 +77,7 @@
void up_release_pending(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("From TCB=%p\n", rtcb);
@ -103,7 +103,7 @@ void up_release_pending(void)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */
@ -119,7 +119,7 @@ void up_release_pending(void)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -86,7 +86,7 @@
*
****************************************************************************/
void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
/* Verify that the caller is sane */
@ -104,7 +104,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
bool switch_needed;
slldbg("TCB=%p PRI=%d\n", tcb, priority);
@ -158,7 +158,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
slldbg("New Active Task TCB=%p\n", rtcb);
/* Then switch contexts */
@ -174,7 +174,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -101,7 +101,7 @@
*
****************************************************************************/
void up_schedule_sigaction(_TCB *tcb, sig_deliver_t sigdeliver)
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
uint32_t status;
@ -123,7 +123,7 @@ void up_schedule_sigaction(_TCB *tcb, sig_deliver_t sigdeliver)
sdbg("rtcb=0x%p current_regs=0x%p\n", g_readytorun.head, current_regs);
if (tcb == (_TCB*)g_readytorun.head)
if (tcb == (struct tcb_s*)g_readytorun.head)
{
/* CASE 1: We are not in an interrupt handler and
* a task is signalling itself for some reason.

View File

@ -82,7 +82,7 @@
void up_sigdeliver(void)
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
uint32_t regs[XCPTCONTEXT_REGS];
sig_deliver_t sigdeliver;

View File

@ -139,9 +139,9 @@ static void up_registerdump(const uint32_t *regs)
#ifdef CONFIG_NUTTX_KERNEL
static inline void dispatch_syscall(uint32_t *regs)
{
uint32_t cmd = regs[REG_A0];
FAR _TCB *rtcb = sched_self();
uintptr_t ret = (uintptr_t)ERROR;
uint32_t cmd = regs[REG_A0];
FAR struct tcb_s *rtcb = sched_self();
uintptr_t ret = (uintptr_t)ERROR;
/* Verify the the SYS call number is within range */

View File

@ -81,7 +81,7 @@
*
****************************************************************************/
void up_unblock_task(_TCB *tcb)
void up_unblock_task(struct tcb_s *tcb)
{
/* Verify that the context switch can be performed */
@ -92,7 +92,7 @@ void up_unblock_task(_TCB *tcb)
}
else
{
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
/* Remove the task from the blocked task list */
@ -130,7 +130,7 @@ void up_unblock_task(_TCB *tcb)
* of the g_readytorun task list.
*/
rtcb = (_TCB*)g_readytorun.head;
rtcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -145,7 +145,7 @@ void up_unblock_task(_TCB *tcb)
* ready to run list.
*/
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);
/* up_switchcontext forces a context switch to the task at the

View File

@ -113,8 +113,8 @@
pid_t up_vfork(const struct vfork_s *context)
{
_TCB *parent = (FAR _TCB *)g_readytorun.head;
_TCB *child;
struct tcb_s *parent = (FAR struct tcb_s *)g_readytorun.head;
struct tcb_s *child;
size_t stacksize;
uint32_t newsp;
#if CONFIG_MIPS32_FRAMEPOINTER

View File

@ -48,10 +48,10 @@
struct up_wait {
struct up_wait *next;
_TCB *task;
struct tcb_s *task;
};
extern _TCB *current_task;
extern struct tcb_s *current_task;
void up_sigentry(void);

View File

@ -55,7 +55,7 @@ void nuttx_arch_exit(void)
}
void up_initial_state(_TCB *tcb)
void up_initial_state(struct tcb_s *tcb)
{
struct Trapframe *tf;

View File

@ -54,7 +54,7 @@
#include <arch/arch.h>
#include <os_internal.h>
_TCB *current_task = NULL;
struct tcb_s *current_task = NULL;
/**
@ -62,7 +62,7 @@ _TCB *current_task = NULL;
* to switch tasks.
* Assumption: global interrupt is disabled.
*/
static inline void up_switchcontext(_TCB *ctcb, _TCB *ntcb)
static inline void up_switchcontext(struct tcb_s *ctcb, struct tcb_s *ntcb)
{
// do nothing if two tasks are the same
if (ctcb == ntcb)
@ -111,7 +111,7 @@ void up_allocate_heap(void **heap_start, size_t *heap_size)
*heap_size = KERNBASE + kmem_size - (uint32_t)boot_freemem;
}
int up_create_stack(_TCB *tcb, size_t stack_size)
int up_create_stack(struct tcb_s *tcb, size_t stack_size)
{
int ret = ERROR;
size_t *adj_stack_ptr;
@ -139,7 +139,7 @@ int up_create_stack(_TCB *tcb, size_t stack_size)
return ret;
}
int up_use_stack(_TCB *tcb, void *stack, size_t stack_size)
int up_use_stack(struct tcb_s *tcb, void *stack, size_t stack_size)
{
/* Move up to next even word boundary if necessary */
@ -158,7 +158,7 @@ int up_use_stack(_TCB *tcb, void *stack, size_t stack_size)
return OK;
}
void up_release_stack(_TCB *dtcb)
void up_release_stack(struct tcb_s *dtcb)
{
if (dtcb->stack_alloc_ptr) {
free(dtcb->stack_alloc_ptr);
@ -192,7 +192,7 @@ void up_release_stack(_TCB *dtcb)
* hold the blocked task TCB.
*
****************************************************************************/
void up_block_task(_TCB *tcb, tstate_t task_state)
void up_block_task(struct tcb_s *tcb, tstate_t task_state)
{
/* Verify that the context switch can be performed */
if ((tcb->task_state < FIRST_READY_TO_RUN_STATE) ||
@ -201,7 +201,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
return;
}
else {
_TCB *rtcb = current_task;
struct tcb_s *rtcb = current_task;
bool switch_needed;
/* Remove the tcb task from the ready-to-run list. If we
@ -217,7 +217,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
/* Now, perform the context switch if one is needed */
if (switch_needed) {
_TCB *nexttcb;
struct tcb_s *nexttcb;
// this part should not be executed in interrupt context
if (up_interrupt_context()) {
panic("%s: %d\n", __func__, __LINE__);
@ -230,7 +230,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
warn("Disable preemption failed for task block itself\n");
sched_mergepending();
}
nexttcb = (_TCB*)g_readytorun.head;
nexttcb = (struct tcb_s*)g_readytorun.head;
// context switch
up_switchcontext(rtcb, nexttcb);
}
@ -252,7 +252,7 @@ void up_block_task(_TCB *tcb, tstate_t task_state)
* ready to run taks, executed.
*
****************************************************************************/
void up_unblock_task(_TCB *tcb)
void up_unblock_task(struct tcb_s *tcb)
{
/* Verify that the context switch can be performed */
if ((tcb->task_state < FIRST_BLOCKED_STATE) ||
@ -261,7 +261,7 @@ void up_unblock_task(_TCB *tcb)
return;
}
else {
_TCB *rtcb = current_task;
struct tcb_s *rtcb = current_task;
/* Remove the task from the blocked task list */
sched_removeblocked(tcb);
@ -277,7 +277,7 @@ void up_unblock_task(_TCB *tcb)
// g_readytorun task list.
if (sched_addreadytorun(tcb) && !up_interrupt_context()) {
/* The currently active task has changed! */
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
// context switch
up_switchcontext(rtcb, nexttcb);
}
@ -290,20 +290,20 @@ void up_unblock_task(_TCB *tcb)
*/
void up_release_pending(void)
{
_TCB *rtcb = current_task;
struct tcb_s *rtcb = current_task;
/* Merge the g_pendingtasks list into the g_readytorun task list */
if (sched_mergepending()) {
/* The currently active task has changed! */
_TCB *nexttcb = (_TCB*)g_readytorun.head;
struct tcb_s *nexttcb = (struct tcb_s*)g_readytorun.head;
// context switch
up_switchcontext(rtcb, nexttcb);
}
}
void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
/* Verify that the caller is sane */
@ -320,7 +320,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
return;
}
else {
_TCB *rtcb = current_task;
struct tcb_s *rtcb = current_task;
bool switch_needed;
/* Remove the tcb task from the ready-to-run list.
@ -343,7 +343,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
/* Now, perform the context switch if one is needed */
if (switch_needed && !up_interrupt_context()) {
_TCB *nexttcb;
struct tcb_s *nexttcb;
// If there are any pending tasks, then add them to the g_readytorun
// task list now. It should be the up_realease_pending() called from
// sched_unlock() to do this for disable preemption. But it block
@ -353,7 +353,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
sched_mergepending();
}
nexttcb = (_TCB*)g_readytorun.head;
nexttcb = (struct tcb_s*)g_readytorun.head;
// context switch
up_switchcontext(rtcb, nexttcb);
}
@ -362,7 +362,7 @@ void up_reprioritize_rtr(_TCB *tcb, uint8_t priority)
void _exit(int status)
{
_TCB* tcb;
struct tcb_s* tcb;
/* Destroy the task at the head of the ready to run list. */
@ -372,7 +372,7 @@ void _exit(int status)
* head of the list.
*/
tcb = (_TCB*)g_readytorun.head;
tcb = (struct tcb_s*)g_readytorun.head;
/* Then switch contexts */
@ -413,7 +413,7 @@ void up_assert_code(const uint8_t *filename, int line, int code)
#ifndef CONFIG_DISABLE_SIGNALS
void up_schedule_sigaction(_TCB *tcb, sig_deliver_t sigdeliver)
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
/* Refuse to handle nested signal actions */
if (!tcb->xcp.sigdeliver) {

View File

@ -101,14 +101,14 @@ void rtos_exit_interrupt(void)
local_irq_disable();
nest_irq--;
if (!nest_irq) {
_TCB *rtcb = current_task;
_TCB *ntcb;
struct tcb_s *rtcb = current_task;
struct tcb_s *ntcb;
if (rtcb->xcp.sigdeliver) {
rtcb->xcp.ctx.tf = current_regs;
push_xcptcontext(&rtcb->xcp);
}
ntcb = (_TCB*)g_readytorun.head;
ntcb = (struct tcb_s*)g_readytorun.head;
// switch needed
if (rtcb != ntcb) {
rtcb->xcp.ctx.tf = current_regs;

View File

@ -71,7 +71,7 @@ void nuttx_arch_exit(void)
}
void up_initial_state(_TCB *tcb)
void up_initial_state(struct tcb_s *tcb)
{
struct Trapframe *tf;

View File

@ -81,7 +81,7 @@ static void _up_assert(int errorcode)
{
/* Are we in an interrupt handler or the idle task? */
if (current_regs || ((_TCB*)g_readytorun.head)->pid == 0)
if (current_regs || ((struct tcb_s*)g_readytorun.head)->pid == 0)
{
(void)irqsave();
for(;;)
@ -111,7 +111,7 @@ static void _up_assert(int errorcode)
void up_assert(const uint8_t *filename, int lineno)
{
#if CONFIG_TASK_NAME_SIZE > 0 && defined(CONFIG_DEBUG)
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);
@ -133,7 +133,7 @@ void up_assert(const uint8_t *filename, int lineno)
void up_assert_code(const uint8_t *filename, int lineno, int errorcode)
{
#if CONFIG_TASK_NAME_SIZE > 0 && defined(CONFIG_DEBUG)
_TCB *rtcb = (_TCB*)g_readytorun.head;
struct tcb_s *rtcb = (struct tcb_s*)g_readytorun.head;
#endif
up_ledon(LED_ASSERTION);

Some files were not shown because too many files have changed in this diff.
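
For anyone updating out-of-tree code against this rename, the sketch below shows the shape of a task-iteration callback after the change. It is illustrative only and is not part of this commit; it assumes the sched_foreach() iterator and the pid/name fields of struct tcb_s as found in the NuttX headers of this period.

/* Illustrative sketch -- not part of this commit.  A minimal callback for
 * the sched_foreach() task iterator, written against the new struct tcb_s
 * naming.  sched_foreach(), tcb->pid, and tcb->name are assumptions taken
 * from the NuttX headers of this period.
 */

#include <nuttx/config.h>
#include <nuttx/sched.h>
#include <stdio.h>

static void list_task(FAR struct tcb_s *tcb, FAR void *arg)
{
  /* arg is unused in this trivial example */

#if CONFIG_TASK_NAME_SIZE > 0
  printf("%5d %s\n", (int)tcb->pid, tcb->name);
#else
  printf("%5d\n", (int)tcb->pid);
#endif
}

/* Typical use, e.g. from a debug hook:
 *
 *   sched_foreach(list_task, NULL);
 */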