/*
 * FreeRTOS Kernel V10.2.1
 * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 *
 * 1 tab == 4 spaces!
 */

/* Standard includes. */
#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

/* FreeRTOS includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "timers.h"
#include "stack_macros.h"
#if CONFIG_FTRACE
#include "ftrace.h"
#endif

/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
for the header files above, but not in this file, in order to generate the
correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */

/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
functions but without including stdio.h here. */
#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
    /* At the bottom of this file are two optional functions that can be used
    to generate human readable text from the raw data generated by the
    uxTaskGetSystemState() function.  Note the formatting functions are provided
    for convenience only, and are NOT considered part of the kernel. */
    #include <stdio.h>
#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */

#if( configUSE_PREEMPTION == 0 )
    /* If the cooperative scheduler is being used then a yield should not be
    performed just because a higher priority task has been woken. */
    #define taskYIELD_IF_USING_PREEMPTION()
#else
    #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/* Values that can be assigned to the ucNotifyState member of the TCB. */
#define taskNOT_WAITING_NOTIFICATION    ( ( uint8_t ) 0 )
#define taskWAITING_NOTIFICATION        ( ( uint8_t ) 1 )
#define taskNOTIFICATION_RECEIVED       ( ( uint8_t ) 2 )

/*
 * The value used to fill the stack of a task when the task is created.  This
 * is used purely for checking the high water mark for tasks.
 */
#define tskSTACK_FILL_BYTE              ( 0xa5U )

/* Bits used to record how a task's stack and TCB were allocated. */
#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB    ( ( uint8_t ) 0 )
#define tskSTATICALLY_ALLOCATED_STACK_ONLY        ( ( uint8_t ) 1 )
#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB     ( ( uint8_t ) 2 )

/* If any of the following are set then task stacks are filled with a known
value so the high water mark can be determined.  If none of the following are
set then don't fill the stack so there is no unnecessary dependency on memset. */
#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    1
#else
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    0
#endif
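
/* Illustrative sketch only (not part of the original source): when
tskSET_NEW_STACKS_TO_KNOWN_VALUE is 1 the unused portion of a task's stack
keeps the tskSTACK_FILL_BYTE pattern, so the free stack ("high water mark")
can be estimated by counting untouched fill bytes from the low end of the
stack area, much as prvTaskCheckFreeStackSpace() does further down in this
file.  Roughly, for a descending stack:

    const uint8_t *pucByte = ( const uint8_t * ) pxTCB->pxStack;
    uint32_t ulCount = 0UL;

    while( *pucByte == ( uint8_t ) tskSTACK_FILL_BYTE )
    {
        pucByte++;
        ulCount++;
    }

    // Convert the untouched byte count into words of never-used stack.
    ulCount /= ( uint32_t ) sizeof( StackType_t );
*/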

/*
 * Macros used by vTaskList() to indicate which state a task is in.
 */
#define tskRUNNING_CHAR      ( 'X' )
#define tskBLOCKED_CHAR      ( 'B' )
#define tskREADY_CHAR        ( 'R' )
#define tskDELETED_CHAR      ( 'D' )
#define tskSUSPENDED_CHAR    ( 'S' )

/*
 * Some kernel aware debuggers require the data the debugger needs access to to
 * be global, rather than file scope.
 */
#ifdef portREMOVE_STATIC_QUALIFIER
    #define static
#endif

/* The name allocated to the Idle task.  This can be overridden by defining
configIDLE_TASK_NAME in FreeRTOSConfig.h. */
#ifndef configIDLE_TASK_NAME
    #define configIDLE_TASK_NAME "IDLE"
#endif

#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )

    /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
    performed in a generic way that is not optimised to any particular
    microcontroller architecture. */

    /* uxTopReadyPriority holds the priority of the highest priority ready
    state task. */
    #define taskRECORD_READY_PRIORITY( uxPriority )                                                 \
    {                                                                                               \
        if( ( uxPriority ) > uxTopReadyPriority )                                                   \
        {                                                                                           \
            uxTopReadyPriority = ( uxPriority );                                                    \
        }                                                                                           \
    } /* taskRECORD_READY_PRIORITY */

    /*-----------------------------------------------------------*/

    #define taskSELECT_HIGHEST_PRIORITY_TASK()                                                      \
    {                                                                                               \
    UBaseType_t uxTopPriority = uxTopReadyPriority;                                                 \
                                                                                                    \
        /* Find the highest priority queue that contains ready tasks. */                           \
        while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) )                       \
        {                                                                                           \
            configASSERT( uxTopPriority );                                                          \
            --uxTopPriority;                                                                        \
        }                                                                                           \
                                                                                                    \
        /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of                   \
        the same priority get an equal share of the processor time. */                             \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );       \
        uxTopReadyPriority = uxTopPriority;                                                         \
    } /* taskSELECT_HIGHEST_PRIORITY_TASK */

    /*-----------------------------------------------------------*/

    /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
    they are only required when a port optimised method of task selection is
    being used. */
    #define taskRESET_READY_PRIORITY( uxPriority )
    #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )

#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */

    /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
    performed in a way that is tailored to the particular microcontroller
    architecture being used. */

    /* A port optimised version is provided.  Call the port defined macros. */
    #define taskRECORD_READY_PRIORITY( uxPriority )    portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )

    /*-----------------------------------------------------------*/

    #define taskSELECT_HIGHEST_PRIORITY_TASK()                                                      \
    {                                                                                               \
    UBaseType_t uxTopPriority;                                                                      \
                                                                                                    \
        /* Find the highest priority list that contains ready tasks. */                            \
        portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority );                              \
        configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 );     \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );       \
    } /* taskSELECT_HIGHEST_PRIORITY_TASK() */

    /*-----------------------------------------------------------*/

    /* A port optimised version is provided, call it only if the TCB being reset
    is being referenced from a ready list.  If it is referenced from a delayed
    or suspended list then it won't be in a ready list. */
    #define taskRESET_READY_PRIORITY( uxPriority )                                                  \
    {                                                                                               \
        if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
        {                                                                                           \
            portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) );                     \
        }                                                                                           \
    }

#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
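
/* Illustrative sketch only (not part of this file): when
configUSE_PORT_OPTIMISED_TASK_SELECTION is 1, uxTopReadyPriority is used as a
bitmap of ready priorities rather than a plain maximum, and the port header
supplies the three macros referenced above.  A typical Cortex-M style
definition, shown here purely as an example, looks roughly like:

    #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) \
        ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )

    #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) \
        ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )

    #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) \
        uxTopPriority = ( 31UL - ( uint32_t ) __builtin_clz( uxReadyPriorities ) )

Because one bit is used per priority, such ports also limit
configMAX_PRIORITIES to 32 or fewer. */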

/*-----------------------------------------------------------*/

/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
count overflows. */
#define taskSWITCH_DELAYED_LISTS()                                              \
{                                                                               \
    List_t *pxTemp;                                                             \
                                                                                \
    /* The delayed tasks list should be empty when the lists are switched. */  \
    configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) );                 \
                                                                                \
    pxTemp = pxDelayedTaskList;                                                 \
    pxDelayedTaskList = pxOverflowDelayedTaskList;                              \
    pxOverflowDelayedTaskList = pxTemp;                                         \
    xNumOfOverflows++;                                                          \
    prvResetNextTaskUnblockTime();                                              \
}
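
/* Illustrative note (not part of the original source): a blocked task's wake
time is calculated as xTickCount + xTicksToWait, so it can numerically wrap.
For example, with configUSE_16_BIT_TICKS set to 1, xTickCount = 0xFFF0 and a
block time of 0x0020 gives a wake time of 0x0010, which is "in the past" until
the tick count itself overflows.  Such tasks are placed on the overflow list,
and taskSWITCH_DELAYED_LISTS() swaps the two lists at the moment the tick count
wraps to zero, so the overflow list becomes the current delayed list. */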

/*-----------------------------------------------------------*/

/*
 * Place the task represented by pxTCB into the appropriate ready list for
 * the task.  It is inserted at the end of the list.
 */
#define prvAddTaskToReadyList( pxTCB )                                                              \
    traceMOVED_TASK_TO_READY_STATE( pxTCB );                                                        \
    taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority );                                             \
    vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
    tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
/*-----------------------------------------------------------*/

/*
 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
 * where NULL is used to indicate that the handle of the currently executing
 * task should be used in place of the parameter.  This macro simply checks to
 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
 */
#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) )

/* The item value of the event list item is normally used to hold the priority
of the task to which it belongs (coded to allow it to be held in reverse
priority order).  However, it is occasionally borrowed for other purposes.  It
is important its value is not updated due to a task priority change while it is
being used for another purpose.  The following bit definition is used to inform
the scheduler that the value should not be changed - in which case it is the
responsibility of whichever module is using the value to ensure it gets set back
to its original value when it is released. */
#if( configUSE_16_BIT_TICKS == 1 )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    0x8000U
#else
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    0x80000000UL
#endif
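
/* Illustrative note (not part of the original source): event lists are kept in
ascending item value order, so the value stored in a task's event list item is
configMAX_PRIORITIES - uxPriority rather than uxPriority itself.  For example,
with configMAX_PRIORITIES set to 5, a priority 4 task stores 1 and a priority 0
task stores 5, which places the higher priority task nearer the front of any
event list it is added to (see the listSET_LIST_ITEM_VALUE() call in
prvInitialiseNewTask() below). */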

/*
 * Task control block.  A task control block (TCB) is allocated for each task,
 * and stores task state information, including a pointer to the task's context
 * (the task's run time environment, including register values)
 */
typedef struct tskTaskControlBlock          /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
    volatile StackType_t    *pxTopOfStack;  /*< Points to the location of the last item placed on the task's stack.  THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */

    #if ( portUSING_MPU_WRAPPERS == 1 )
        xMPU_SETTINGS   xMPUSettings;       /*< The MPU settings are defined as part of the port layer.  THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
    #endif

    ListItem_t      xStateListItem;         /*< The list that the state list item of a task is referenced from denotes the state of that task (Ready, Blocked, Suspended). */
    ListItem_t      xEventListItem;         /*< Used to reference a task from an event list. */
    UBaseType_t     uxPriority;             /*< The priority of the task.  0 is the lowest priority. */
    StackType_t     *pxStack;               /*< Points to the start of the stack. */
    StackType_t     uStackDepth;            /*< The stack depth (in words) the task was created with, as recorded by prvInitialiseNewTask(). */
    char            pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created.  Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

    #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
        StackType_t     *pxEndOfStack;      /*< Points to the highest valid address for the stack. */
    #endif

    #if ( portCRITICAL_NESTING_IN_TCB == 1 )
        UBaseType_t     uxCriticalNesting;  /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t     uxTCBNumber;        /*< Stores a number that increments each time a TCB is created.  It allows debuggers to determine when a task has been deleted and then recreated. */
        UBaseType_t     uxTaskNumber;       /*< Stores a number specifically for use by third party trace code. */
    #endif

    #if ( configUSE_MUTEXES == 1 )
        UBaseType_t     uxBasePriority;     /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
        UBaseType_t     uxMutexesHeld;
    #endif

    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
        TaskHookFunction_t pxTaskTag;
    #endif

    #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
        void            *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
    #endif

    #if( configGENERATE_RUN_TIME_STATS == 1 )
        uint32_t        ulRunTimeCounter;   /*< Stores the amount of time the task has spent in the Running state. */
    #endif

    #if ( configUSE_NEWLIB_REENTRANT == 1 )
        /* Allocate a Newlib reent structure that is specific to this task.
        Note Newlib support has been included by popular demand, but is not
        used by the FreeRTOS maintainers themselves.  FreeRTOS is not
        responsible for resulting newlib operation.  User must be familiar with
        newlib and must provide system-wide implementations of the necessary
        stubs.  Be warned that (at the time of writing) the current newlib design
        implements a system-wide malloc() that must be provided with locks. */
        struct _reent   xNewLib_reent;
    #endif

    #if( configUSE_TASK_NOTIFICATIONS == 1 )
        volatile uint32_t ulNotifiedValue;
        volatile uint8_t ucNotifyState;
    #endif

    /* See the comments in FreeRTOS.h with the definition of
    tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
    #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
        uint8_t ucStaticallyAllocated;      /*< Set to pdTRUE if the task is statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if( INCLUDE_xTaskAbortDelay == 1 )
        uint8_t ucDelayAborted;
    #endif

    #if( configUSE_POSIX_ERRNO == 1 )
        int iTaskErrno;
    #endif
    #if ( configUSE_TASK_START_HOOK == 1 )
        void *pxTaskFun;
        void *pxTaskPara;
    #endif
    #if ENABLE_KASAN
        int kasan_depth;
    #endif
} tskTCB;

/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
below to enable the use of older kernel aware debuggers. */
typedef tskTCB TCB_t;

/*lint -save -e956 A manual analysis and inspection has been used to determine
which static variables must be declared volatile. */
PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;

/* Lists for ready and blocked tasks. --------------------
xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
doing so breaks some kernel aware debuggers and debuggers that rely on removing
the static qualifier. */
PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];    /*< Prioritised ready tasks. */
PRIVILEGED_DATA static List_t xDelayedTaskList1;                            /*< Delayed tasks. */
PRIVILEGED_DATA static List_t xDelayedTaskList2;                            /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count). */
PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList;                 /*< Points to the delayed task list currently being used. */
PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList;         /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t xPendingReadyList;                            /*< Tasks that have been readied while the scheduler was suspended.  They will be moved to the ready list when the scheduler is resumed. */

#if( INCLUDE_vTaskDelete == 1 )

    PRIVILEGED_DATA static List_t xTasksWaitingTermination;                 /*< Tasks that have been deleted - but their memory not yet freed. */
    PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;

#endif

#if ( INCLUDE_vTaskSuspend == 1 )

    PRIVILEGED_DATA static List_t xSuspendedTaskList;                       /*< Tasks that are currently suspended. */

#endif

/* Global POSIX errno.  Its value is changed upon context switching to match
the errno of the currently running task. */
#if ( configUSE_POSIX_ERRNO == 1 )
    int FreeRTOS_errno = 0;
#endif

/* Other file private variables. --------------------------------*/
PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks  = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xTickCount               = ( TickType_t ) configINITIAL_TICK_COUNT;
PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority      = tskIDLE_PRIORITY;
PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning        = pdFALSE;
PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks           = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile BaseType_t xYieldPending            = pdFALSE;
PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows          = ( BaseType_t ) 0;
PRIVILEGED_DATA static UBaseType_t uxTaskNumber                     = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime     = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle                 = NULL; /*< Holds the handle of the idle task.  The idle task is created automatically when the scheduler is started. */

/* Context switches are held pending while the scheduler is suspended.  Also,
interrupts must not manipulate the xStateListItem of a TCB, or any of the
lists the xStateListItem can be referenced from, if the scheduler is suspended.
If an interrupt needs to unblock a task while the scheduler is suspended then it
moves the task's event list item into the xPendingReadyList, ready for the
kernel to move the task from the pending ready list into the real ready list
when the scheduler is unsuspended.  The pending ready list itself can only be
accessed from a critical section. */
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended    = ( UBaseType_t ) pdFALSE;

#if ( configGENERATE_RUN_TIME_STATS == 1 )

    /* Do not move these variables to function scope as doing so prevents the
    code working with debuggers that need to remove the static qualifier. */
    PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */
    PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL;       /*< Holds the total amount of execution time as defined by the run time counter clock. */

#endif

/*lint -restore */

/*-----------------------------------------------------------*/

/* Callback function prototypes. --------------------------*/
#if( configCHECK_FOR_STACK_OVERFLOW > 0 )

    extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );

#endif

#if( configUSE_TICK_HOOK > 0 )

    extern void vApplicationTickHook( void ); /*lint !e526 Symbol not defined as it is an application callback. */

#endif

#if( configSUPPORT_STATIC_ALLOCATION == 1 )

    extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, configSTACK_DEPTH_TYPE *pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */

#endif

/* File private functions. --------------------------------*/

/**
 * Utility function that simply returns pdTRUE if the task referenced by xTask is
 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
 * is in any other state.
 */
#if ( INCLUDE_vTaskSuspend == 1 )

    static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;

#endif /* INCLUDE_vTaskSuspend */

/*
 * Utility to ready all the lists used by the scheduler.  This is called
 * automatically upon the creation of the first task.
 */
static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;

/*
 * The idle task, which like all tasks is implemented as a never ending loop.
 * The idle task is automatically created and added to the ready lists upon
 * creation of the first user task.
 *
 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
 * language extensions.  The equivalent prototype for this function is:
 *
 * void prvIdleTask( void *pvParameters );
 *
 */
static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters );

/*
 * Utility to free all memory allocated by the scheduler to hold a TCB,
 * including the stack pointed to by the TCB.
 *
 * This does not free memory allocated by the task itself (i.e. memory
 * allocated by calls to pvPortMalloc from within the tasks application code).
 */
#if ( INCLUDE_vTaskDelete == 1 )

    static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION;

#endif

/*
 * Used only by the idle task.  This checks to see if anything has been placed
 * in the list of tasks waiting to be deleted.  If so the task is cleaned up
 * and its TCB deleted.
 */
static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;

/*
 * The currently executing task is entering the Blocked state.  Add the task to
 * either the current or the overflow delayed task list.
 */
static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;

/*
 * Fills a TaskStatus_t structure with information on each task that is
 * referenced from the pxList list (which may be a ready list, a delayed list,
 * a suspended list, etc.).
 *
 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
 * NORMAL APPLICATION CODE.
 */
#if ( configUSE_TRACE_FACILITY == 1 )

    static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION;

#endif

/*
 * Searches pxList for a task with name pcNameToQuery - returning a handle to
 * the task if it is found, or NULL if the task is not found.
 */
#if ( INCLUDE_xTaskGetHandle == 1 )

    static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;

#endif

/*
 * When a task is created, the stack of the task is filled with a known value.
 * This function determines the 'high water mark' of the task stack by
 * determining how much of the stack remains at the original preset value.
 */
#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )

    static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;

#endif

/*
 * Return the amount of time, in ticks, that will pass before the kernel will
 * next move a task from the Blocked state to the Running state.
 *
 * This conditional compilation should use inequality to 0, not equality to 1.
 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
 * set to a value other than 1.
 */
#if ( configUSE_TICKLESS_IDLE != 0 )

    static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;

#endif

/*
 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
 * will exit the Blocked state.
 */
static void prvResetNextTaskUnblockTime( void );

#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )

    /*
     * Helper function used to pad task names with spaces when printing out
     * human readable tables of task information.
     */
    static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) PRIVILEGED_FUNCTION;

#endif

/*
 * Called after a TCB_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewTask(   TaskFunction_t pxTaskCode,
                                    const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                                    const uint32_t ulStackDepth,
                                    void * pvParameters,
                                    UBaseType_t uxPriority,
                                    TaskHandle_t * const pxCreatedTask,
                                    TCB_t *pxNewTCB,
                                    const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;

/*
 * Called after a new task has been created and initialised to place the task
 * under the control of the scheduler.
 */
static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION;

/*
 * freertos_tasks_c_additions_init() should only be called if the user definable
 * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
 * called by the function.
 */
#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT

    static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;

#endif

/*-----------------------------------------------------------*/

#if( configSUPPORT_STATIC_ALLOCATION == 1 )

    TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
                                    const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                                    const uint32_t ulStackDepth,
                                    void * const pvParameters,
                                    UBaseType_t uxPriority,
                                    StackType_t * const puxStackBuffer,
                                    StaticTask_t * const pxTaskBuffer )
    {
    TCB_t *pxNewTCB;
    TaskHandle_t xReturn;

        configASSERT( puxStackBuffer != NULL );
        configASSERT( pxTaskBuffer != NULL );

        #if( configASSERT_DEFINED == 1 )
        {
            /* Sanity check that the size of the structure used to declare a
            variable of type StaticTask_t equals the size of the real task
            structure. */
            volatile size_t xSize = sizeof( StaticTask_t );
            configASSERT( xSize == sizeof( TCB_t ) );
            ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
        }
        #endif /* configASSERT_DEFINED */

        if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
        {
            /* The memory used for the task's TCB and stack are passed into this
            function - use them. */
            pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
            pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;

            #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
            {
                /* Tasks can be created statically or dynamically, so note this
                task was created statically in case the task is later deleted. */
                pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
            }
            #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

            prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL );
            prvAddNewTaskToReadyList( pxNewTCB );
        }
        else
        {
            xReturn = NULL;
        }

        return xReturn;
    }

#endif /* SUPPORT_STATIC_ALLOCATION */
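
/* Illustrative usage sketch (not part of the kernel): an application creates a
statically allocated task by supplying both buffers itself.  The names and
sizes used below are hypothetical.

    #define EXAMPLE_STACK_SIZE_WORDS    128

    static StackType_t uxExampleStack[ EXAMPLE_STACK_SIZE_WORDS ];
    static StaticTask_t xExampleTaskBuffer;

    static void vExampleTask( void *pvParameters )
    {
        ( void ) pvParameters;

        for( ;; )
        {
            // Application work here.
        }
    }

    void vCreateExampleTask( void )
    {
        ( void ) xTaskCreateStatic( vExampleTask,
                                    "Example",
                                    EXAMPLE_STACK_SIZE_WORDS,
                                    NULL,
                                    tskIDLE_PRIORITY + 1,
                                    uxExampleStack,
                                    &xExampleTaskBuffer );
    }
*/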
/*-----------------------------------------------------------*/

#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

    BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
    {
    TCB_t *pxNewTCB;
    BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

        configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
        configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );

        if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
        {
            /* Allocate space for the TCB.  Where the memory comes from depends
            on the implementation of the port malloc function and whether or
            not static allocation is being used. */
            pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;

            /* Store the stack location in the TCB. */
            pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

            #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
            {
                /* Tasks can be created statically or dynamically, so note this
                task was created statically in case the task is later deleted. */
                pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
            }
            #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

            prvInitialiseNewTask(   pxTaskDefinition->pvTaskCode,
                                    pxTaskDefinition->pcName,
                                    ( uint32_t ) pxTaskDefinition->usStackDepth,
                                    pxTaskDefinition->pvParameters,
                                    pxTaskDefinition->uxPriority,
                                    pxCreatedTask, pxNewTCB,
                                    pxTaskDefinition->xRegions );

            prvAddNewTaskToReadyList( pxNewTCB );
            xReturn = pdPASS;
        }

        return xReturn;
    }

#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
/*-----------------------------------------------------------*/

#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

    BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
    {
    TCB_t *pxNewTCB;
    BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

        configASSERT( pxTaskDefinition->puxStackBuffer );

        if( pxTaskDefinition->puxStackBuffer != NULL )
        {
            /* Allocate space for the TCB.  Where the memory comes from depends
            on the implementation of the port malloc function and whether or
            not static allocation is being used. */
            pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );

            if( pxNewTCB != NULL )
            {
                /* Store the stack location in the TCB. */
                pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

                #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
                {
                    /* Tasks can be created statically or dynamically, so note
                    this task had a statically allocated stack in case it is
                    later deleted.  The TCB was allocated dynamically. */
                    pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
                }
                #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

                prvInitialiseNewTask(   pxTaskDefinition->pvTaskCode,
                                        pxTaskDefinition->pcName,
                                        ( uint32_t ) pxTaskDefinition->usStackDepth,
                                        pxTaskDefinition->pvParameters,
                                        pxTaskDefinition->uxPriority,
                                        pxCreatedTask, pxNewTCB,
                                        pxTaskDefinition->xRegions );

                prvAddNewTaskToReadyList( pxNewTCB );
                xReturn = pdPASS;
            }
        }

        return xReturn;
    }

#endif /* portUSING_MPU_WRAPPERS */
/*-----------------------------------------------------------*/
737
xiaohu.huang4f321fb2024-03-22 14:50:29 +0800738#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +0800739
xiaohu.huang4f321fb2024-03-22 14:50:29 +0800740 BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
741 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
742 const configSTACK_DEPTH_TYPE usStackDepth,
743 void * const pvParameters,
744 UBaseType_t uxPriority,
745 TaskHandle_t * const pxCreatedTask )
746 {
747 TCB_t *pxNewTCB;
748 BaseType_t xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +0800749
Xiaohu.Huang2c96ef42021-10-15 16:12:27 +0800750
xiaohu.huang4f321fb2024-03-22 14:50:29 +0800751 /* If the stack grows down then allocate the stack then the TCB so the stack
752 does not grow into the TCB. Likewise if the stack grows up then allocate
753 the TCB then the stack. */
754 #if( portSTACK_GROWTH > 0 )
755 {
756 /* Allocate space for the TCB. Where the memory comes from depends on
757 the implementation of the port malloc function and whether or not static
758 allocation is being used. */
759 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +0800760
xiaohu.huang4f321fb2024-03-22 14:50:29 +0800761 if( pxNewTCB != NULL )
762 {
763 /* Allocate space for the stack used by the task being created.
764 The base of the stack memory stored in the TCB so the task can
765 be deleted later if required. */
766 pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +0800767
xiaohu.huang4f321fb2024-03-22 14:50:29 +0800768 if( pxNewTCB->pxStack == NULL )
769 {
770 /* Could not allocate the stack. Delete the allocated TCB. */
771 vPortFree( pxNewTCB );
772 pxNewTCB = NULL;
773 }
774 }
775 }
776 #else /* portSTACK_GROWTH */
777 {
778 StackType_t *pxStack;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +0800779
xiaohu.huang4f321fb2024-03-22 14:50:29 +0800780 /* Allocate space for the stack used by the task being created. */
781 pxStack = pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +0800782
xiaohu.huang4f321fb2024-03-22 14:50:29 +0800783 if( pxStack != NULL )
784 {
785 /* Allocate space for the TCB. */
786 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +0800787
xiaohu.huang4f321fb2024-03-22 14:50:29 +0800788 if( pxNewTCB != NULL )
789 {
790 /* Store the stack location in the TCB. */
791 pxNewTCB->pxStack = pxStack;
792 }
793 else
794 {
795 /* The stack cannot be used as the TCB was not created. Free
796 it again. */
797 vPortFree( pxStack );
798 }
799 }
800 else
801 {
802 pxNewTCB = NULL;
803 }
804 }
805 #endif /* portSTACK_GROWTH */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +0800806
xiaohu.huang4f321fb2024-03-22 14:50:29 +0800807 if( pxNewTCB != NULL )
808 {
809 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */
810 {
811 /* Tasks can be created statically or dynamically, so note this
812 task was created dynamically in case it is later deleted. */
813 pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
814 }
815 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +0800816
xiaohu.huang4f321fb2024-03-22 14:50:29 +0800817 prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
818 prvAddNewTaskToReadyList( pxNewTCB );
819 xReturn = pdPASS;
820 }
821 else
822 {
823 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
824 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +0800825
xiaohu.huang4f321fb2024-03-22 14:50:29 +0800826 return xReturn;
827 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +0800828
829#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
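
/* Illustrative usage sketch (not part of the kernel): the dynamic equivalent of
the static creation example shown earlier.  The task name, stack depth and
priority used here are hypothetical.

    static void vExampleTask( void *pvParameters )
    {
        ( void ) pvParameters;

        for( ;; )
        {
            // Application work here.
        }
    }

    void vCreateExampleTask( void )
    {
        BaseType_t xStatus;

        xStatus = xTaskCreate( vExampleTask, "Example", 128, NULL, tskIDLE_PRIORITY + 1, NULL );
        configASSERT( xStatus == pdPASS );
    }
*/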
/*-----------------------------------------------------------*/
#if ( configUSE_TASK_START_HOOK == 1 )
    /* Wrapper used as the entry point of every task when the task start hook
    is enabled.  It calls the application supplied vApplicationTaskStartHook()
    once, in the context of the newly started task, before handing control to
    the real task function stored in the TCB by prvInitialiseNewTask(). */
    static void prvTaskFunWrp( void *para )
    {
        TCB_t *pxNewTCB = ( TCB_t * ) para;

        {
            extern void vApplicationTaskStartHook( void );
            vApplicationTaskStartHook();
        }

        /* Call the original task function with its original parameter. */
        ( ( TaskFunction_t ) pxNewTCB->pxTaskFun )( pxNewTCB->pxTaskPara );
    }
#endif
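
/* Illustrative usage sketch (not part of the kernel): when
configUSE_TASK_START_HOOK is 1 the application must supply the hook that the
wrapper above calls.  The body shown here is hypothetical.

    void vApplicationTaskStartHook( void )
    {
        // Runs once in the context of every newly started task, before the
        // task function itself is entered - per-task tracing or per-task
        // initialisation could be placed here.
    }
*/
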
static void prvInitialiseNewTask(   TaskFunction_t pxTaskCode,
                                    const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                                    const uint32_t ulStackDepth,
                                    void * pvParameters,
                                    UBaseType_t uxPriority,
                                    TaskHandle_t * const pxCreatedTask,
                                    TCB_t *pxNewTCB,
                                    const MemoryRegion_t * const xRegions )
{
StackType_t *pxTopOfStack;
UBaseType_t x;

    #if( portUSING_MPU_WRAPPERS == 1 )
        /* Should the task be created in privileged mode? */
        BaseType_t xRunPrivileged;
        if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
        {
            xRunPrivileged = pdTRUE;
        }
        else
        {
            xRunPrivileged = pdFALSE;
        }
        uxPriority &= ~portPRIVILEGE_BIT;
    #endif /* portUSING_MPU_WRAPPERS == 1 */

    #if ENABLE_KASAN
        pxNewTCB->kasan_depth = 0;
    #endif

    #if ( configUSE_TASK_START_HOOK == 1 )
        /* Remember the real task function and parameter, then start the task
        through the wrapper so the start hook runs first. */
        pxNewTCB->pxTaskFun = pxTaskCode;
        pxNewTCB->pxTaskPara = pvParameters;
        pxTaskCode = prvTaskFunWrp;
        pvParameters = pxNewTCB;
    #endif

    pxNewTCB->uStackDepth = ulStackDepth;
    /* Avoid dependency on memset() if it is not required. */
    #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
    {
        /* Fill the stack with a known value to assist debugging. */
        ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
    }
    #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */

    /* Calculate the top of stack address.  This depends on whether the stack
    grows from high memory to low (as per the 80x86) or vice versa.
    portSTACK_GROWTH is used to make the result positive or negative as required
    by the port. */
    #if( portSTACK_GROWTH < 0 )
    {
        pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
        pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception.  Avoiding casts between pointers and integers is not practical.  Size differences accounted for using portPOINTER_SIZE_TYPE type.  Checked by assert(). */

        /* Check the alignment of the calculated top of stack is correct. */
        configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );

        #if( configRECORD_STACK_HIGH_ADDRESS == 1 )
        {
            /* Also record the stack's high address, which may assist
            debugging. */
            pxNewTCB->pxEndOfStack = pxTopOfStack;
        }
        #endif /* configRECORD_STACK_HIGH_ADDRESS */
    }
    #else /* portSTACK_GROWTH */
    {
        pxTopOfStack = pxNewTCB->pxStack;

        /* Check the alignment of the stack buffer is correct. */
        configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );

        /* The other extreme of the stack space is required if stack checking is
        performed. */
        pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
    }
    #endif /* portSTACK_GROWTH */
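
    /* Illustrative note (not part of the original source): the mask applied
    above rounds the top of stack DOWN to the nearest aligned address.  For
    example, with portBYTE_ALIGNMENT set to 8 (portBYTE_ALIGNMENT_MASK 0x0007),
    a calculated top of stack of 0x2000FFF6 becomes 0x2000FFF0, keeping the
    initial stack frame inside the allocated stack area while satisfying the
    port's alignment requirement. */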

    /* Store the task name in the TCB. */
    if( pcName != NULL )
    {
        for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
        {
            pxNewTCB->pcTaskName[ x ] = pcName[ x ];

            /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
            configMAX_TASK_NAME_LEN characters just in case the memory after the
            string is not accessible (extremely unlikely). */
            if( pcName[ x ] == ( char ) 0x00 )
            {
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Ensure the name string is terminated in the case that the string length
        was greater or equal to configMAX_TASK_NAME_LEN. */
        pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
    }
    else
    {
        /* The task has not been given a name, so just ensure there is a NULL
        terminator when it is read out. */
        pxNewTCB->pcTaskName[ 0 ] = 0x00;
    }

    /* This is used as an array index so must ensure it's not too large.  First
    remove the privilege bit if one is present. */
    if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
    {
        uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    pxNewTCB->uxPriority = uxPriority;
    #if ( configUSE_MUTEXES == 1 )
    {
        pxNewTCB->uxBasePriority = uxPriority;
        pxNewTCB->uxMutexesHeld = 0;
    }
    #endif /* configUSE_MUTEXES */

    vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
    vListInitialiseItem( &( pxNewTCB->xEventListItem ) );

    /* Set the pxNewTCB as a link back from the ListItem_t.  This is so we can get
    back to the containing TCB from a generic item in a list. */
    listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );

    /* Event lists are always in priority order. */
    listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );

    #if ( portCRITICAL_NESTING_IN_TCB == 1 )
    {
        pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
    }
    #endif /* portCRITICAL_NESTING_IN_TCB */

    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
    {
        pxNewTCB->pxTaskTag = NULL;
    }
    #endif /* configUSE_APPLICATION_TASK_TAG */

    #if ( configGENERATE_RUN_TIME_STATS == 1 )
    {
        pxNewTCB->ulRunTimeCounter = 0UL;
    }
    #endif /* configGENERATE_RUN_TIME_STATS */

    #if ( portUSING_MPU_WRAPPERS == 1 )
    {
        vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
    }
    #else
    {
        /* Avoid compiler warning about unreferenced parameter. */
        ( void ) xRegions;
    }
    #endif

    #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
    {
        for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
        {
            pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL;
        }
    }
    #endif

    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
    {
        pxNewTCB->ulNotifiedValue = 0;
        pxNewTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
    }
    #endif

    #if ( configUSE_NEWLIB_REENTRANT == 1 )
    {
        /* Initialise this task's Newlib reent structure. */
        _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );
    }
    #endif

    #if( INCLUDE_xTaskAbortDelay == 1 )
    {
        pxNewTCB->ucDelayAborted = pdFALSE;
    }
    #endif

    /* Initialize the TCB stack to look as if the task was already running,
    but had been interrupted by the scheduler.  The return address is set
    to the start of the task function.  Once the stack has been initialised
    the top of stack variable is updated. */
    #if( portUSING_MPU_WRAPPERS == 1 )
    {
        /* If the port has capability to detect stack overflow,
        pass the stack end address to the stack initialization
        function as well. */
        #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
        {
            #if( portSTACK_GROWTH < 0 )
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );
            }
            #else /* portSTACK_GROWTH */
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );
            }
            #endif /* portSTACK_GROWTH */
        }
        #else /* portHAS_STACK_OVERFLOW_CHECKING */
        {
            pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
        }
        #endif /* portHAS_STACK_OVERFLOW_CHECKING */
    }
    #else /* portUSING_MPU_WRAPPERS */
    {
        /* If the port has capability to detect stack overflow,
        pass the stack end address to the stack initialization
        function as well. */
        #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
        {
            #if( portSTACK_GROWTH < 0 )
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
            }
            #else /* portSTACK_GROWTH */
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
            }
            #endif /* portSTACK_GROWTH */
        }
        #else /* portHAS_STACK_OVERFLOW_CHECKING */
        {
            pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
        }
        #endif /* portHAS_STACK_OVERFLOW_CHECKING */
    }
    #endif /* portUSING_MPU_WRAPPERS */

    if( pxCreatedTask != NULL )
    {
        /* Pass the handle out in an anonymous way.  The handle can be used to
        change the created task's priority, delete the created task, etc.*/
        *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
}
/*-----------------------------------------------------------*/
1105
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001106static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001107{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001108 /* Ensure interrupts don't access the task lists while the lists are being
1109 updated. */
1110 taskENTER_CRITICAL();
1111 {
1112 uxCurrentNumberOfTasks++;
1113 if( pxCurrentTCB == NULL )
1114 {
1115 /* There are no other tasks, or all the other tasks are in
1116 the suspended state - make this the current task. */
1117 pxCurrentTCB = pxNewTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001118
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001119 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
1120 {
1121 /* This is the first task to be created so do the preliminary
1122 initialisation required. We will not recover if this call
1123 fails, but we will report the failure. */
1124 prvInitialiseTaskLists();
1125 }
1126 else
1127 {
1128 mtCOVERAGE_TEST_MARKER();
1129 }
1130 }
1131 else
1132 {
1133 /* If the scheduler is not already running, make this task the
1134 current task if it is the highest priority task to be created
1135 so far. */
1136 if( xSchedulerRunning == pdFALSE )
1137 {
1138 if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority )
1139 {
1140 pxCurrentTCB = pxNewTCB;
1141 }
1142 else
1143 {
1144 mtCOVERAGE_TEST_MARKER();
1145 }
1146 }
1147 else
1148 {
1149 mtCOVERAGE_TEST_MARKER();
1150 }
1151 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001152
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001153 uxTaskNumber++;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001154
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001155 #if ( configUSE_TRACE_FACILITY == 1 )
1156 {
1157 /* Add a counter into the TCB for tracing only. */
1158 pxNewTCB->uxTCBNumber = uxTaskNumber;
1159 }
1160 #endif /* configUSE_TRACE_FACILITY */
1161 traceTASK_CREATE( pxNewTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001162
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001163 prvAddTaskToReadyList( pxNewTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001164
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001165 portSETUP_TCB( pxNewTCB );
1166 }
1167 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001168
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001169 if( xSchedulerRunning != pdFALSE )
1170 {
1171 /* If the created task is of a higher priority than the current task
1172 then it should run now. */
1173 if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority )
1174 {
1175 taskYIELD_IF_USING_PREEMPTION();
1176 }
1177 else
1178 {
1179 mtCOVERAGE_TEST_MARKER();
1180 }
1181 }
1182 else
1183 {
1184 mtCOVERAGE_TEST_MARKER();
1185 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001186}
1187/*-----------------------------------------------------------*/
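/* Illustrative usage sketch (not part of the kernel, excluded from the build):
with configUSE_PREEMPTION set to 1, creating a task of higher priority than the
caller causes prvAddNewTaskToReadyList() above to yield immediately, so the new
task starts running before the create call returns to the caller.  The task
function name and priority value below are example placeholders. */
#if 0 /* Example only. */
static void prvExampleWorkerTask( void *pvParameters )
{
	( void ) pvParameters;

	for( ;; )
	{
		/* ...worker processing... */
	}
}

static void prvExampleCreateWorker( void )
{
TaskHandle_t xWorkerHandle = NULL;

	/* The worker is created two priority levels above the idle task.  If the
	calling task has a lower priority the worker will be switched in before
	this call returns. */
	if( xTaskCreate( prvExampleWorkerTask, "Worker", configMINIMAL_STACK_SIZE, NULL, tskIDLE_PRIORITY + 2, &xWorkerHandle ) != pdPASS )
	{
		/* There was not enough FreeRTOS heap to create the task. */
	}
}
#endif /* Example only. */
/*-----------------------------------------------------------*/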
1188
1189#if ( INCLUDE_vTaskDelete == 1 )
1190
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001191 void vTaskDelete( TaskHandle_t xTaskToDelete )
1192 {
1193 TCB_t *pxTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001194
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001195 taskENTER_CRITICAL();
1196 {
1197 /* If null is passed in here then it is the calling task that is
1198 being deleted. */
1199 pxTCB = prvGetTCBFromHandle( xTaskToDelete );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001200
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001201 /* Remove task from the ready list. */
1202 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1203 {
1204 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1205 }
1206 else
1207 {
1208 mtCOVERAGE_TEST_MARKER();
1209 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001210
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001211 /* Is the task waiting on an event also? */
1212 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1213 {
1214 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1215 }
1216 else
1217 {
1218 mtCOVERAGE_TEST_MARKER();
1219 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001220
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001221 /* Increment the uxTaskNumber also so kernel aware debuggers can
1222 detect that the task lists need re-generating. This is done before
1223 portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
1224 not return. */
1225 uxTaskNumber++;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001226
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001227 if( pxTCB == pxCurrentTCB )
1228 {
1229 /* A task is deleting itself. This cannot complete within the
1230 task itself, as a context switch to another task is required.
1231 Place the task in the termination list. The idle task will
1232 check the termination list and free up any memory allocated by
1233 the scheduler for the TCB and stack of the deleted task. */
1234 vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001235
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001236	/* Increment the uxDeletedTasksWaitingCleanUp variable so the idle task knows
1237 there is a task that has been deleted and that it should therefore
1238 check the xTasksWaitingTermination list. */
1239 ++uxDeletedTasksWaitingCleanUp;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001240
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001241 /* The pre-delete hook is primarily for the Windows simulator,
1242 in which Windows specific clean up operations are performed,
1243 after which it is not possible to yield away from this task -
1244 hence xYieldPending is used to latch that a context switch is
1245 required. */
1246 portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
1247 }
1248 else
1249 {
1250 --uxCurrentNumberOfTasks;
1251 prvDeleteTCB( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001252
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001253 /* Reset the next expected unblock time in case it referred to
1254 the task that has just been deleted. */
1255 prvResetNextTaskUnblockTime();
1256 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001257
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001258 traceTASK_DELETE( pxTCB );
shijie.xiongf9b5e162022-07-14 15:12:48 +08001259
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001260#ifdef CONFIG_DMALLOC
1261 xClearSpecDmallocNode(pxTCB->uxTCBNumber);
1262#endif
1263 }
1264 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001265
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001266 /* Force a reschedule if it is the currently running task that has just
1267 been deleted. */
1268 if( xSchedulerRunning != pdFALSE )
1269 {
1270 if( pxTCB == pxCurrentTCB )
1271 {
1272 configASSERT( uxSchedulerSuspended == 0 );
1273 portYIELD_WITHIN_API();
1274 }
1275 else
1276 {
1277 mtCOVERAGE_TEST_MARKER();
1278 }
1279 }
1280 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001281
1282#endif /* INCLUDE_vTaskDelete */
1283/*-----------------------------------------------------------*/
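/* Illustrative usage sketch (not part of the kernel, excluded from the build):
a task that deletes itself once its work is done.  Because a task cannot free
its own TCB and stack, vTaskDelete() above only moves it to
xTasksWaitingTermination; the idle task performs the actual clean up, so the
idle task must be allowed some processing time after a self delete. */
#if 0 /* Example only. */
static void prvExampleOneShotTask( void *pvParameters )
{
	( void ) pvParameters;

	/* ...perform the one-off job... */

	/* Passing NULL deletes the calling task.  This call does not return. */
	vTaskDelete( NULL );
}
#endif /* Example only. */
/*-----------------------------------------------------------*/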
1284
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001285#if ( INCLUDE_vTaskDelayUntil == 1 )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001286
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001287 void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
1288 {
1289 TickType_t xTimeToWake;
1290 BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001291
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001292 configASSERT( pxPreviousWakeTime );
1293 configASSERT( ( xTimeIncrement > 0U ) );
1294 configASSERT( uxSchedulerSuspended == 0 );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001295
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001296 vTaskSuspendAll();
1297 {
1298 /* Minor optimisation. The tick count cannot change in this
1299 block. */
1300 const TickType_t xConstTickCount = xTickCount;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001301
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001302 /* Generate the tick time at which the task wants to wake. */
1303 xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001304
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001305 if( xConstTickCount < *pxPreviousWakeTime )
1306 {
1307 /* The tick count has overflowed since this function was
1308	last called. In this case the only time we should ever
1309 actually delay is if the wake time has also overflowed,
1310 and the wake time is greater than the tick time. When this
1311 is the case it is as if neither time had overflowed. */
1312 if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
1313 {
1314 xShouldDelay = pdTRUE;
1315 }
1316 else
1317 {
1318 mtCOVERAGE_TEST_MARKER();
1319 }
1320 }
1321 else
1322 {
1323 /* The tick time has not overflowed. In this case we will
1324 delay if either the wake time has overflowed, and/or the
1325 tick time is less than the wake time. */
1326 if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
1327 {
1328 xShouldDelay = pdTRUE;
1329 }
1330 else
1331 {
1332 mtCOVERAGE_TEST_MARKER();
1333 }
1334 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001335
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001336 /* Update the wake time ready for the next call. */
1337 *pxPreviousWakeTime = xTimeToWake;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001338
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001339 if( xShouldDelay != pdFALSE )
1340 {
1341 traceTASK_DELAY_UNTIL( xTimeToWake );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001342
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001343 /* prvAddCurrentTaskToDelayedList() needs the block time, not
1344 the time to wake, so subtract the current tick count. */
1345 prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
1346 }
1347 else
1348 {
1349 mtCOVERAGE_TEST_MARKER();
1350 }
1351 }
1352 xAlreadyYielded = xTaskResumeAll();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001353
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001354 /* Force a reschedule if xTaskResumeAll has not already done so, we may
1355 have put ourselves to sleep. */
1356 if( xAlreadyYielded == pdFALSE )
1357 {
1358 portYIELD_WITHIN_API();
1359 }
1360 else
1361 {
1362 mtCOVERAGE_TEST_MARKER();
1363 }
1364 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001365
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001366#endif /* INCLUDE_vTaskDelayUntil */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001367/*-----------------------------------------------------------*/
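/* Illustrative usage sketch (not part of the kernel, excluded from the build):
a fixed frequency task built on vTaskDelayUntil().  Unlike vTaskDelay(), the
wake time is calculated from the previous wake time rather than from the time
of the call, so the period does not drift when the task's execution time
varies.  The 10ms period is an example value. */
#if 0 /* Example only. */
static void prvExamplePeriodicTask( void *pvParameters )
{
TickType_t xLastWakeTime;
const TickType_t xPeriod = pdMS_TO_TICKS( 10 );

	( void ) pvParameters;

	/* Seed the reference time with the current tick count. */
	xLastWakeTime = xTaskGetTickCount();

	for( ;; )
	{
		/* Block until exactly one period after the previous wake time. */
		vTaskDelayUntil( &xLastWakeTime, xPeriod );

		/* ...periodic processing... */
	}
}
#endif /* Example only. */
/*-----------------------------------------------------------*/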
1368
1369#if ( INCLUDE_vTaskDelay == 1 )
1370
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001371 void vTaskDelay( const TickType_t xTicksToDelay )
1372 {
1373 BaseType_t xAlreadyYielded = pdFALSE;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001374
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001375 /* A delay time of zero just forces a reschedule. */
1376 if( xTicksToDelay > ( TickType_t ) 0U )
1377 {
1378 configASSERT( uxSchedulerSuspended == 0 );
1379 vTaskSuspendAll();
1380 {
1381 traceTASK_DELAY();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001382
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001383 /* A task that is removed from the event list while the
1384 scheduler is suspended will not get placed in the ready
1385 list or removed from the blocked list until the scheduler
1386 is resumed.
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001387
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001388 This task cannot be in an event list as it is the currently
1389 executing task. */
1390 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
1391 }
1392 xAlreadyYielded = xTaskResumeAll();
1393 }
1394 else
1395 {
1396 mtCOVERAGE_TEST_MARKER();
1397 }
1398
1399 /* Force a reschedule if xTaskResumeAll has not already done so, we may
1400 have put ourselves to sleep. */
1401 if( xAlreadyYielded == pdFALSE )
1402 {
1403 portYIELD_WITHIN_API();
1404 }
1405 else
1406 {
1407 mtCOVERAGE_TEST_MARKER();
1408 }
1409 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001410
1411#endif /* INCLUDE_vTaskDelay */
1412/*-----------------------------------------------------------*/
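/* Illustrative usage sketch (not part of the kernel, excluded from the build):
vTaskDelay() specifies a block time relative to the moment it is called, so it
suits simple pacing or polling back-off rather than fixed frequency work (see
the vTaskDelayUntil() sketch above).  The 100ms value is an example. */
#if 0 /* Example only. */
static void prvExamplePollingTask( void *pvParameters )
{
	( void ) pvParameters;

	for( ;; )
	{
		/* ...poll a peripheral or a flag... */

		/* Yield the CPU for roughly 100ms before polling again. */
		vTaskDelay( pdMS_TO_TICKS( 100 ) );
	}
}
#endif /* Example only. */
/*-----------------------------------------------------------*/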
1413
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001414#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001415
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001416 eTaskState eTaskGetState( TaskHandle_t xTask )
1417 {
1418 eTaskState eReturn;
1419 List_t const * pxStateList, *pxDelayedList, *pxOverflowedDelayedList;
1420 const TCB_t * const pxTCB = xTask;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001421
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001422 configASSERT( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001423
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001424 if( pxTCB == pxCurrentTCB )
1425 {
1426 /* The task calling this function is querying its own state. */
1427 eReturn = eRunning;
1428 }
1429 else
1430 {
1431 taskENTER_CRITICAL();
1432 {
1433 pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
1434 pxDelayedList = pxDelayedTaskList;
1435 pxOverflowedDelayedList = pxOverflowDelayedTaskList;
1436 }
1437 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001438
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001439 if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
1440 {
1441 /* The task being queried is referenced from one of the Blocked
1442 lists. */
1443 eReturn = eBlocked;
1444 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001445
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001446 #if ( INCLUDE_vTaskSuspend == 1 )
1447 else if( pxStateList == &xSuspendedTaskList )
1448 {
1449 /* The task being queried is referenced from the suspended
1450 list. Is it genuinely suspended or is it blocked
1451 indefinitely? */
1452 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
1453 {
1454 #if( configUSE_TASK_NOTIFICATIONS == 1 )
1455 {
1456 /* The task does not appear on the event list item of
1457	any of the RTOS objects, but could still be in the
1458 blocked state if it is waiting on its notification
1459 rather than waiting on an object. */
1460 if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
1461 {
1462 eReturn = eBlocked;
1463 }
1464 else
1465 {
1466 eReturn = eSuspended;
1467 }
1468 }
1469 #else
1470 {
1471 eReturn = eSuspended;
1472 }
1473 #endif
1474 }
1475 else
1476 {
1477 eReturn = eBlocked;
1478 }
1479 }
1480 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001481
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001482 #if ( INCLUDE_vTaskDelete == 1 )
1483 else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
1484 {
1485 /* The task being queried is referenced from the deleted
1486 tasks list, or it is not referenced from any lists at
1487 all. */
1488 eReturn = eDeleted;
1489 }
1490 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001491
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001492 else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
1493 {
1494 /* If the task is not in any other state, it must be in the
1495 Ready (including pending ready) state. */
1496 eReturn = eReady;
1497 }
1498 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001499
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001500 return eReturn;
1501 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001502
1503#endif /* INCLUDE_eTaskGetState */
1504/*-----------------------------------------------------------*/
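/* Illustrative usage sketch (not part of the kernel, excluded from the build):
a simple monitor that uses eTaskGetState() to decide whether another task is
currently able to run.  Note a task that is waiting on a notification reports
eBlocked, as handled above.  The handle is assumed to have been obtained when
the task was created. */
#if 0 /* Example only. */
static BaseType_t prvExampleTaskIsRunnable( TaskHandle_t xTaskToCheck )
{
eTaskState eState;
BaseType_t xRunnable;

	eState = eTaskGetState( xTaskToCheck );

	/* eRunning is only returned when a task queries itself, so treat both
	the Running and Ready states as runnable. */
	if( ( eState == eRunning ) || ( eState == eReady ) )
	{
		xRunnable = pdTRUE;
	}
	else
	{
		xRunnable = pdFALSE;
	}

	return xRunnable;
}
#endif /* Example only. */
/*-----------------------------------------------------------*/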
1505
1506#if ( INCLUDE_uxTaskPriorityGet == 1 )
1507
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001508 UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
1509 {
1510 TCB_t const *pxTCB;
1511 UBaseType_t uxReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001512
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001513 taskENTER_CRITICAL();
1514 {
1515 /* If null is passed in here then it is the priority of the task
1516 that called uxTaskPriorityGet() that is being queried. */
1517 pxTCB = prvGetTCBFromHandle( xTask );
1518 uxReturn = pxTCB->uxPriority;
1519 }
1520 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001521
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001522 return uxReturn;
1523 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001524
1525#endif /* INCLUDE_uxTaskPriorityGet */
1526/*-----------------------------------------------------------*/
1527
1528#if ( INCLUDE_uxTaskPriorityGet == 1 )
1529
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001530 UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
1531 {
1532 TCB_t const *pxTCB;
1533 UBaseType_t uxReturn, uxSavedInterruptState;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001534
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001535 /* RTOS ports that support interrupt nesting have the concept of a
1536 maximum system call (or maximum API call) interrupt priority.
1537	Interrupts that are above the maximum system call priority are kept
1538 permanently enabled, even when the RTOS kernel is in a critical section,
1539 but cannot make any calls to FreeRTOS API functions. If configASSERT()
1540 is defined in FreeRTOSConfig.h then
1541 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1542 failure if a FreeRTOS API function is called from an interrupt that has
1543 been assigned a priority above the configured maximum system call
1544 priority. Only FreeRTOS functions that end in FromISR can be called
1545 from interrupts that have been assigned a priority at or (logically)
1546 below the maximum system call interrupt priority. FreeRTOS maintains a
1547 separate interrupt safe API to ensure interrupt entry is as fast and as
1548 simple as possible. More information (albeit Cortex-M specific) is
1549 provided on the following link:
1550 https://www.freertos.org/RTOS-Cortex-M3-M4.html */
1551 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001552
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001553 uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR();
1554 {
1555 /* If null is passed in here then it is the priority of the calling
1556 task that is being queried. */
1557 pxTCB = prvGetTCBFromHandle( xTask );
1558 uxReturn = pxTCB->uxPriority;
1559 }
1560 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001561
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001562 return uxReturn;
1563 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001564
1565#endif /* INCLUDE_uxTaskPriorityGet */
1566/*-----------------------------------------------------------*/
1567
1568#if ( INCLUDE_vTaskPrioritySet == 1 )
1569
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001570 void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
1571 {
1572 TCB_t *pxTCB;
1573 UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
1574 BaseType_t xYieldRequired = pdFALSE;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001575
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001576 configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001577
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001578 /* Ensure the new priority is valid. */
1579 if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
1580 {
1581 uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
1582 }
1583 else
1584 {
1585 mtCOVERAGE_TEST_MARKER();
1586 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001587
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001588 taskENTER_CRITICAL();
1589 {
1590 /* If null is passed in here then it is the priority of the calling
1591 task that is being changed. */
1592 pxTCB = prvGetTCBFromHandle( xTask );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001593
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001594 traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001595
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001596 #if ( configUSE_MUTEXES == 1 )
1597 {
1598 uxCurrentBasePriority = pxTCB->uxBasePriority;
1599 }
1600 #else
1601 {
1602 uxCurrentBasePriority = pxTCB->uxPriority;
1603 }
1604 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001605
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001606 if( uxCurrentBasePriority != uxNewPriority )
1607 {
1608 /* The priority change may have readied a task of higher
1609 priority than the calling task. */
1610 if( uxNewPriority > uxCurrentBasePriority )
1611 {
1612 if( pxTCB != pxCurrentTCB )
1613 {
1614 /* The priority of a task other than the currently
1615 running task is being raised. Is the priority being
1616 raised above that of the running task? */
1617 if( uxNewPriority >= pxCurrentTCB->uxPriority )
1618 {
1619 xYieldRequired = pdTRUE;
1620 }
1621 else
1622 {
1623 mtCOVERAGE_TEST_MARKER();
1624 }
1625 }
1626 else
1627 {
1628 /* The priority of the running task is being raised,
1629 but the running task must already be the highest
1630 priority task able to run so no yield is required. */
1631 }
1632 }
1633 else if( pxTCB == pxCurrentTCB )
1634 {
1635 /* Setting the priority of the running task down means
1636 there may now be another task of higher priority that
1637 is ready to execute. */
1638 xYieldRequired = pdTRUE;
1639 }
1640 else
1641 {
1642 /* Setting the priority of any other task down does not
1643 require a yield as the running task must be above the
1644 new priority of the task being modified. */
1645 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001646
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001647 /* Remember the ready list the task might be referenced from
1648 before its uxPriority member is changed so the
1649 taskRESET_READY_PRIORITY() macro can function correctly. */
1650 uxPriorityUsedOnEntry = pxTCB->uxPriority;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001651
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001652 #if ( configUSE_MUTEXES == 1 )
1653 {
1654 /* Only change the priority being used if the task is not
1655 currently using an inherited priority. */
1656 if( pxTCB->uxBasePriority == pxTCB->uxPriority )
1657 {
1658 pxTCB->uxPriority = uxNewPriority;
1659 }
1660 else
1661 {
1662 mtCOVERAGE_TEST_MARKER();
1663 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001664
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001665 /* The base priority gets set whatever. */
1666 pxTCB->uxBasePriority = uxNewPriority;
1667 }
1668 #else
1669 {
1670 pxTCB->uxPriority = uxNewPriority;
1671 }
1672 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001673
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001674 /* Only reset the event list item value if the value is not
1675 being used for anything else. */
1676 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
1677 {
1678 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1679 }
1680 else
1681 {
1682 mtCOVERAGE_TEST_MARKER();
1683 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001684
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001685 /* If the task is in the blocked or suspended list we need do
1686 nothing more than change its priority variable. However, if
1687 the task is in a ready list it needs to be removed and placed
1688 in the list appropriate to its new priority. */
1689 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
1690 {
1691 /* The task is currently in its ready list - remove before
1692	adding it to its new ready list. As we are in a critical
1693 section we can do this even if the scheduler is suspended. */
1694 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1695 {
1696 /* It is known that the task is in its ready list so
1697 there is no need to check again and the port level
1698 reset macro can be called directly. */
1699 portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
1700 }
1701 else
1702 {
1703 mtCOVERAGE_TEST_MARKER();
1704 }
1705 prvAddTaskToReadyList( pxTCB );
1706 }
1707 else
1708 {
1709 mtCOVERAGE_TEST_MARKER();
1710 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001711
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001712 if( xYieldRequired != pdFALSE )
1713 {
1714 taskYIELD_IF_USING_PREEMPTION();
1715 }
1716 else
1717 {
1718 mtCOVERAGE_TEST_MARKER();
1719 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001720
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001721 /* Remove compiler warning about unused variables when the port
1722 optimised task selection is not being used. */
1723 ( void ) uxPriorityUsedOnEntry;
1724 }
1725 }
1726 taskEXIT_CRITICAL();
1727 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001728
1729#endif /* INCLUDE_vTaskPrioritySet */
1730/*-----------------------------------------------------------*/
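/* Illustrative usage sketch (not part of the kernel, excluded from the build):
temporarily raising a worker task's priority around a time critical burst,
then restoring the value read back with uxTaskPriorityGet().  As noted above,
if the worker is currently running with an inherited (mutex) priority only its
base priority changes until the mutex is returned. */
#if 0 /* Example only. */
static void prvExampleBoostWorker( TaskHandle_t xWorker )
{
UBaseType_t uxOriginalPriority;

	/* Remember the priority the worker currently has. */
	uxOriginalPriority = uxTaskPriorityGet( xWorker );

	/* Raise the worker by one level for the burst. */
	vTaskPrioritySet( xWorker, uxOriginalPriority + 1 );

	/* ...wait for the burst to complete... */

	/* Restore the original priority. */
	vTaskPrioritySet( xWorker, uxOriginalPriority );
}
#endif /* Example only. */
/*-----------------------------------------------------------*/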
1731
1732#if ( INCLUDE_vTaskSuspend == 1 )
1733
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001734 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
1735 {
1736 TCB_t *pxTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001737
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001738 taskENTER_CRITICAL();
1739 {
1740 /* If null is passed in here then it is the running task that is
1741 being suspended. */
1742 pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001743
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001744 traceTASK_SUSPEND( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001745
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001746 /* Remove task from the ready/delayed list and place in the
1747 suspended list. */
1748 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1749 {
1750 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1751 }
1752 else
1753 {
1754 mtCOVERAGE_TEST_MARKER();
1755 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001756
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001757 /* Is the task waiting on an event also? */
1758 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1759 {
1760 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1761 }
1762 else
1763 {
1764 mtCOVERAGE_TEST_MARKER();
1765 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001766
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001767 vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001768
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001769 #if( configUSE_TASK_NOTIFICATIONS == 1 )
1770 {
1771 if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
1772 {
1773 /* The task was blocked to wait for a notification, but is
1774 now suspended, so no notification was received. */
1775 pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
1776 }
1777 }
1778 #endif
1779 }
1780 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001781
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001782 if( xSchedulerRunning != pdFALSE )
1783 {
1784 /* Reset the next expected unblock time in case it referred to the
1785 task that is now in the Suspended state. */
1786 taskENTER_CRITICAL();
1787 {
1788 prvResetNextTaskUnblockTime();
1789 }
1790 taskEXIT_CRITICAL();
1791 }
1792 else
1793 {
1794 mtCOVERAGE_TEST_MARKER();
1795 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001796
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001797 if( pxTCB == pxCurrentTCB )
1798 {
1799 if( xSchedulerRunning != pdFALSE )
1800 {
1801 /* The current task has just been suspended. */
1802 configASSERT( uxSchedulerSuspended == 0 );
1803 portYIELD_WITHIN_API();
1804 }
1805 else
1806 {
1807 /* The scheduler is not running, but the task that was pointed
1808 to by pxCurrentTCB has just been suspended and pxCurrentTCB
1809 must be adjusted to point to a different task. */
1810 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
1811 {
1812 /* No other tasks are ready, so set pxCurrentTCB back to
1813 NULL so when the next task is created pxCurrentTCB will
1814 be set to point to it no matter what its relative priority
1815 is. */
1816 pxCurrentTCB = NULL;
1817 }
1818 else
1819 {
1820 vTaskSwitchContext();
1821 }
1822 }
1823 }
1824 else
1825 {
1826 mtCOVERAGE_TEST_MARKER();
1827 }
1828 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001829
1830#endif /* INCLUDE_vTaskSuspend */
1831/*-----------------------------------------------------------*/
1832
1833#if ( INCLUDE_vTaskSuspend == 1 )
1834
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001835 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
1836 {
1837 BaseType_t xReturn = pdFALSE;
1838 const TCB_t * const pxTCB = xTask;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001839
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001840 /* Accesses xPendingReadyList so must be called from a critical
1841 section. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001842
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001843 /* It does not make sense to check if the calling task is suspended. */
1844 configASSERT( xTask );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001845
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001846 /* Is the task being resumed actually in the suspended list? */
1847 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
1848 {
1849 /* Has the task already been resumed from within an ISR? */
1850 if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
1851 {
1852 /* Is it in the suspended list because it is in the Suspended
1853	state, or because it is blocked with no timeout? */
1854 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */
1855 {
1856 xReturn = pdTRUE;
1857 }
1858 else
1859 {
1860 mtCOVERAGE_TEST_MARKER();
1861 }
1862 }
1863 else
1864 {
1865 mtCOVERAGE_TEST_MARKER();
1866 }
1867 }
1868 else
1869 {
1870 mtCOVERAGE_TEST_MARKER();
1871 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001872
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001873 return xReturn;
1874 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001875
1876#endif /* INCLUDE_vTaskSuspend */
1877/*-----------------------------------------------------------*/
1878
1879#if ( INCLUDE_vTaskSuspend == 1 )
1880
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001881 void vTaskResume( TaskHandle_t xTaskToResume )
1882 {
1883 TCB_t * const pxTCB = xTaskToResume;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001884
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001885 /* It does not make sense to resume the calling task. */
1886 configASSERT( xTaskToResume );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001887
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001888 /* The parameter cannot be NULL as it is impossible to resume the
1889 currently executing task. */
1890 if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) )
1891 {
1892 taskENTER_CRITICAL();
1893 {
1894 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
1895 {
1896 traceTASK_RESUME( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001897
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001898 /* The ready list can be accessed even if the scheduler is
1899 suspended because this is inside a critical section. */
1900 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
1901 prvAddTaskToReadyList( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001902
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001903 /* A higher priority task may have just been resumed. */
1904 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
1905 {
1906 /* This yield may not cause the task just resumed to run,
1907 but will leave the lists in the correct state for the
1908 next yield. */
1909 taskYIELD_IF_USING_PREEMPTION();
1910 }
1911 else
1912 {
1913 mtCOVERAGE_TEST_MARKER();
1914 }
1915 }
1916 else
1917 {
1918 mtCOVERAGE_TEST_MARKER();
1919 }
1920 }
1921 taskEXIT_CRITICAL();
1922 }
1923 else
1924 {
1925 mtCOVERAGE_TEST_MARKER();
1926 }
1927 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001928
1929#endif /* INCLUDE_vTaskSuspend */
1930
1931/*-----------------------------------------------------------*/
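/* Illustrative usage sketch (not part of the kernel, excluded from the build):
pausing and later resuming a worker task from another task.  Suspension does
not nest - a single call to vTaskResume() readies the task however many times
vTaskSuspend() was called - and, as handled above, a task suspended while
waiting for a notification abandons that wait. */
#if 0 /* Example only. */
static void prvExamplePauseWorker( TaskHandle_t xWorker )
{
	/* Remove the worker from the ready or blocked lists. */
	vTaskSuspend( xWorker );

	/* ...the worker consumes no CPU time here... */

	/* Make the worker ready again; it will preempt the caller immediately
	if it has the higher priority and preemption is enabled. */
	vTaskResume( xWorker );
}
#endif /* Example only. */
/*-----------------------------------------------------------*/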
1932
1933#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
1934
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001935 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
1936 {
1937 BaseType_t xYieldRequired = pdFALSE;
1938 TCB_t * const pxTCB = xTaskToResume;
1939 UBaseType_t uxSavedInterruptStatus;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001940
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001941 configASSERT( xTaskToResume );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001942
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001943 /* RTOS ports that support interrupt nesting have the concept of a
1944 maximum system call (or maximum API call) interrupt priority.
1945	Interrupts that are above the maximum system call priority are kept
1946 permanently enabled, even when the RTOS kernel is in a critical section,
1947 but cannot make any calls to FreeRTOS API functions. If configASSERT()
1948 is defined in FreeRTOSConfig.h then
1949 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1950 failure if a FreeRTOS API function is called from an interrupt that has
1951 been assigned a priority above the configured maximum system call
1952 priority. Only FreeRTOS functions that end in FromISR can be called
1953 from interrupts that have been assigned a priority at or (logically)
1954 below the maximum system call interrupt priority. FreeRTOS maintains a
1955 separate interrupt safe API to ensure interrupt entry is as fast and as
1956 simple as possible. More information (albeit Cortex-M specific) is
1957 provided on the following link:
1958 https://www.freertos.org/RTOS-Cortex-M3-M4.html */
1959 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001960
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001961 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1962 {
1963 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
1964 {
1965 traceTASK_RESUME_FROM_ISR( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001966
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001967 /* Check the ready lists can be accessed. */
1968 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
1969 {
1970 /* Ready lists can be accessed so move the task from the
1971 suspended list to the ready list directly. */
1972 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
1973 {
1974 xYieldRequired = pdTRUE;
1975 }
1976 else
1977 {
1978 mtCOVERAGE_TEST_MARKER();
1979 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001980
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001981 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
1982 prvAddTaskToReadyList( pxTCB );
1983 }
1984 else
1985 {
1986 /* The delayed or ready lists cannot be accessed so the task
1987 is held in the pending ready list until the scheduler is
1988 unsuspended. */
1989 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
1990 }
1991 }
1992 else
1993 {
1994 mtCOVERAGE_TEST_MARKER();
1995 }
1996 }
1997 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001998
xiaohu.huang4f321fb2024-03-22 14:50:29 +08001999 return xYieldRequired;
2000 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002001
2002#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
2003/*-----------------------------------------------------------*/
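/* Illustrative usage sketch (not part of the kernel, excluded from the build):
an interrupt handler resuming a suspended handler task.  xTaskResumeFromISR()
returns pdTRUE when the resumed task should run before the interrupted task,
and that value is passed to the port's context switch request macro.  The
handler name, the handle and the exact portYIELD_FROM_ISR() form are
port/application specific placeholders. */
#if 0 /* Example only. */
static TaskHandle_t xExampleHandlerTask = NULL;

void vExampleInterruptHandler( void )
{
BaseType_t xYieldRequired;

	/* ...clear the interrupt source... */

	/* Ready the handler task that suspended itself with vTaskSuspend( NULL ). */
	xYieldRequired = xTaskResumeFromISR( xExampleHandlerTask );

	/* Request a context switch on exit from the ISR if one is required. */
	portYIELD_FROM_ISR( xYieldRequired );
}
#endif /* Example only. */
/*-----------------------------------------------------------*/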
2004
2005void vTaskStartScheduler( void )
2006{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002007BaseType_t xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002008
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002009 /* Add the idle task at the lowest priority. */
2010 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
2011 {
2012 StaticTask_t *pxIdleTaskTCBBuffer = NULL;
2013 StackType_t *pxIdleTaskStackBuffer = NULL;
2014 configSTACK_DEPTH_TYPE ulIdleTaskStackSize;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002015
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002016 /* The Idle task is created using user provided RAM - obtain the
2017 address of the RAM then create the idle task. */
2018 vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
2019 xIdleTaskHandle = xTaskCreateStatic( prvIdleTask,
2020 configIDLE_TASK_NAME,
2021 ulIdleTaskStackSize,
2022 ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */
2023 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
2024 pxIdleTaskStackBuffer,
2025 pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002026
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002027 if( xIdleTaskHandle != NULL )
2028 {
2029 xReturn = pdPASS;
2030 }
2031 else
2032 {
2033 xReturn = pdFAIL;
2034 }
2035 }
2036 #else
2037 {
2038 /* The Idle task is being created using dynamically allocated RAM. */
2039 xReturn = xTaskCreate( prvIdleTask,
2040 configIDLE_TASK_NAME,
2041 configMINIMAL_STACK_SIZE,
2042 ( void * ) NULL,
2043 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
2044 &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2045 }
2046 #endif /* configSUPPORT_STATIC_ALLOCATION */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002047
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002048 #if ( configUSE_TIMERS == 1 )
2049 {
2050 if( xReturn == pdPASS )
2051 {
2052 xReturn = xTimerCreateTimerTask();
2053 }
2054 else
2055 {
2056 mtCOVERAGE_TEST_MARKER();
2057 }
2058 }
2059 #endif /* configUSE_TIMERS */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002060
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002061 if( xReturn == pdPASS )
2062 {
2063 /* freertos_tasks_c_additions_init() should only be called if the user
2064 definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
2065 the only macro called by the function. */
2066 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
2067 {
2068 freertos_tasks_c_additions_init();
2069 }
2070 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002071
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002072 /* Interrupts are turned off here, to ensure a tick does not occur
2073 before or during the call to xPortStartScheduler(). The stacks of
2074 the created tasks contain a status word with interrupts switched on
2075 so interrupts will automatically get re-enabled when the first task
2076 starts to run. */
2077 portDISABLE_INTERRUPTS();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002078
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002079 #if ( configUSE_NEWLIB_REENTRANT == 1 )
2080 {
2081 /* Switch Newlib's _impure_ptr variable to point to the _reent
2082 structure specific to the task that will run first. */
2083 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
2084 }
2085 #endif /* configUSE_NEWLIB_REENTRANT */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002086
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002087 xNextTaskUnblockTime = portMAX_DELAY;
2088 xSchedulerRunning = pdTRUE;
2089 xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002090
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002091 /* If configGENERATE_RUN_TIME_STATS is defined then the following
2092 macro must be defined to configure the timer/counter used to generate
2093 the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS
2094 is set to 0 and the following line fails to build then ensure you do not
2095 have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
2096 FreeRTOSConfig.h file. */
2097 portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002098
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002099 traceTASK_SWITCHED_IN();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002100
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002101 /* Setting up the timer tick is hardware specific and thus in the
2102 portable interface. */
2103 if( xPortStartScheduler() != pdFALSE )
2104 {
2105 /* Should not reach here as if the scheduler is running the
2106 function will not return. */
2107 }
2108 else
2109 {
2110 /* Should only reach here if a task calls xTaskEndScheduler(). */
2111 }
2112 }
2113 else
2114 {
2115 /* This line will only be reached if the kernel could not be started,
2116 because there was not enough FreeRTOS heap to create the idle task
2117 or the timer task. */
2118 configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
2119 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002120
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002121 /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
2122 meaning xIdleTaskHandle is not used anywhere else. */
2123 ( void ) xIdleTaskHandle;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002124}
2125/*-----------------------------------------------------------*/
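/* Illustrative usage sketch (not part of the kernel, excluded from the build):
the usual start up sequence - create at least one application task, then hand
control to the kernel.  vTaskStartScheduler() only returns if the idle task
(or, when configUSE_TIMERS is 1, the timer task) could not be created, which
normally means the FreeRTOS heap was too small.  The task name, stack size and
priority are example values. */
#if 0 /* Example only. */
static void prvExampleApplicationTask( void *pvParameters )
{
	( void ) pvParameters;

	for( ;; )
	{
		/* ...application work... */
	}
}

int main( void )
{
	( void ) xTaskCreate( prvExampleApplicationTask, "App", configMINIMAL_STACK_SIZE, NULL, tskIDLE_PRIORITY + 1, NULL );

	/* Start the kernel.  The created task starts running here. */
	vTaskStartScheduler();

	/* Reaching this line means there was insufficient heap to create the
	idle or timer task. */
	for( ;; );
}
#endif /* Example only. */
/*-----------------------------------------------------------*/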
2126
2127void vTaskEndScheduler( void )
2128{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002129 /* Stop the scheduler interrupts and call the portable scheduler end
2130 routine so the original ISRs can be restored if necessary. The port
2131 layer must ensure interrupts enable bit is left in the correct state. */
2132 portDISABLE_INTERRUPTS();
2133 xSchedulerRunning = pdFALSE;
2134 vPortEndScheduler();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002135}
2136/*----------------------------------------------------------*/
2137
2138void vTaskSuspendAll( void )
2139{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002140 /* A critical section is not required as the variable is of type
2141 BaseType_t. Please read Richard Barry's reply in the following link to a
2142 post in the FreeRTOS support forum before reporting this as a bug! -
2143 http://goo.gl/wu4acr */
2144 ++uxSchedulerSuspended;
2145 portMEMORY_BARRIER();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002146}
2147/*----------------------------------------------------------*/
2148
2149#if ( configUSE_TICKLESS_IDLE != 0 )
2150
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002151 static TickType_t prvGetExpectedIdleTime( void )
2152 {
2153 TickType_t xReturn;
2154 UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002155
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002156 /* uxHigherPriorityReadyTasks takes care of the case where
2157 configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
2158 task that are in the Ready state, even though the idle task is
2159 running. */
2160 #if( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
2161 {
2162 if( uxTopReadyPriority > tskIDLE_PRIORITY )
2163 {
2164 uxHigherPriorityReadyTasks = pdTRUE;
2165 }
2166 }
2167 #else
2168 {
2169 const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002170
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002171 /* When port optimised task selection is used the uxTopReadyPriority
2172 variable is used as a bit map. If bits other than the least
2173 significant bit are set then there are tasks that have a priority
2174 above the idle priority that are in the Ready state. This takes
2175 care of the case where the co-operative scheduler is in use. */
2176 if( uxTopReadyPriority > uxLeastSignificantBit )
2177 {
2178 uxHigherPriorityReadyTasks = pdTRUE;
2179 }
2180 }
2181 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002182
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002183 if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
2184 {
2185 xReturn = 0;
2186 }
2187 else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
2188 {
2189 /* There are other idle priority tasks in the ready state. If
2190 time slicing is used then the very next tick interrupt must be
2191 processed. */
2192 xReturn = 0;
2193 }
2194 else if( uxHigherPriorityReadyTasks != pdFALSE )
2195 {
2196 /* There are tasks in the Ready state that have a priority above the
2197 idle priority. This path can only be reached if
2198 configUSE_PREEMPTION is 0. */
2199 xReturn = 0;
2200 }
2201 else
2202 {
2203 xReturn = xNextTaskUnblockTime - xTickCount;
2204 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002205
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002206 return xReturn;
2207 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002208
2209#endif /* configUSE_TICKLESS_IDLE */
2210/*----------------------------------------------------------*/
2211
2212BaseType_t xTaskResumeAll( void )
2213{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002214TCB_t *pxTCB = NULL;
2215BaseType_t xAlreadyYielded = pdFALSE;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002216
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002217 /* If uxSchedulerSuspended is zero then this function does not match a
2218 previous call to vTaskSuspendAll(). */
2219 configASSERT( uxSchedulerSuspended );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002220
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002221 /* It is possible that an ISR caused a task to be removed from an event
2222 list while the scheduler was suspended. If this was the case then the
2223 removed task will have been added to the xPendingReadyList. Once the
2224 scheduler has been resumed it is safe to move all the pending ready
2225 tasks from this list into their appropriate ready list. */
2226 taskENTER_CRITICAL();
2227 {
2228 --uxSchedulerSuspended;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002229
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002230 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
2231 {
2232 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
2233 {
2234 /* Move any readied tasks from the pending list into the
2235 appropriate ready list. */
2236 while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
2237 {
2238 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2239 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2240 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2241 prvAddTaskToReadyList( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002242
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002243 /* If the moved task has a priority higher than the current
2244 task then a yield must be performed. */
2245 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
2246 {
2247 xYieldPending = pdTRUE;
2248 }
2249 else
2250 {
2251 mtCOVERAGE_TEST_MARKER();
2252 }
2253 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002254
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002255 if( pxTCB != NULL )
2256 {
2257 /* A task was unblocked while the scheduler was suspended,
2258 which may have prevented the next unblock time from being
2259 re-calculated, in which case re-calculate it now. Mainly
2260 important for low power tickless implementations, where
2261 this can prevent an unnecessary exit from low power
2262 state. */
2263 prvResetNextTaskUnblockTime();
2264 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002265
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002266 /* If any ticks occurred while the scheduler was suspended then
2267 they should be processed now. This ensures the tick count does
2268 not slip, and that any delayed tasks are resumed at the correct
2269 time. */
2270 {
2271 UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002272
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002273 if( uxPendedCounts > ( UBaseType_t ) 0U )
2274 {
2275 do
2276 {
2277 if( xTaskIncrementTick() != pdFALSE )
2278 {
2279 xYieldPending = pdTRUE;
2280 }
2281 else
2282 {
2283 mtCOVERAGE_TEST_MARKER();
2284 }
2285 --uxPendedCounts;
2286 } while( uxPendedCounts > ( UBaseType_t ) 0U );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002287
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002288 uxPendedTicks = 0;
2289 }
2290 else
2291 {
2292 mtCOVERAGE_TEST_MARKER();
2293 }
2294 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002295
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002296 if( xYieldPending != pdFALSE )
2297 {
2298 #if( configUSE_PREEMPTION != 0 )
2299 {
2300 xAlreadyYielded = pdTRUE;
2301 }
2302 #endif
2303 taskYIELD_IF_USING_PREEMPTION();
2304 }
2305 else
2306 {
2307 mtCOVERAGE_TEST_MARKER();
2308 }
2309 }
2310 }
2311 else
2312 {
2313 mtCOVERAGE_TEST_MARKER();
2314 }
2315 }
2316 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002317
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002318 return xAlreadyYielded;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002319}
2320/*-----------------------------------------------------------*/
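/* Illustrative usage sketch (not part of the kernel, excluded from the build):
using the vTaskSuspendAll()/xTaskResumeAll() pair as a lighter weight
alternative to a critical section when a long operation must not be preempted
by other tasks but interrupts must stay enabled.  The calls nest, and API
functions that can block must not be used between the two calls.  The shared
structure below is an example placeholder. */
#if 0 /* Example only. */
typedef struct xEXAMPLE_SHARED_DATA
{
	uint32_t ulUpdateCount;
	uint32_t ulValue;
} ExampleSharedData_t;

static void prvExampleAtomicUpdate( ExampleSharedData_t *pxData, uint32_t ulNewValue )
{
	/* Other tasks cannot run past this point, but interrupts remain enabled;
	any tasks they ready are held in xPendingReadyList until the resume. */
	vTaskSuspendAll();
	{
		/* ...update the structure shared with other tasks... */
		pxData->ulValue = ulNewValue;
		pxData->ulUpdateCount++;
	}
	/* Returns pdTRUE if resuming the scheduler already caused a yield. */
	( void ) xTaskResumeAll();
}
#endif /* Example only. */
/*-----------------------------------------------------------*/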
2321
2322TickType_t xTaskGetTickCount( void )
2323{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002324TickType_t xTicks;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002325
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002326 /* Critical section required if running on a 16 bit processor. */
2327 portTICK_TYPE_ENTER_CRITICAL();
2328 {
2329 xTicks = xTickCount;
2330 }
2331 portTICK_TYPE_EXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002332
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002333 return xTicks;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002334}
2335/*-----------------------------------------------------------*/
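/* Illustrative usage sketch (not part of the kernel, excluded from the build):
measuring an elapsed time in ticks.  Unsigned subtraction gives the correct
result even if the tick count wraps between the two reads, provided the
interval is shorter than one full TickType_t range.  The function pointer
parameter is an example placeholder. */
#if 0 /* Example only. */
static TickType_t prvExampleMeasureTicks( void ( *pxOperation )( void ) )
{
TickType_t xStart, xEnd;

	xStart = xTaskGetTickCount();
	pxOperation();
	xEnd = xTaskGetTickCount();

	/* Elapsed ticks; the resolution is one tick period. */
	return xEnd - xStart;
}
#endif /* Example only. */
/*-----------------------------------------------------------*/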
2336
2337TickType_t xTaskGetTickCountFromISR( void )
2338{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002339TickType_t xReturn;
2340UBaseType_t uxSavedInterruptStatus;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002341
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002342 /* RTOS ports that support interrupt nesting have the concept of a maximum
2343 system call (or maximum API call) interrupt priority. Interrupts that are
2344 above the maximum system call priority are kept permanently enabled, even
2345 when the RTOS kernel is in a critical section, but cannot make any calls to
2346 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2347 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2348 failure if a FreeRTOS API function is called from an interrupt that has been
2349 assigned a priority above the configured maximum system call priority.
2350 Only FreeRTOS functions that end in FromISR can be called from interrupts
2351 that have been assigned a priority at or (logically) below the maximum
2352 system call interrupt priority. FreeRTOS maintains a separate interrupt
2353 safe API to ensure interrupt entry is as fast and as simple as possible.
2354 More information (albeit Cortex-M specific) is provided on the following
2355 link: https://www.freertos.org/RTOS-Cortex-M3-M4.html */
2356 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002357
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002358 uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
2359 {
2360 xReturn = xTickCount;
2361 }
2362 portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002363
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002364 return xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002365}
2366/*-----------------------------------------------------------*/
2367
2368UBaseType_t uxTaskGetNumberOfTasks( void )
2369{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002370 /* A critical section is not required because the variables are of type
2371 BaseType_t. */
2372 return uxCurrentNumberOfTasks;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002373}
2374/*-----------------------------------------------------------*/
2375
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002376char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002377{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002378TCB_t *pxTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002379
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002380 /* If null is passed in here then the name of the calling task is being
2381 queried. */
2382 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
2383#ifdef CONFIG_DMALLOC
2384 if (pxTCB == NULL)
2385 return NULL;
2386#else
2387 configASSERT( pxTCB );
2388#endif
2389 return &( pxTCB->pcTaskName[ 0 ] );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002390}
Kelvin Zhang7f929772021-12-31 17:58:17 +08002391
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002392#if ( INCLUDE_xTaskGetHandle == 1 )
2393
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002394 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] )
2395 {
2396 TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL;
2397 UBaseType_t x;
2398 char cNextChar;
2399 BaseType_t xBreakLoop;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002400
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002401 /* This function is called with the scheduler suspended. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002402
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002403 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
2404 {
2405 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002406
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002407 do
2408 {
2409 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002410
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002411 /* Check each character in the name looking for a match or
2412 mismatch. */
2413 xBreakLoop = pdFALSE;
2414 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
2415 {
2416 cNextChar = pxNextTCB->pcTaskName[ x ];
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002417
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002418 if( cNextChar != pcNameToQuery[ x ] )
2419 {
2420 /* Characters didn't match. */
2421 xBreakLoop = pdTRUE;
2422 }
2423 else if( cNextChar == ( char ) 0x00 )
2424 {
2425 /* Both strings terminated, a match must have been
2426 found. */
2427 pxReturn = pxNextTCB;
2428 xBreakLoop = pdTRUE;
2429 }
2430 else
2431 {
2432 mtCOVERAGE_TEST_MARKER();
2433 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002434
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002435 if( xBreakLoop != pdFALSE )
2436 {
2437 break;
2438 }
2439 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002440
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002441 if( pxReturn != NULL )
2442 {
2443 /* The handle has been found. */
2444 break;
2445 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002446
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002447 } while( pxNextTCB != pxFirstTCB );
2448 }
2449 else
2450 {
2451 mtCOVERAGE_TEST_MARKER();
2452 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002453
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002454 return pxReturn;
2455 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002456
2457#endif /* INCLUDE_xTaskGetHandle */
2458/*-----------------------------------------------------------*/
2459
2460#if ( INCLUDE_xTaskGetHandle == 1 )
2461
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002462 TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2463 {
2464 UBaseType_t uxQueue = configMAX_PRIORITIES;
2465 TCB_t* pxTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002466
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002467 /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
2468 configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002469
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002470 vTaskSuspendAll();
2471 {
2472 /* Search the ready lists. */
2473 do
2474 {
2475 uxQueue--;
2476 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002477
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002478 if( pxTCB != NULL )
2479 {
2480 /* Found the handle. */
2481 break;
2482 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002483
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002484 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002485
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002486 /* Search the delayed lists. */
2487 if( pxTCB == NULL )
2488 {
2489 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
2490 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002491
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002492 if( pxTCB == NULL )
2493 {
2494 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
2495 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002496
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002497 #if ( INCLUDE_vTaskSuspend == 1 )
2498 {
2499 if( pxTCB == NULL )
2500 {
2501 /* Search the suspended list. */
2502 pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
2503 }
2504 }
2505 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002506
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002507 #if( INCLUDE_vTaskDelete == 1 )
2508 {
2509 if( pxTCB == NULL )
2510 {
2511 /* Search the deleted list. */
2512 pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
2513 }
2514 }
2515 #endif
2516 }
2517 ( void ) xTaskResumeAll();
2518
2519 return pxTCB;
2520 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002521
2522#endif /* INCLUDE_xTaskGetHandle */
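/* Example usage - an illustrative sketch only, not part of the kernel. The
task name "RxTask" and the new priority are assumptions made purely for
illustration; INCLUDE_xTaskGetHandle (and, for the priority change,
INCLUDE_vTaskPrioritySet) must be set to 1 in FreeRTOSConfig.h:

	void vRaisePeerPriority( void )
	{
	TaskHandle_t xHandle;

		// Look the task up by the name it was given when it was created.
		xHandle = xTaskGetHandle( "RxTask" );

		if( xHandle != NULL )
		{
			// The task was found in one of the state lists.
			vTaskPrioritySet( xHandle, tskIDLE_PRIORITY + 2 );
		}
	}
*/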
2523/*-----------------------------------------------------------*/
2524
2525#if ( configUSE_TRACE_FACILITY == 1 )
2526
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002527 UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
2528 {
2529 UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002530
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002531 vTaskSuspendAll();
2532 {
2533 /* Is there a space in the array for each task in the system? */
2534 if( uxArraySize >= uxCurrentNumberOfTasks )
2535 {
2536					/* Fill in a TaskStatus_t structure with information on each
2537 task in the Ready state. */
2538 do
2539 {
2540 uxQueue--;
2541 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002542
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002543 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002544
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002545				/* Fill in a TaskStatus_t structure with information on each
2546 task in the Blocked state. */
2547 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
2548 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002549
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002550 #if( INCLUDE_vTaskDelete == 1 )
2551 {
2552						/* Fill in a TaskStatus_t structure with information on
2553 each task that has been deleted but not yet cleaned up. */
2554 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
2555 }
2556 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002557
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002558 #if ( INCLUDE_vTaskSuspend == 1 )
2559 {
2560						/* Fill in a TaskStatus_t structure with information on
2561 each task in the Suspended state. */
2562 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
2563 }
2564 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002565
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002566 #if ( configGENERATE_RUN_TIME_STATS == 1)
2567 {
2568 if( pulTotalRunTime != NULL )
2569 {
2570 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2571 portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
2572 #else
2573 *pulTotalRunTime = (uint32_t)portGET_RUN_TIME_COUNTER_VALUE();
2574 #endif
2575 }
2576 }
2577 #else
2578 {
2579 if( pulTotalRunTime != NULL )
2580 {
2581 *pulTotalRunTime = 0;
2582 }
2583 }
2584 #endif
2585 }
2586 else
2587 {
2588 mtCOVERAGE_TEST_MARKER();
2589 }
2590 }
2591 ( void ) xTaskResumeAll();
2592
2593 return uxTask;
2594 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002595
2596#endif /* configUSE_TRACE_FACILITY */
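/* Example usage - an illustrative sketch only, not part of the kernel. It
assumes configUSE_TRACE_FACILITY is set to 1 and that the heap implementation
in use provides pvPortMalloc()/vPortFree():

	void vProcessTaskStates( void )
	{
	TaskStatus_t *pxStatusArray;
	UBaseType_t uxArraySize, uxReturned, x;
	uint32_t ulTotalRunTime;

		// Allocate one TaskStatus_t structure for each task currently in existence.
		uxArraySize = uxTaskGetNumberOfTasks();
		pxStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t ) );

		if( pxStatusArray != NULL )
		{
			// Generate raw status information about each task.
			uxReturned = uxTaskGetSystemState( pxStatusArray, uxArraySize, &ulTotalRunTime );

			for( x = 0; x < uxReturned; x++ )
			{
				// pxStatusArray[ x ].pcTaskName, .eCurrentState,
				// .usStackHighWaterMark etc. can be inspected here.
			}

			vPortFree( pxStatusArray );
		}
	}
*/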
2597/*----------------------------------------------------------*/
2598
2599#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
2600
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002601 TaskHandle_t xTaskGetIdleTaskHandle( void )
2602 {
2603 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
2604 started, then xIdleTaskHandle will be NULL. */
2605 configASSERT( ( xIdleTaskHandle != NULL ) );
2606 return xIdleTaskHandle;
2607 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002608
2609#endif /* INCLUDE_xTaskGetIdleTaskHandle */
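/* Example usage - an illustrative sketch only, not part of the kernel. It
assumes INCLUDE_xTaskGetIdleTaskHandle and INCLUDE_uxTaskGetStackHighWaterMark
are both set to 1, and that the scheduler has already been started:

	UBaseType_t uxIdleTaskFreeStack;

	// The idle task handle can be passed to any API function that takes a
	// task handle, for example to monitor the idle task's stack usage.
	uxIdleTaskFreeStack = uxTaskGetStackHighWaterMark( xTaskGetIdleTaskHandle() );
*/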
2610/*----------------------------------------------------------*/
2611
2612/* This conditional compilation should use inequality to 0, not equality to 1.
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002613This is to ensure vTaskStepTick() is available when user defined low power mode
2614implementations require configUSE_TICKLESS_IDLE to be set to a value other than
26151. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002616#if ( configUSE_TICKLESS_IDLE != 0 )
2617
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002618 void vTaskStepTick( const TickType_t xTicksToJump )
2619 {
2620 /* Correct the tick count value after a period during which the tick
2621 was suppressed. Note this does *not* call the tick hook function for
2622 each stepped tick. */
2623 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
2624 xTickCount += xTicksToJump;
2625 traceINCREASE_TICK_COUNT( xTicksToJump );
2626 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002627
2628#endif /* configUSE_TICKLESS_IDLE */
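/* Illustrative sketch only, not part of the kernel: the general shape of a
user defined portSUPPRESS_TICKS_AND_SLEEP() implementation that uses
vTaskStepTick() to wind the tick count forward after a low power period.
prvStopTickInterrupt(), prvSleep() and prvGetSuppressedTickCount() are
hypothetical helper names used purely for illustration:

	void vApplicationSleep( TickType_t xExpectedIdleTime )
	{
	TickType_t xSuppressedTicks;

		// Stop the tick interrupt and enter the low power state for at most
		// xExpectedIdleTime tick periods (hypothetical helpers).
		prvStopTickInterrupt();
		prvSleep( xExpectedIdleTime );

		// On exiting the low power state, determine how many whole tick
		// periods actually passed, then correct the kernel's tick count.
		xSuppressedTicks = prvGetSuppressedTickCount();
		vTaskStepTick( xSuppressedTicks );
	}
*/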
2629/*----------------------------------------------------------*/
2630
2631#if ( INCLUDE_xTaskAbortDelay == 1 )
2632
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002633 BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
2634 {
2635 TCB_t *pxTCB = xTask;
2636 BaseType_t xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002637
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002638 configASSERT( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002639
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002640 vTaskSuspendAll();
2641 {
2642 /* A task can only be prematurely removed from the Blocked state if
2643 it is actually in the Blocked state. */
2644 if( eTaskGetState( xTask ) == eBlocked )
2645 {
2646 xReturn = pdPASS;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002647
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002648 /* Remove the reference to the task from the blocked list. An
2649 interrupt won't touch the xStateListItem because the
2650 scheduler is suspended. */
2651 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002652
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002653 /* Is the task waiting on an event also? If so remove it from
2654 the event list too. Interrupts can touch the event list item,
2655 even though the scheduler is suspended, so a critical section
2656 is used. */
2657 taskENTER_CRITICAL();
2658 {
2659 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2660 {
2661 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2662 pxTCB->ucDelayAborted = pdTRUE;
2663 }
2664 else
2665 {
2666 mtCOVERAGE_TEST_MARKER();
2667 }
2668 }
2669 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002670
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002671 /* Place the unblocked task into the appropriate ready list. */
2672 prvAddTaskToReadyList( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002673
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002674 /* A task being unblocked cannot cause an immediate context
2675 switch if preemption is turned off. */
2676 #if ( configUSE_PREEMPTION == 1 )
2677 {
2678 /* Preemption is on, but a context switch should only be
2679 performed if the unblocked task has a priority that is
2680							higher than that of the currently executing task. */
2681 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
2682 {
2683 /* Pend the yield to be performed when the scheduler
2684 is unsuspended. */
2685 xYieldPending = pdTRUE;
2686 }
2687 else
2688 {
2689 mtCOVERAGE_TEST_MARKER();
2690 }
2691 }
2692 #endif /* configUSE_PREEMPTION */
2693 }
2694 else
2695 {
2696 xReturn = pdFAIL;
2697 }
2698 }
2699 ( void ) xTaskResumeAll();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002700
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002701 return xReturn;
2702 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002703
2704#endif /* INCLUDE_xTaskAbortDelay */
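/* Example usage - an illustrative sketch only, not part of the kernel. It
assumes INCLUDE_xTaskAbortDelay is set to 1 and that xWaitingTask is the
handle of a task that may currently be in the Blocked state:

	if( xTaskAbortDelay( xWaitingTask ) == pdPASS )
	{
		// The task was in the Blocked state and has been moved to the Ready
		// state - the call it was blocked on will return prematurely with a
		// timeout/error status.
	}
	else
	{
		// pdFAIL - the task was not in the Blocked state.
	}
*/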
2705/*----------------------------------------------------------*/
2706
2707BaseType_t xTaskIncrementTick( void )
2708{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002709TCB_t * pxTCB;
2710TickType_t xItemValue;
2711BaseType_t xSwitchRequired = pdFALSE;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002712
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002713 /* Called by the portable layer each time a tick interrupt occurs.
2714 Increments the tick then checks to see if the new tick value will cause any
2715 tasks to be unblocked. */
2716 traceTASK_INCREMENT_TICK( xTickCount );
2717 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
2718 {
2719 /* Minor optimisation. The tick count cannot change in this
2720 block. */
2721 const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002722
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002723 /* Increment the RTOS tick, switching the delayed and overflowed
2724 delayed lists if it wraps to 0. */
2725 xTickCount = xConstTickCount;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002726
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002727 if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
2728 {
2729 taskSWITCH_DELAYED_LISTS();
2730 }
2731 else
2732 {
2733 mtCOVERAGE_TEST_MARKER();
2734 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002735
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002736 /* See if this tick has made a timeout expire. Tasks are stored in
2737 the queue in the order of their wake time - meaning once one task
2738 has been found whose block time has not expired there is no need to
2739 look any further down the list. */
2740 if( xConstTickCount >= xNextTaskUnblockTime )
2741 {
2742 for( ;; )
2743 {
2744 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
2745 {
2746 /* The delayed list is empty. Set xNextTaskUnblockTime
2747 to the maximum possible value so it is extremely
2748 unlikely that the
2749 if( xTickCount >= xNextTaskUnblockTime ) test will pass
2750 next time through. */
2751 xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2752 break;
2753 }
2754 else
2755 {
2756 /* The delayed list is not empty, get the value of the
2757 item at the head of the delayed list. This is the time
2758 at which the task at the head of the delayed list must
2759 be removed from the Blocked state. */
2760 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2761 xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002762
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002763 if( xConstTickCount < xItemValue )
2764 {
2765 /* It is not time to unblock this item yet, but the
2766 item value is the time at which the task at the head
2767 of the blocked list must be removed from the Blocked
2768 state - so record the item value in
2769 xNextTaskUnblockTime. */
2770 xNextTaskUnblockTime = xItemValue;
2771											break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
2772 }
2773 else
2774 {
2775 mtCOVERAGE_TEST_MARKER();
2776 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002777
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002778 /* It is time to remove the item from the Blocked state. */
2779 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002780
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002781 /* Is the task waiting on an event also? If so remove
2782 it from the event list. */
2783 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2784 {
2785 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2786 }
2787 else
2788 {
2789 mtCOVERAGE_TEST_MARKER();
2790 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002791
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002792 /* Place the unblocked task into the appropriate ready
2793 list. */
2794 prvAddTaskToReadyList( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002795
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002796 /* A task being unblocked cannot cause an immediate
2797 context switch if preemption is turned off. */
2798 #if ( configUSE_PREEMPTION == 1 )
2799 {
2800 /* Preemption is on, but a context switch should
2801 only be performed if the unblocked task has a
2802 priority that is equal to or higher than the
2803 currently executing task. */
2804 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
2805 {
2806 xSwitchRequired = pdTRUE;
2807 }
2808 else
2809 {
2810 mtCOVERAGE_TEST_MARKER();
2811 }
2812 }
2813 #endif /* configUSE_PREEMPTION */
2814 }
2815 }
2816 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002817
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002818 /* Tasks of equal priority to the currently running task will share
2819 processing time (time slice) if preemption is on, and the application
2820 writer has not explicitly turned time slicing off. */
2821 #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
2822 {
2823 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 )
2824 {
2825 xSwitchRequired = pdTRUE;
2826 }
2827 else
2828 {
2829 mtCOVERAGE_TEST_MARKER();
2830 }
2831 }
2832 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002833
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002834 #if ( configUSE_TICK_HOOK == 1 )
2835 {
2836 /* Guard against the tick hook being called when the pended tick
2837 count is being unwound (when the scheduler is being unlocked). */
2838 if( uxPendedTicks == ( UBaseType_t ) 0U )
2839 {
2840 vApplicationTickHook();
2841 }
2842 else
2843 {
2844 mtCOVERAGE_TEST_MARKER();
2845 }
2846 }
2847 #endif /* configUSE_TICK_HOOK */
2848 }
2849 else
2850 {
2851 ++uxPendedTicks;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002852
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002853 /* The tick hook gets called at regular intervals, even if the
2854 scheduler is locked. */
2855 #if ( configUSE_TICK_HOOK == 1 )
2856 {
2857 vApplicationTickHook();
2858 }
2859 #endif
2860 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002861
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002862 #if ( configUSE_PREEMPTION == 1 )
2863 {
2864 if( xYieldPending != pdFALSE )
2865 {
2866 xSwitchRequired = pdTRUE;
2867 }
2868 else
2869 {
2870 mtCOVERAGE_TEST_MARKER();
2871 }
2872 }
2873 #endif /* configUSE_PREEMPTION */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002874
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002875 return xSwitchRequired;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002876}
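/* Illustrative sketch only, not part of the kernel: the pattern a port's tick
interrupt handler typically follows when configUSE_PREEMPTION is 1. The
handler name is hypothetical and the mechanism used to pend the context
switch is port specific (for example setting the PendSV bit on Cortex-M):

	void vAnExampleTickInterruptHandler( void )
	{
		if( xTaskIncrementTick() != pdFALSE )
		{
			// A context switch is required - pend it here using the port's
			// own mechanism so it occurs on exit from this interrupt.
		}
	}
*/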
2877/*-----------------------------------------------------------*/
2878
2879#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2880
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002881 void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
2882 {
2883 TCB_t *xTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002884
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002885 /* If xTask is NULL then it is the task hook of the calling task that is
2886 getting set. */
2887 if( xTask == NULL )
2888 {
2889 xTCB = ( TCB_t * ) pxCurrentTCB;
2890 }
2891 else
2892 {
2893 xTCB = xTask;
2894 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002895
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002896 /* Save the hook function in the TCB. A critical section is required as
2897 the value can be accessed from an interrupt. */
2898 taskENTER_CRITICAL();
2899 {
2900 xTCB->pxTaskTag = pxHookFunction;
2901 }
2902 taskEXIT_CRITICAL();
2903 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002904
2905#endif /* configUSE_APPLICATION_TASK_TAG */
2906/*-----------------------------------------------------------*/
2907
2908#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2909
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002910 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
2911 {
2912 TCB_t *pxTCB;
2913 TaskHookFunction_t xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002914
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002915		/* If xTask is NULL then the tag of the calling task is being queried. */
2916 pxTCB = prvGetTCBFromHandle( xTask );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002917
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002918		/* Read the hook function from the TCB. A critical section is required as
2919 the value can be accessed from an interrupt. */
2920 taskENTER_CRITICAL();
2921 {
2922 xReturn = pxTCB->pxTaskTag;
2923 }
2924 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002925
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002926 return xReturn;
2927 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002928
2929#endif /* configUSE_APPLICATION_TASK_TAG */
2930/*-----------------------------------------------------------*/
2931
2932#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2933
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002934 TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
2935 {
2936 TCB_t *pxTCB;
2937 TaskHookFunction_t xReturn;
2938 UBaseType_t uxSavedInterruptStatus;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002939
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002940		/* If xTask is NULL then the tag of the calling task is being queried. */
2941 pxTCB = prvGetTCBFromHandle( xTask );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002942
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002943		/* Read the hook function from the TCB. A critical section is required as
2944 the value can be accessed from an interrupt. */
2945 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
2946 {
2947 xReturn = pxTCB->pxTaskTag;
2948 }
2949 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002950
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002951 return xReturn;
2952 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002953
2954#endif /* configUSE_APPLICATION_TASK_TAG */
2955/*-----------------------------------------------------------*/
2956
2957#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2958
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002959 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
2960 {
2961 TCB_t *xTCB;
2962 BaseType_t xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002963
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002964 /* If xTask is NULL then we are calling our own task hook. */
2965 if( xTask == NULL )
2966 {
2967 xTCB = pxCurrentTCB;
2968 }
2969 else
2970 {
2971 xTCB = xTask;
2972 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002973
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002974 if( xTCB->pxTaskTag != NULL )
2975 {
2976 xReturn = xTCB->pxTaskTag( pvParameter );
2977 }
2978 else
2979 {
2980 xReturn = pdFAIL;
2981 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002982
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002983 return xReturn;
2984 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002985
2986#endif /* configUSE_APPLICATION_TASK_TAG */
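/* Example usage - an illustrative sketch only, not part of the kernel. It
assumes configUSE_APPLICATION_TASK_TAG is set to 1; prvExampleTagFunction()
is a hypothetical application supplied function with the TaskHookFunction_t
prototype:

	static BaseType_t prvExampleTagFunction( void *pvParameter )
	{
		// Perform an application defined action (hypothetical example).
		( void ) pvParameter;
		return pdPASS;
	}

	void vAttachAndCallTag( void )
	{
		// Attach the tag/hook function to the calling task (NULL == self)...
		vTaskSetApplicationTaskTag( NULL, prvExampleTagFunction );

		// ...then invoke it, passing NULL as its parameter.
		( void ) xTaskCallApplicationTaskHook( NULL, NULL );
	}
*/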
2987/*-----------------------------------------------------------*/
2988
2989void vTaskSwitchContext( void )
2990{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08002991 if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE )
2992 {
2993 /* The scheduler is currently suspended - do not allow a context
2994 switch. */
2995 xYieldPending = pdTRUE;
2996 }
2997 else
2998 {
2999 xYieldPending = pdFALSE;
3000 traceTASK_SWITCHED_OUT();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003001
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003002 #if ( configGENERATE_RUN_TIME_STATS == 1 )
3003 {
3004 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
3005 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
3006 #else
3007 ulTotalRunTime = (uint32_t)portGET_RUN_TIME_COUNTER_VALUE();
3008 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003009
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003010 /* Add the amount of time the task has been running to the
3011 accumulated time so far. The time the task started running was
3012 stored in ulTaskSwitchedInTime. Note that there is no overflow
3013 protection here so count values are only valid until the timer
3014 overflows. The guard against negative values is to protect
3015 against suspect run time stat counter implementations - which
3016 are provided by the application, not the kernel. */
3017 if( ulTotalRunTime > ulTaskSwitchedInTime )
3018 {
3019 pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );
shijie.xionga23e8242024-02-27 14:57:02 +08003020#if CONFIG_FTRACE
3021 vTraceSwitchContext((uint32_t)pxCurrentTCB->uxTCBNumber);
3022#endif
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003023 }
3024 else
3025 {
3026 mtCOVERAGE_TEST_MARKER();
3027 }
3028 ulTaskSwitchedInTime = ulTotalRunTime;
3029 }
3030 #endif /* configGENERATE_RUN_TIME_STATS */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003031
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003032 /* Check for stack overflow, if configured. */
3033 taskCHECK_FOR_STACK_OVERFLOW();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003034
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003035 /* Before the currently running task is switched out, save its errno. */
3036 #if( configUSE_POSIX_ERRNO == 1 )
3037 {
3038 pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
3039 }
3040 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003041
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003042 /* Select a new task to run using either the generic C or port
3043 optimised asm code. */
3044 taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3045 traceTASK_SWITCHED_IN();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003046
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003047 /* After the new task is switched in, update the global errno. */
3048 #if( configUSE_POSIX_ERRNO == 1 )
3049 {
3050 FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
3051 }
3052 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003053
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003054 #if ( configUSE_NEWLIB_REENTRANT == 1 )
3055 {
3056 /* Switch Newlib's _impure_ptr variable to point to the _reent
3057 structure specific to this task. */
3058 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
3059 }
3060 #endif /* configUSE_NEWLIB_REENTRANT */
3061 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003062}
3063/*-----------------------------------------------------------*/
3064
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003065void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003066{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003067 configASSERT( pxEventList );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003068
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003069 /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
3070 SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003071
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003072 /* Place the event list item of the TCB in the appropriate event list.
3073 This is placed in the list in priority order so the highest priority task
3074 is the first to be woken by the event. The queue that contains the event
3075 list is locked, preventing simultaneous access from interrupts. */
3076 vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003077
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003078 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003079}
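/* Illustrative sketch only, not part of the kernel: the calling pattern used
by kernel objects (such as queues) when blocking the calling task on an event
list. pxObject, its xTasksWaitingToReceive member and the
prvLockObject()/prvUnlockObject() helpers are hypothetical placeholders for
the real caller's own object and locking:

	vTaskSuspendAll();
	prvLockObject( pxObject );	// hypothetical object specific lock
	{
		// With the object locked and the scheduler suspended it is safe to
		// place the calling task on the event list and the delayed list.
		vTaskPlaceOnEventList( &( pxObject->xTasksWaitingToReceive ), xTicksToWait );
	}
	prvUnlockObject( pxObject );

	if( xTaskResumeAll() == pdFALSE )
	{
		// Force a switch away from this task, which is now Blocked, if
		// resuming the scheduler did not already perform one.
		portYIELD_WITHIN_API();
	}
*/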
3080/*-----------------------------------------------------------*/
3081
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003082void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003083{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003084 configASSERT( pxEventList );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003085
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003086 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3087 the event groups implementation. */
3088 configASSERT( uxSchedulerSuspended != 0 );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003089
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003090 /* Store the item value in the event list item. It is safe to access the
3091 event list item here as interrupts won't access the event list item of a
3092 task that is not in the Blocked state. */
3093 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003094
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003095 /* Place the event list item of the TCB at the end of the appropriate event
3096 list. It is safe to access the event list here because it is part of an
3097 event group implementation - and interrupts don't access event groups
3098 directly (instead they access them indirectly by pending function calls to
3099 the task level). */
3100 vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003101
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003102 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003103}
3104/*-----------------------------------------------------------*/
3105
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003106#if( configUSE_TIMERS == 1 )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003107
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003108 void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
3109 {
3110 configASSERT( pxEventList );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003111
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003112 /* This function should not be called by application code hence the
3113 'Restricted' in its name. It is not part of the public API. It is
3114 designed for use by kernel code, and has special calling requirements -
3115 it should be called with the scheduler suspended. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003116
3117
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003118 /* Place the event list item of the TCB in the appropriate event list.
3119	In this case it is assumed that this is the only task that is going to
3120 be waiting on this event list, so the faster vListInsertEnd() function
3121 can be used in place of vListInsert. */
3122 vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003123
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003124 /* If the task should block indefinitely then set the block time to a
3125 value that will be recognised as an indefinite delay inside the
3126 prvAddCurrentTaskToDelayedList() function. */
3127 if( xWaitIndefinitely != pdFALSE )
3128 {
3129 xTicksToWait = portMAX_DELAY;
3130 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003131
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003132 traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
3133 prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
3134 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003135
3136#endif /* configUSE_TIMERS */
3137/*-----------------------------------------------------------*/
3138
3139BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
3140{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003141TCB_t *pxUnblockedTCB;
3142BaseType_t xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003143
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003144 /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
3145 called from a critical section within an ISR. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003146
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003147 /* The event list is sorted in priority order, so the first in the list can
3148 be removed as it is known to be the highest priority. Remove the TCB from
3149 the delayed list, and add it to the ready list.
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003150
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003151 If an event is for a queue that is locked then this function will never
3152 get called - the lock count on the queue will get modified instead. This
3153 means exclusive access to the event list is guaranteed here.
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003154
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003155 This function assumes that a check has already been made to ensure that
3156 pxEventList is not empty. */
3157 pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3158 configASSERT( pxUnblockedTCB );
3159 ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003160
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003161 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
3162 {
3163 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
3164 prvAddTaskToReadyList( pxUnblockedTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003165
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003166 #if( configUSE_TICKLESS_IDLE != 0 )
3167 {
3168 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
3169 might be set to the blocked task's time out time. If the task is
3170 unblocked for a reason other than a timeout xNextTaskUnblockTime is
3171 normally left unchanged, because it is automatically reset to a new
3172 value when the tick count equals xNextTaskUnblockTime. However if
3173 tickless idling is used it might be more important to enter sleep mode
3174 at the earliest possible time - so reset xNextTaskUnblockTime here to
3175 ensure it is updated at the earliest possible time. */
3176 prvResetNextTaskUnblockTime();
3177 }
3178 #endif
3179 }
3180 else
3181 {
3182 /* The delayed and ready lists cannot be accessed, so hold this task
3183 pending until the scheduler is resumed. */
3184 vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
3185 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003186
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003187 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
3188 {
3189 /* Return true if the task removed from the event list has a higher
3190 priority than the calling task. This allows the calling task to know if
3191 it should force a context switch now. */
3192 xReturn = pdTRUE;
3193
3194 /* Mark that a yield is pending in case the user is not using the
3195 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
3196 xYieldPending = pdTRUE;
3197 }
3198 else
3199 {
3200 xReturn = pdFALSE;
3201 }
3202
3203 return xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003204}
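/* Illustrative sketch only, not part of the kernel: how an interrupt safe
"give" style function typically uses the return value of
xTaskRemoveFromEventList(). pxObject, its xTasksWaitingToReceive member and
pxHigherPriorityTaskWoken are representative names only:

	if( listLIST_IS_EMPTY( &( pxObject->xTasksWaitingToReceive ) ) == pdFALSE )
	{
		if( xTaskRemoveFromEventList( &( pxObject->xTasksWaitingToReceive ) ) != pdFALSE )
		{
			// The unblocked task has a priority above that of the
			// interrupted task, so tell the application writer that a
			// context switch should be requested before the ISR exits.
			if( pxHigherPriorityTaskWoken != NULL )
			{
				*pxHigherPriorityTaskWoken = pdTRUE;
			}
		}
	}
*/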
3205/*-----------------------------------------------------------*/
3206
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003207void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003208{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003209TCB_t *pxUnblockedTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003210
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003211 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3212 the event flags implementation. */
3213 configASSERT( uxSchedulerSuspended != pdFALSE );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003214
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003215 /* Store the new item value in the event list. */
3216 listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003217
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003218	/* Remove the event list item from the event flag. Interrupts do not access
3219 event flags. */
3220 pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3221 configASSERT( pxUnblockedTCB );
3222 ( void ) uxListRemove( pxEventListItem );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003223
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003224 /* Remove the task from the delayed list and add it to the ready list. The
3225 scheduler is suspended so interrupts will not be accessing the ready
3226 lists. */
3227 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
3228 prvAddTaskToReadyList( pxUnblockedTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003229
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003230 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
3231 {
3232 /* The unblocked task has a priority above that of the calling task, so
3233 a context switch is required. This function is called with the
3234 scheduler suspended so xYieldPending is set so the context switch
3235		occurs as soon as the scheduler is resumed (unsuspended). */
3236 xYieldPending = pdTRUE;
3237 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003238}
3239/*-----------------------------------------------------------*/
3240
3241void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
3242{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003243 configASSERT( pxTimeOut );
3244 taskENTER_CRITICAL();
3245 {
3246 pxTimeOut->xOverflowCount = xNumOfOverflows;
3247 pxTimeOut->xTimeOnEntering = xTickCount;
3248 }
3249 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003250}
3251/*-----------------------------------------------------------*/
3252
3253void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
3254{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003255 /* For internal use only as it does not use a critical section. */
3256 pxTimeOut->xOverflowCount = xNumOfOverflows;
3257 pxTimeOut->xTimeOnEntering = xTickCount;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003258}
3259/*-----------------------------------------------------------*/
3260
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003261BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003262{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003263BaseType_t xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003264
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003265 configASSERT( pxTimeOut );
3266 configASSERT( pxTicksToWait );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003267
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003268 taskENTER_CRITICAL();
3269 {
3270 /* Minor optimisation. The tick count cannot change in this block. */
3271 const TickType_t xConstTickCount = xTickCount;
3272 const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003273
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003274 #if( INCLUDE_xTaskAbortDelay == 1 )
3275 if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
3276 {
3277 /* The delay was aborted, which is not the same as a time out,
3278 but has the same result. */
3279 pxCurrentTCB->ucDelayAborted = pdFALSE;
3280 xReturn = pdTRUE;
3281 }
3282 else
3283 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003284
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003285 #if ( INCLUDE_vTaskSuspend == 1 )
3286 if( *pxTicksToWait == portMAX_DELAY )
3287 {
3288 /* If INCLUDE_vTaskSuspend is set to 1 and the block time
3289 specified is the maximum block time then the task should block
3290 indefinitely, and therefore never time out. */
3291 xReturn = pdFALSE;
3292 }
3293 else
3294 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003295
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003296 if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
3297 {
3298			/* The tick count is greater than the time at which
3299			vTaskSetTimeOutState() was called, but has also overflowed since
3300			then. The count must therefore have wrapped all the way around and
3301			gone past the recorded entry time again, so more than the timeout
3302			period has passed since vTaskSetTimeOutState() was called. */
3303 xReturn = pdTRUE;
3304 }
3305 else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
3306 {
3307 /* Not a genuine timeout. Adjust parameters for time remaining. */
3308 *pxTicksToWait -= xElapsedTime;
3309 vTaskInternalSetTimeOutState( pxTimeOut );
3310 xReturn = pdFALSE;
3311 }
3312 else
3313 {
3314 *pxTicksToWait = 0;
3315 xReturn = pdTRUE;
3316 }
3317 }
3318 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003319
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003320 return xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003321}
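/* Example usage - an illustrative sketch only, not part of the kernel. It
shows how vTaskSetTimeOutState() and xTaskCheckForTimeOut() cooperate to
manage a single total timeout across repeated blocking attempts.
prvReceiveBytes() is a hypothetical driver function used for illustration:

	size_t xReceive( uint8_t *pucBuffer, size_t uxWanted, TickType_t xTicksToWait )
	{
	TimeOut_t xTimeOut;
	size_t uxReceived = 0;

		// Record the tick count at which the operation started.
		vTaskSetTimeOutState( &xTimeOut );

		while( uxReceived < uxWanted )
		{
			// Attempt to receive more data, blocking for no longer than the
			// time that remains of the original timeout (hypothetical helper).
			uxReceived += prvReceiveBytes( &( pucBuffer[ uxReceived ] ), uxWanted - uxReceived, xTicksToWait );

			if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
			{
				// The total block time has expired (or the delay was aborted).
				break;
			}

			// Otherwise xTicksToWait has been adjusted to the time remaining.
		}

		return uxReceived;
	}
*/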
3322/*-----------------------------------------------------------*/
3323
3324void vTaskMissedYield( void )
3325{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003326 xYieldPending = pdTRUE;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003327}
3328/*-----------------------------------------------------------*/
3329
3330#if ( configUSE_TRACE_FACILITY == 1 )
3331
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003332 UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
3333 {
3334 UBaseType_t uxReturn;
3335 TCB_t const *pxTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003336
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003337 if( xTask != NULL )
3338 {
3339 pxTCB = xTask;
3340 uxReturn = pxTCB->uxTCBNumber;
3341 }
3342 else
3343 {
3344 uxReturn = 0U;
3345 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003346
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003347 return uxReturn;
3348 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003349
3350#endif /* configUSE_TRACE_FACILITY */
3351/*-----------------------------------------------------------*/
3352
3353#if ( configUSE_TRACE_FACILITY == 1 )
3354
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003355 void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
3356 {
3357 TCB_t * pxTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003358
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003359 if( xTask != NULL )
3360 {
3361 pxTCB = xTask;
3362 pxTCB->uxTaskNumber = uxHandle;
3363 }
3364 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003365
3366#endif /* configUSE_TRACE_FACILITY */
3367
3368/*
3369 * -----------------------------------------------------------
3370 * The Idle task.
3371 * ----------------------------------------------------------
3372 *
3373 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
3374 * language extensions. The equivalent prototype for this function is:
3375 *
3376 * void prvIdleTask( void *pvParameters );
3377 *
3378 */
3379static portTASK_FUNCTION( prvIdleTask, pvParameters )
3380{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003381 /* Stop warnings. */
3382 ( void ) pvParameters;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003383
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003384 /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
3385 SCHEDULER IS STARTED. **/
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003386
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003387	/* A task that has a secure context might delete itself, in which case
3388	the idle task is responsible for deleting that task's secure context, if
3389	any - so allocate a secure context for the idle task here. */
3390 portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003391
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003392 for( ;; )
3393 {
3394 /* See if any tasks have deleted themselves - if so then the idle task
3395 is responsible for freeing the deleted task's TCB and stack. */
3396 prvCheckTasksWaitingTermination();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003397
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003398 #if ( configUSE_PREEMPTION == 0 )
3399 {
3400 /* If we are not using preemption we keep forcing a task switch to
3401 see if any other task has become available. If we are using
3402 preemption we don't need to do this as any task becoming available
3403 will automatically get the processor anyway. */
3404 taskYIELD();
3405 }
3406 #endif /* configUSE_PREEMPTION */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003407
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003408 #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
3409 {
3410 /* When using preemption tasks of equal priority will be
3411 timesliced. If a task that is sharing the idle priority is ready
3412 to run then the idle task should yield before the end of the
3413 timeslice.
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003414
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003415 A critical region is not required here as we are just reading from
3416 the list, and an occasional incorrect value will not matter. If
3417 the ready list at the idle priority contains more than one task
3418 then a task other than the idle task is ready to execute. */
3419 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
3420 {
3421 taskYIELD();
3422 }
3423 else
3424 {
3425 mtCOVERAGE_TEST_MARKER();
3426 }
3427 }
3428 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003429
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003430 #if ( configUSE_IDLE_HOOK == 1 )
3431 {
3432 extern void vApplicationIdleHook( void );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003433
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003434 /* Call the user defined function from within the idle task. This
3435 allows the application designer to add background functionality
3436 without the overhead of a separate task.
3437 NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
3438 CALL A FUNCTION THAT MIGHT BLOCK. */
3439 vApplicationIdleHook();
3440 }
3441 #endif /* configUSE_IDLE_HOOK */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003442
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003443 /* This conditional compilation should use inequality to 0, not equality
3444 to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
3445 user defined low power mode implementations require
3446 configUSE_TICKLESS_IDLE to be set to a value other than 1. */
3447 #if ( configUSE_TICKLESS_IDLE != 0 )
3448 {
3449 TickType_t xExpectedIdleTime;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003450
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003451 /* It is not desirable to suspend then resume the scheduler on
3452 each iteration of the idle task. Therefore, a preliminary
3453 test of the expected idle time is performed without the
3454 scheduler suspended. The result here is not necessarily
3455 valid. */
3456 xExpectedIdleTime = prvGetExpectedIdleTime();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003457
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003458 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3459 {
3460 vTaskSuspendAll();
3461 {
3462 /* Now the scheduler is suspended, the expected idle
3463 time can be sampled again, and this time its value can
3464 be used. */
3465 configASSERT( xNextTaskUnblockTime >= xTickCount );
3466 xExpectedIdleTime = prvGetExpectedIdleTime();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003467
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003468 /* Define the following macro to set xExpectedIdleTime to 0
3469 if the application does not want
3470 portSUPPRESS_TICKS_AND_SLEEP() to be called. */
3471 configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );
3472
3473 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3474 {
3475 traceLOW_POWER_IDLE_BEGIN();
3476 portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
3477 traceLOW_POWER_IDLE_END();
3478 }
3479 else
3480 {
3481 mtCOVERAGE_TEST_MARKER();
3482 }
3483 }
3484 ( void ) xTaskResumeAll();
3485 }
3486 else
3487 {
3488 mtCOVERAGE_TEST_MARKER();
3489 }
3490 }
3491 #endif /* configUSE_TICKLESS_IDLE */
3492 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003493}
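/* Example usage - an illustrative sketch only, not part of the kernel. It
assumes configUSE_IDLE_HOOK is set to 1, in which case the application must
provide this function:

	void vApplicationIdleHook( void )
	{
		// Called on every iteration of the idle task. It must not call any
		// API function that could block - a typical use is to place the
		// microcontroller into a light sleep state until an interrupt occurs.
	}
*/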
3494/*-----------------------------------------------------------*/
3495
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003496#if( configUSE_TICKLESS_IDLE != 0 )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003497
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003498 eSleepModeStatus eTaskConfirmSleepModeStatus( void )
3499 {
3500 /* The idle task exists in addition to the application tasks. */
3501 const UBaseType_t uxNonApplicationTasks = 1;
3502 eSleepModeStatus eReturn = eStandardSleep;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003503
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003504 if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
3505 {
3506 /* A task was made ready while the scheduler was suspended. */
3507 eReturn = eAbortSleep;
3508 }
3509 else if( xYieldPending != pdFALSE )
3510 {
3511 /* A yield was pended while the scheduler was suspended. */
3512 eReturn = eAbortSleep;
3513 }
3514 else
3515 {
3516 /* If all the tasks are in the suspended list (which might mean they
3517 have an infinite block time rather than actually being suspended)
3518 then it is safe to turn all clocks off and just wait for external
3519 interrupts. */
3520 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
3521 {
3522 eReturn = eNoTasksWaitingTimeout;
3523 }
3524 else
3525 {
3526 mtCOVERAGE_TEST_MARKER();
3527 }
3528 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003529
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003530 return eReturn;
3531 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003532
3533#endif /* configUSE_TICKLESS_IDLE */
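/* Illustrative sketch only, not part of the kernel: how a user defined
portSUPPRESS_TICKS_AND_SLEEP() implementation typically consults
eTaskConfirmSleepModeStatus() with interrupts masked before committing to a
low power state. prvEnterLowPowerState() is a hypothetical helper name:

	portDISABLE_INTERRUPTS();

	if( eTaskConfirmSleepModeStatus() == eAbortSleep )
	{
		// A task has been readied or a context switch pended since the idle
		// task decided to sleep - abandon entering the low power state.
	}
	else
	{
		// eStandardSleep or eNoTasksWaitingTimeout - it is still safe to
		// enter the low power state (hypothetical helper).
		prvEnterLowPowerState();
	}

	portENABLE_INTERRUPTS();
*/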
3534/*-----------------------------------------------------------*/
3535
3536#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3537
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003538 void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
3539 {
3540 TCB_t *pxTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003541
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003542 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3543 {
3544 pxTCB = prvGetTCBFromHandle( xTaskToSet );
3545 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
3546 }
3547 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003548
3549#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3550/*-----------------------------------------------------------*/
3551
3552#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3553
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003554 void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex )
3555 {
3556 void *pvReturn = NULL;
3557 TCB_t *pxTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003558
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003559 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3560 {
3561 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
3562 pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
3563 }
3564 else
3565 {
3566 pvReturn = NULL;
3567 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003568
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003569 return pvReturn;
3570 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003571
3572#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
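/* Example usage - an illustrative sketch only, not part of the kernel. It
assumes configNUM_THREAD_LOCAL_STORAGE_POINTERS is at least 1; index 0 and
the stored pointer are chosen purely for illustration:

	static uint32_t ulPerTaskData;
	uint32_t *pulStored;

	// Store an application defined pointer against index 0 of the calling
	// task's thread local storage array...
	vTaskSetThreadLocalStoragePointer( NULL, 0, ( void * ) &ulPerTaskData );

	// ...and read it back again later, perhaps from a different module.
	pulStored = ( uint32_t * ) pvTaskGetThreadLocalStoragePointer( NULL, 0 );
*/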
3573/*-----------------------------------------------------------*/
3574
3575#if ( portUSING_MPU_WRAPPERS == 1 )
3576
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003577 void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions )
3578 {
3579 TCB_t *pxTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003580
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003581 /* If null is passed in here then we are modifying the MPU settings of
3582 the calling task. */
3583 pxTCB = prvGetTCBFromHandle( xTaskToModify );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003584
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003585 vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
3586 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003587
3588#endif /* portUSING_MPU_WRAPPERS */
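/* Example usage - an illustrative sketch only, not part of the kernel. It
assumes an MPU port (portUSING_MPU_WRAPPERS == 1); the region attribute
constant and the number of configurable regions are port specific, and
ucOneKByte is an application defined buffer used purely for illustration:

	static uint8_t ucOneKByte[ 1024 ];

	static const MemoryRegion_t xAlteredRegions[ portNUM_CONFIGURABLE_REGIONS ] =
	{
		// Base address		Length		Parameters
		{ ucOneKByte,		1024,		portMPU_REGION_READ_WRITE },
		{ 0,				0,			0 },
		{ 0,				0,			0 }
	};

	// Update the MPU regions of the calling task (NULL == self).
	vTaskAllocateMPURegions( NULL, xAlteredRegions );
*/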
3589/*-----------------------------------------------------------*/
3590
3591static void prvInitialiseTaskLists( void )
3592{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003593UBaseType_t uxPriority;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003594
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003595 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
3596 {
3597 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
3598 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003599
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003600 vListInitialise( &xDelayedTaskList1 );
3601 vListInitialise( &xDelayedTaskList2 );
3602 vListInitialise( &xPendingReadyList );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003603
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003604 #if ( INCLUDE_vTaskDelete == 1 )
3605 {
3606 vListInitialise( &xTasksWaitingTermination );
3607 }
3608 #endif /* INCLUDE_vTaskDelete */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003609
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003610 #if ( INCLUDE_vTaskSuspend == 1 )
3611 {
3612 vListInitialise( &xSuspendedTaskList );
3613 }
3614 #endif /* INCLUDE_vTaskSuspend */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003615
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003616 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
3617 using list2. */
3618 pxDelayedTaskList = &xDelayedTaskList1;
3619 pxOverflowDelayedTaskList = &xDelayedTaskList2;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003620}
3621/*-----------------------------------------------------------*/
3622
3623static void prvCheckTasksWaitingTermination( void )
3624{
3625
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003626 /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003627
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003628 #if ( INCLUDE_vTaskDelete == 1 )
3629 {
3630 TCB_t *pxTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003631
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003632 /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
3633 being called too often in the idle task. */
3634 while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
3635 {
3636 taskENTER_CRITICAL();
3637 {
3638 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3639 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
3640 --uxCurrentNumberOfTasks;
3641 --uxDeletedTasksWaitingCleanUp;
3642 }
3643 taskEXIT_CRITICAL();
3644
3645 prvDeleteTCB( pxTCB );
3646 }
3647 }
3648 #endif /* INCLUDE_vTaskDelete */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003649}
3650/*-----------------------------------------------------------*/
3651
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003652#if( configUSE_TRACE_FACILITY == 1 )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003653
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003654 void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState )
3655 {
3656 TCB_t *pxTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003657
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003658		/* If xTask is NULL then get the state of the calling task. */
3659 pxTCB = prvGetTCBFromHandle( xTask );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003660
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003661 pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
3662 pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName [ 0 ] );
3663 pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
3664 pxTaskStatus->pxStackBase = pxTCB->pxStack;
3665 pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;
3666 pxTaskStatus->uStackTotal = pxTCB->uStackDepth;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003667
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003668 #if ( configUSE_MUTEXES == 1 )
3669 {
3670 pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
3671 }
3672 #else
3673 {
3674 pxTaskStatus->uxBasePriority = 0;
3675 }
3676 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003677
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003678 #if ( configGENERATE_RUN_TIME_STATS == 1 )
3679 {
3680 pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
3681 }
3682 #else
3683 {
3684 pxTaskStatus->ulRunTimeCounter = 0;
3685 }
3686 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003687
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003688 /* Obtaining the task state is a little fiddly, so is only done if the
3689 value of eState passed into this function is eInvalid - otherwise the
3690 state is just set to whatever is passed in. */
3691 if( eState != eInvalid )
3692 {
3693 if( pxTCB == pxCurrentTCB )
3694 {
3695 pxTaskStatus->eCurrentState = eRunning;
3696 }
3697 else
3698 {
3699 pxTaskStatus->eCurrentState = eState;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003700
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003701 #if ( INCLUDE_vTaskSuspend == 1 )
3702 {
3703 /* If the task is in the suspended list then there is a
3704 chance it is actually just blocked indefinitely - so really
3705 it should be reported as being in the Blocked state. */
3706 if( eState == eSuspended )
3707 {
3708 vTaskSuspendAll();
3709 {
3710 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
3711 {
3712 pxTaskStatus->eCurrentState = eBlocked;
3713 }
3714 }
3715 ( void ) xTaskResumeAll();
3716 }
3717 }
3718 #endif /* INCLUDE_vTaskSuspend */
3719 }
3720 }
3721 else
3722 {
3723 pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
3724 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003725
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003726 /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
3727 parameter is provided to allow it to be skipped. */
3728 if( xGetFreeStackSpace != pdFALSE )
3729 {
3730 #if ( portSTACK_GROWTH > 0 )
3731 {
3732 pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
3733 }
3734 #else
3735 {
3736 pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
3737 }
3738 #endif
3739 }
3740 else
3741 {
3742 pxTaskStatus->usStackHighWaterMark = 0;
3743 }
3744 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003745
3746#endif /* configUSE_TRACE_FACILITY */
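/* Example usage - an illustrative sketch only, not part of the kernel. It
assumes configUSE_TRACE_FACILITY is set to 1:

	void vInspectTask( TaskHandle_t xTask )
	{
	TaskStatus_t xTaskDetails;

		// Request everything, including the stack high water mark, and let
		// vTaskGetInfo() determine the task state itself.
		vTaskGetInfo( xTask, &xTaskDetails, pdTRUE, eInvalid );

		// xTaskDetails.pcTaskName, .eCurrentState, .uxCurrentPriority,
		// .usStackHighWaterMark etc. can now be inspected.
	}
*/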
3747/*-----------------------------------------------------------*/
3748
3749#if ( configUSE_TRACE_FACILITY == 1 )
3750
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003751 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
3752 {
3753 configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB;
3754 UBaseType_t uxTask = 0;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003755
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003756 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
3757 {
3758 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003759
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003760			/* Populate a TaskStatus_t structure within the
3761 pxTaskStatusArray array for each task that is referenced from
3762 pxList. See the definition of TaskStatus_t in task.h for the
3763 meaning of each TaskStatus_t structure member. */
3764 do
3765 {
3766 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3767 vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
3768 uxTask++;
3769 } while( pxNextTCB != pxFirstTCB );
3770 }
3771 else
3772 {
3773 mtCOVERAGE_TEST_MARKER();
3774 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003775
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003776 return uxTask;
3777 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003778
3779#endif /* configUSE_TRACE_FACILITY */
3780/*-----------------------------------------------------------*/
3781
3782#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
3783
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003784 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
3785 {
3786 uint32_t ulCount = 0U;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003787
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003788 while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
3789 {
3790 pucStackByte -= portSTACK_GROWTH;
3791 ulCount++;
3792 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003793
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003794 ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003795
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003796 return ( configSTACK_DEPTH_TYPE ) ulCount;
3797 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003798
3799#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
3800/*-----------------------------------------------------------*/
3801
3802#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
3803
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003804 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
3805 same except for their return type. Using configSTACK_DEPTH_TYPE allows the
3806 user to determine the return type. It gets around the problem of the value
3807 overflowing on 8-bit types without breaking backward compatibility for
3808 applications that expect an 8-bit return type. */
3809 configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
3810 {
3811 TCB_t *pxTCB;
3812 uint8_t *pucEndOfStack;
3813 configSTACK_DEPTH_TYPE uxReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003814
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003815 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
3816 the same except for their return type. Using configSTACK_DEPTH_TYPE
3817 allows the user to determine the return type. It gets around the
3818 problem of the value overflowing on 8-bit types without breaking
3819 backward compatibility for applications that expect an 8-bit return
3820 type. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003821
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003822 pxTCB = prvGetTCBFromHandle( xTask );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003823
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003824 #if portSTACK_GROWTH < 0
3825 {
3826 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
3827 }
3828 #else
3829 {
3830 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
3831 }
3832 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003833
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003834 uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003835
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003836 return uxReturn;
3837 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003838
3839#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
3840/*-----------------------------------------------------------*/
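	/* Illustrative usage only (not part of the kernel): a minimal sketch of a
	task checking its own remaining stack headroom with
	uxTaskGetStackHighWaterMark2().  Passing NULL queries the calling task, and
	the value returned is the lowest number of stack words that have ever been
	free for that task.  The task function, threshold and response below are
	hypothetical:

		void vHypotheticalTask( void *pvParameters )
		{
		configSTACK_DEPTH_TYPE uxHighWater;

			for( ;; )
			{
				uxHighWater = uxTaskGetStackHighWaterMark2( NULL );

				if( uxHighWater < ( configSTACK_DEPTH_TYPE ) 16 )
				{
					vHandleLowStackWarning();
				}

				vTaskDelay( pdMS_TO_TICKS( 1000 ) );
			}
		}
	*/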
3841
3842#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
3843
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003844 UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
3845 {
3846 TCB_t *pxTCB;
3847 uint8_t *pucEndOfStack;
3848 UBaseType_t uxReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003849
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003850 pxTCB = prvGetTCBFromHandle( xTask );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003851
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003852 #if portSTACK_GROWTH < 0
3853 {
3854 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
3855 }
3856 #else
3857 {
3858 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
3859 }
3860 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003861
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003862 uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003863
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003864 return uxReturn;
3865 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003866
3867#endif /* INCLUDE_uxTaskGetStackHighWaterMark */
3868/*-----------------------------------------------------------*/
3869
3870#if ( INCLUDE_vTaskDelete == 1 )
3871
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003872 static void prvDeleteTCB( TCB_t *pxTCB )
3873 {
3874 /* This call is required specifically for the TriCore port. It must be
3875 above the vPortFree() calls. The call is also used by ports/demos that
3876 want to allocate and clean RAM statically. */
3877 portCLEAN_UP_TCB( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003878
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003879 /* Free up the memory allocated by the scheduler for the task. It is up
3880 to the task to free any memory allocated at the application level. */
3881 #if ( configUSE_NEWLIB_REENTRANT == 1 )
3882 {
3883 _reclaim_reent( &( pxTCB->xNewLib_reent ) );
3884 }
3885 #endif /* configUSE_NEWLIB_REENTRANT */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003886
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003887 #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
3888 {
3889 /* The task can only have been allocated dynamically - free both
3890 the stack and TCB. */
3891 vPortFree( pxTCB->pxStack );
3892 vPortFree( pxTCB );
3893 }
3894 #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
3895 {
3896 /* The task could have been allocated statically or dynamically, so
3897 check what was statically allocated before trying to free the
3898 memory. */
3899 if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
3900 {
3901 /* Both the stack and TCB were allocated dynamically, so both
3902 must be freed. */
3903 vPortFree( pxTCB->pxStack );
3904 vPortFree( pxTCB );
3905 }
3906 else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
3907 {
3908 /* Only the stack was statically allocated, so the TCB is the
3909 only memory that must be freed. */
3910 vPortFree( pxTCB );
3911 }
3912 else
3913 {
3914 /* Neither the stack nor the TCB were allocated dynamically, so
3915 nothing needs to be freed. */
3916 configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
3917 mtCOVERAGE_TEST_MARKER();
3918 }
3919 }
3920 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
3921 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003922
3923#endif /* INCLUDE_vTaskDelete */
3924/*-----------------------------------------------------------*/
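	/* Illustrative usage only (not part of the kernel): prvDeleteTCB() frees
	only the memory that the kernel itself allocated, which is why it inspects
	ucStaticallyAllocated.  Deleting a task created with xTaskCreate() releases
	its stack and TCB via vPortFree(); deleting a task created with
	xTaskCreateStatic() leaves the application-supplied buffers untouched.  A
	hypothetical sketch (configSUPPORT_STATIC_ALLOCATION assumed to be 1,
	vHypotheticalTask assumed to exist):

		static StackType_t uxStack[ configMINIMAL_STACK_SIZE ];
		static StaticTask_t xTaskBuffer;

		void vCreateAndDeleteExample( void )
		{
		TaskHandle_t xDynamicTask = NULL, xStaticTask;

			xTaskCreate( vHypotheticalTask, "Dyn", configMINIMAL_STACK_SIZE,
						 NULL, tskIDLE_PRIORITY, &xDynamicTask );

			xStaticTask = xTaskCreateStatic( vHypotheticalTask, "Stat",
											 configMINIMAL_STACK_SIZE, NULL,
											 tskIDLE_PRIORITY, uxStack, &xTaskBuffer );

			vTaskDelete( xDynamicTask );
			vTaskDelete( xStaticTask );
		}
	*/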
3925
3926static void prvResetNextTaskUnblockTime( void )
3927{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003928TCB_t *pxTCB;
3929
3930 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
3931 {
3932 /* The new current delayed list is empty. Set xNextTaskUnblockTime to
3933 the maximum possible value so it is extremely unlikely that the
3934 if( xTickCount >= xNextTaskUnblockTime ) test will pass until
3935 there is an item in the delayed list. */
3936 xNextTaskUnblockTime = portMAX_DELAY;
3937 }
3938 else
3939 {
3940 /* The new current delayed list is not empty, get the value of
3941 the item at the head of the delayed list. This is the time at
3942 which the task at the head of the delayed list should be removed
3943 from the Blocked state. */
3944 ( pxTCB ) = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3945 xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xStateListItem ) );
3946 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003947}
3948/*-----------------------------------------------------------*/
3949
3950#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
3951
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003952 TaskHandle_t xTaskGetCurrentTaskHandle( void )
3953 {
3954 TaskHandle_t xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003955
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003956 /* A critical section is not required as this is not called from
3957 an interrupt and the current TCB will always be the same for any
3958 individual execution thread. */
3959 xReturn = pxCurrentTCB;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003960
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003961 return xReturn;
3962 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003963
3964#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
3965/*-----------------------------------------------------------*/
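	/* Illustrative usage only (not part of the kernel):
	xTaskGetCurrentTaskHandle() is typically called by a task that needs to
	hand its own handle to an ISR or to another task, for example so it can
	later be the target of a direct to task notification.  A hypothetical
	sketch (vStoreHandleForIsr() is an application-defined helper, not a
	FreeRTOS API):

		void vHypotheticalTask( void *pvParameters )
		{
		TaskHandle_t xOwnHandle;

			xOwnHandle = xTaskGetCurrentTaskHandle();
			vStoreHandleForIsr( xOwnHandle );

			for( ;; )
			{
				ulTaskNotifyTake( pdTRUE, portMAX_DELAY );
			}
		}
	*/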
3966
3967#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
3968
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003969 BaseType_t xTaskGetSchedulerState( void )
3970 {
3971 BaseType_t xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003972
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003973 if( xSchedulerRunning == pdFALSE )
3974 {
3975 xReturn = taskSCHEDULER_NOT_STARTED;
3976 }
3977 else
3978 {
3979 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
3980 {
3981 xReturn = taskSCHEDULER_RUNNING;
3982 }
3983 else
3984 {
3985 xReturn = taskSCHEDULER_SUSPENDED;
3986 }
3987 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003988
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003989 return xReturn;
3990 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003991
3992#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
3993/*-----------------------------------------------------------*/
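	/* Illustrative usage only (not part of the kernel): library code that may
	run both before and after vTaskStartScheduler() sometimes checks the
	scheduler state before attempting to block, because blocking is only
	meaningful once the scheduler is running and not suspended.  A hypothetical
	sketch:

		void vDelayIfPossible( TickType_t xTicks )
		{
			if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING )
			{
				vTaskDelay( xTicks );
			}
		}
	*/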
3994
3995#if ( configUSE_MUTEXES == 1 )
3996
xiaohu.huang4f321fb2024-03-22 14:50:29 +08003997 BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
3998 {
3999 TCB_t * const pxMutexHolderTCB = pxMutexHolder;
4000 BaseType_t xReturn = pdFALSE;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004001
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004002 /* If the mutex was given back by an interrupt while the queue was
4003 locked then the mutex holder might now be NULL. _RB_ Is this still
4004 needed as interrupts can no longer use mutexes? */
4005 if( pxMutexHolder != NULL )
4006 {
4007 /* If the holder of the mutex has a priority below the priority of
4008 the task attempting to obtain the mutex then it will temporarily
4009 inherit the priority of the task attempting to obtain the mutex. */
4010 if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority )
4011 {
4012 /* Adjust the mutex holder state to account for its new
4013 priority. Only reset the event list item value if the value is
4014 not being used for anything else. */
4015 if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
4016 {
4017 listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4018 }
4019 else
4020 {
4021 mtCOVERAGE_TEST_MARKER();
4022 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004023
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004024 /* If the task being modified is in the ready state it will need
4025 to be moved into a new list. */
4026 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
4027 {
4028 if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
4029 {
4030 taskRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority );
4031 }
4032 else
4033 {
4034 mtCOVERAGE_TEST_MARKER();
4035 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004036
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004037 /* Inherit the priority before being moved into the new list. */
4038 pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
4039 prvAddTaskToReadyList( pxMutexHolderTCB );
4040 }
4041 else
4042 {
4043 /* Just inherit the priority. */
4044 pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
4045 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004046
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004047 traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004048
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004049 /* Inheritance occurred. */
4050 xReturn = pdTRUE;
4051 }
4052 else
4053 {
4054 if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority )
4055 {
4056 /* The base priority of the mutex holder is lower than the
4057 priority of the task attempting to take the mutex, but the
4058 current priority of the mutex holder is not lower than the
4059 priority of the task attempting to take the mutex.
 4060 Therefore the mutex holder must have already inherited a
 4061 priority; had it not, inheritance would have occurred above,
 4062 so pdTRUE is returned here as well. */
4063 xReturn = pdTRUE;
4064 }
4065 else
4066 {
4067 mtCOVERAGE_TEST_MARKER();
4068 }
4069 }
4070 }
4071 else
4072 {
4073 mtCOVERAGE_TEST_MARKER();
4074 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004075
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004076 return xReturn;
4077 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004078
4079#endif /* configUSE_MUTEXES */
4080/*-----------------------------------------------------------*/
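	/* Illustrative usage only (not part of the kernel): xTaskPriorityInherit()
	is invoked from the queue implementation when a task blocks on a mutex that
	is already held.  Seen from the application, the effect is the classic
	priority inheritance pattern sketched below with the semphr.h API (task
	names, priorities and vAccessSharedResource() are hypothetical): while the
	high priority task waits for the mutex, the low priority holder temporarily
	runs at the waiter's priority so it cannot be starved by medium priority
	tasks, and the priority is restored when the mutex is given back.

		SemaphoreHandle_t xMutex;

		void vLowPriorityTask( void *pvParameters )
		{
			for( ;; )
			{
				if( xSemaphoreTake( xMutex, portMAX_DELAY ) == pdTRUE )
				{
					vAccessSharedResource();
					xSemaphoreGive( xMutex );
				}
			}
		}

		void vHighPriorityTask( void *pvParameters )
		{
			for( ;; )
			{
				if( xSemaphoreTake( xMutex, portMAX_DELAY ) == pdTRUE )
				{
					vAccessSharedResource();
					xSemaphoreGive( xMutex );
				}
			}
		}
	*/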
4081
4082#if ( configUSE_MUTEXES == 1 )
4083
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004084 BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
4085 {
4086 TCB_t * const pxTCB = pxMutexHolder;
4087 BaseType_t xReturn = pdFALSE;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004088
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004089 if( pxMutexHolder != NULL )
4090 {
4091 /* A task can only have an inherited priority if it holds the mutex.
4092 If the mutex is held by a task then it cannot be given from an
4093 interrupt, and if a mutex is given by the holding task then it must
4094 be the running state task. */
4095 configASSERT( pxTCB == pxCurrentTCB );
4096 configASSERT( pxTCB->uxMutexesHeld );
4097 ( pxTCB->uxMutexesHeld )--;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004098
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004099 /* Has the holder of the mutex inherited the priority of another
4100 task? */
4101 if( pxTCB->uxPriority != pxTCB->uxBasePriority )
4102 {
4103 /* Only disinherit if no other mutexes are held. */
4104 if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
4105 {
4106 /* A task can only have an inherited priority if it holds
4107 the mutex. If the mutex is held by a task then it cannot be
4108 given from an interrupt, and if a mutex is given by the
4109 holding task then it must be the running state task. Remove
4110 the holding task from the ready list. */
4111 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
4112 {
4113 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4114 }
4115 else
4116 {
4117 mtCOVERAGE_TEST_MARKER();
4118 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004119
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004120 /* Disinherit the priority before adding the task into the
4121 new ready list. */
4122 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
4123 pxTCB->uxPriority = pxTCB->uxBasePriority;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004124
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004125 /* Reset the event list item value. It cannot be in use for
4126 any other purpose if this task is running, and it must be
4127 running to give back the mutex. */
4128 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4129 prvAddTaskToReadyList( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004130
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004131 /* Return true to indicate that a context switch is required.
4132 This is only actually required in the corner case whereby
4133 multiple mutexes were held and the mutexes were given back
4134 in an order different to that in which they were taken.
4135 If a context switch did not occur when the first mutex was
4136 returned, even if a task was waiting on it, then a context
4137 switch should occur when the last mutex is returned whether
4138 a task is waiting on it or not. */
4139 xReturn = pdTRUE;
4140 }
4141 else
4142 {
4143 mtCOVERAGE_TEST_MARKER();
4144 }
4145 }
4146 else
4147 {
4148 mtCOVERAGE_TEST_MARKER();
4149 }
4150 }
4151 else
4152 {
4153 mtCOVERAGE_TEST_MARKER();
4154 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004155
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004156 return xReturn;
4157 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004158
4159#endif /* configUSE_MUTEXES */
4160/*-----------------------------------------------------------*/
4161
4162#if ( configUSE_MUTEXES == 1 )
4163
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004164 void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask )
4165 {
4166 TCB_t * const pxTCB = pxMutexHolder;
4167 UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
4168 const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004169
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004170 if( pxMutexHolder != NULL )
4171 {
4172 /* If pxMutexHolder is not NULL then the holder must hold at least
4173 one mutex. */
4174 configASSERT( pxTCB->uxMutexesHeld );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004175
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004176 /* Determine the priority to which the priority of the task that
4177 holds the mutex should be set. This will be the greater of the
4178 holding task's base priority and the priority of the highest
4179 priority task that is waiting to obtain the mutex. */
4180 if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
4181 {
4182 uxPriorityToUse = uxHighestPriorityWaitingTask;
4183 }
4184 else
4185 {
4186 uxPriorityToUse = pxTCB->uxBasePriority;
4187 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004188
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004189 /* Does the priority need to change? */
4190 if( pxTCB->uxPriority != uxPriorityToUse )
4191 {
4192 /* Only disinherit if no other mutexes are held. This is a
4193 simplification in the priority inheritance implementation. If
4194 the task that holds the mutex is also holding other mutexes then
4195 the other mutexes may have caused the priority inheritance. */
4196 if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
4197 {
4198 /* If a task has timed out because it already holds the
 4199 mutex it was trying to obtain then it cannot have inherited
4200 its own priority. */
4201 configASSERT( pxTCB != pxCurrentTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004202
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004203 /* Disinherit the priority, remembering the previous
4204 priority to facilitate determining the subject task's
4205 state. */
4206 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
4207 uxPriorityUsedOnEntry = pxTCB->uxPriority;
4208 pxTCB->uxPriority = uxPriorityToUse;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004209
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004210 /* Only reset the event list item value if the value is not
4211 being used for anything else. */
4212 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
4213 {
4214 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4215 }
4216 else
4217 {
4218 mtCOVERAGE_TEST_MARKER();
4219 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004220
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004221 /* If the running task is not the task that holds the mutex
4222 then the task that holds the mutex could be in either the
4223 Ready, Blocked or Suspended states. Only remove the task
4224 from its current state list if it is in the Ready state as
4225 the task's priority is going to change and there is one
4226 Ready list per priority. */
4227 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
4228 {
4229 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
4230 {
4231 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4232 }
4233 else
4234 {
4235 mtCOVERAGE_TEST_MARKER();
4236 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004237
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004238 prvAddTaskToReadyList( pxTCB );
4239 }
4240 else
4241 {
4242 mtCOVERAGE_TEST_MARKER();
4243 }
4244 }
4245 else
4246 {
4247 mtCOVERAGE_TEST_MARKER();
4248 }
4249 }
4250 else
4251 {
4252 mtCOVERAGE_TEST_MARKER();
4253 }
4254 }
4255 else
4256 {
4257 mtCOVERAGE_TEST_MARKER();
4258 }
4259 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004260
4261#endif /* configUSE_MUTEXES */
4262/*-----------------------------------------------------------*/
4263
4264#if ( portCRITICAL_NESTING_IN_TCB == 1 )
4265
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004266 void vTaskEnterCritical( void )
4267 {
4268 portDISABLE_INTERRUPTS();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004269
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004270 if( xSchedulerRunning != pdFALSE )
4271 {
4272 ( pxCurrentTCB->uxCriticalNesting )++;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004273
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004274 /* This is not the interrupt safe version of the enter critical
4275 function so assert() if it is being called from an interrupt
4276 context. Only API functions that end in "FromISR" can be used in an
4277 interrupt. Only assert if the critical nesting count is 1 to
4278 protect against recursive calls if the assert function also uses a
4279 critical section. */
4280 if( pxCurrentTCB->uxCriticalNesting == 1 )
4281 {
4282 portASSERT_IF_IN_ISR();
4283 }
4284 }
4285 else
4286 {
4287 mtCOVERAGE_TEST_MARKER();
4288 }
4289 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004290
4291#endif /* portCRITICAL_NESTING_IN_TCB */
4292/*-----------------------------------------------------------*/
4293
4294#if ( portCRITICAL_NESTING_IN_TCB == 1 )
4295
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004296 void vTaskExitCritical( void )
4297 {
4298 if( xSchedulerRunning != pdFALSE )
4299 {
4300 if( pxCurrentTCB->uxCriticalNesting > 0U )
4301 {
4302 ( pxCurrentTCB->uxCriticalNesting )--;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004303
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004304 if( pxCurrentTCB->uxCriticalNesting == 0U )
4305 {
4306 portENABLE_INTERRUPTS();
4307 }
4308 else
4309 {
4310 mtCOVERAGE_TEST_MARKER();
4311 }
4312 }
4313 else
4314 {
4315 mtCOVERAGE_TEST_MARKER();
4316 }
4317 }
4318 else
4319 {
4320 mtCOVERAGE_TEST_MARKER();
4321 }
4322 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004323
4324#endif /* portCRITICAL_NESTING_IN_TCB */
4325/*-----------------------------------------------------------*/
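	/* Illustrative usage only (not part of the kernel): application code never
	calls vTaskEnterCritical()/vTaskExitCritical() directly - it uses the
	taskENTER_CRITICAL()/taskEXIT_CRITICAL() macros, which map onto these
	functions on ports that keep the nesting count in the TCB.  A hypothetical
	sketch protecting a shared counter:

		static volatile uint32_t ulSharedCounter = 0;

		void vIncrementSharedCounter( void )
		{
			taskENTER_CRITICAL();
			{
				ulSharedCounter++;
			}
			taskEXIT_CRITICAL();
		}
	*/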
4326
4327#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
4328
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004329 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName )
4330 {
4331 size_t x;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004332
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004333 /* Start by copying the entire string. */
4334 strcpy( pcBuffer, pcTaskName );
4335
4336 /* Pad the end of the string with spaces to ensure columns line up when
4337 printed out. */
4338 for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ )
4339 {
4340 pcBuffer[ x ] = ' ';
4341 }
4342
4343 /* Terminate. */
4344 pcBuffer[ x ] = ( char ) 0x00;
4345
4346 /* Return the new end of string. */
4347 return &( pcBuffer[ x ] );
4348 }
4349
4350#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
4351/*-----------------------------------------------------------*/
4352
4353#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
4354
4355 void vTaskList( char * pcWriteBuffer )
4356 {
4357 TaskStatus_t *pxTaskStatusArray;
4358 UBaseType_t uxArraySize, x;
4359 char cStatus;
4360 uint32_t ulTotalTime, ulStatsAsPercentage;
Xiaohu.Huangdbadd052022-01-26 17:30:36 +08004361
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004362
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004363 /*
4364 * PLEASE NOTE:
4365 *
4366 * This function is provided for convenience only, and is used by many
4367 * of the demo applications. Do not consider it to be part of the
4368 * scheduler.
4369 *
4370 * vTaskList() calls uxTaskGetSystemState(), then formats part of the
4371 * uxTaskGetSystemState() output into a human readable table that
4372 * displays task names, states and stack usage.
4373 *
4374 * vTaskList() has a dependency on the sprintf() C library function that
4375 * might bloat the code size, use a lot of stack, and provide different
4376 * results on different platforms. An alternative, tiny, third party,
4377 * and limited functionality implementation of sprintf() is provided in
4378 * many of the FreeRTOS/Demo sub-directories in a file called
4379 * printf-stdarg.c (note printf-stdarg.c does not provide a full
4380 * snprintf() implementation!).
4381 *
4382 * It is recommended that production systems call uxTaskGetSystemState()
4383 * directly to get access to raw stats data, rather than indirectly
4384 * through a call to vTaskList().
4385 */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004386
4387
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004388 /* Make sure the write buffer does not contain a string. */
4389 *pcWriteBuffer = ( char ) 0x00;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004390
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004391 /* Take a snapshot of the number of tasks in case it changes while this
4392 function is executing. */
4393 uxArraySize = uxCurrentNumberOfTasks;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004394
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004395		/* Allocate an array index for each task.  NOTE!  If
4396 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4397 equate to NULL. */
4398 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004399
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004400 if( pxTaskStatusArray != NULL )
4401 {
4402 /* Generate the (binary) data. */
4403 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
4404 ulTotalTime /= 100UL;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004405
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004406 /* Create a human readable table from the binary data. */
4407 for( x = 0; x < uxArraySize; x++ )
4408 {
4409 switch( pxTaskStatusArray[ x ].eCurrentState )
4410 {
4411 case eRunning: cStatus = tskRUNNING_CHAR;
4412 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004413
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004414 case eReady: cStatus = tskREADY_CHAR;
4415 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004416
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004417 case eBlocked: cStatus = tskBLOCKED_CHAR;
4418 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004419
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004420 case eSuspended: cStatus = tskSUSPENDED_CHAR;
4421 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004422
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004423 case eDeleted: cStatus = tskDELETED_CHAR;
4424 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004425
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004426 case eInvalid: /* Fall through. */
4427 default: /* Should not get here, but it is included
4428 to prevent static checking errors. */
4429 cStatus = ( char ) 0x00;
4430 break;
4431 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004432
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004433 /* Write the task name to the string, padding with spaces so it
4434 can be printed in tabular form more easily. */
4435 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004436
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004437
4438 ulStatsAsPercentage = ulTotalTime == 0 ? 0 : pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;
4439 /* Write the rest of the string. */
4440 sprintf( pcWriteBuffer, "\t%u\t%c\t%u\t\t%u\t\t%u\t\t%u\t%u\t\r\n",
4441 ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber,
4442 cStatus,
4443 ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority,
4444 ( unsigned int ) pxTaskStatusArray[ x ].uStackTotal,
4445 ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark,
4446 ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter,
4447 ( unsigned int ) ulStatsAsPercentage);
4448 pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
4449 }
4450
4451 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4452 is 0 then vPortFree() will be #defined to nothing. */
4453 vPortFree( pxTaskStatusArray );
4454 }
4455 else
4456 {
4457 mtCOVERAGE_TEST_MARKER();
4458 }
4459 }
4460
4461#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004462/*----------------------------------------------------------*/
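	/* Illustrative usage only (not part of the kernel): as the note above
	recommends, production code can call uxTaskGetSystemState() directly and do
	its own formatting instead of relying on vTaskList() and sprintf().  A
	hypothetical sketch (vApplicationLogTask() is an application-defined sink,
	not a FreeRTOS API):

		void vLogTaskSnapshot( void )
		{
		TaskStatus_t *pxStatusArray;
		UBaseType_t uxCount, x;
		uint32_t ulTotalRunTime;

			uxCount = uxTaskGetNumberOfTasks();
			pxStatusArray = pvPortMalloc( uxCount * sizeof( TaskStatus_t ) );

			if( pxStatusArray != NULL )
			{
				uxCount = uxTaskGetSystemState( pxStatusArray, uxCount, &ulTotalRunTime );

				for( x = 0; x < uxCount; x++ )
				{
					vApplicationLogTask( &( pxStatusArray[ x ] ) );
				}

				vPortFree( pxStatusArray );
			}
		}
	*/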
4463
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004464#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004465
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004466 void vTaskGetRunTimeStats( char *pcWriteBuffer )
4467 {
4468 TaskStatus_t *pxTaskStatusArray;
4469 UBaseType_t uxArraySize, x;
4470 uint32_t ulTotalTime, ulStatsAsPercentage;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004471
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004472 #if( configUSE_TRACE_FACILITY != 1 )
4473 {
4474 #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
4475 }
4476 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004477
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004478 /*
4479 * PLEASE NOTE:
4480 *
4481 * This function is provided for convenience only, and is used by many
4482 * of the demo applications. Do not consider it to be part of the
4483 * scheduler.
4484 *
4485 * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
4486 * of the uxTaskGetSystemState() output into a human readable table that
4487 * displays the amount of time each task has spent in the Running state
4488 * in both absolute and percentage terms.
4489 *
4490 * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
4491 * function that might bloat the code size, use a lot of stack, and
4492 * provide different results on different platforms. An alternative,
4493 * tiny, third party, and limited functionality implementation of
4494 * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
4495 * a file called printf-stdarg.c (note printf-stdarg.c does not provide
4496 * a full snprintf() implementation!).
4497 *
4498 * It is recommended that production systems call uxTaskGetSystemState()
4499 * directly to get access to raw stats data, rather than indirectly
4500 * through a call to vTaskGetRunTimeStats().
4501 */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004502
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004503 /* Make sure the write buffer does not contain a string. */
4504 *pcWriteBuffer = ( char ) 0x00;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004505
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004506 /* Take a snapshot of the number of tasks in case it changes while this
4507 function is executing. */
4508 uxArraySize = uxCurrentNumberOfTasks;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004509
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004510 /* Allocate an array index for each task. NOTE! If
4511 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4512 equate to NULL. */
4513 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004514
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004515 if( pxTaskStatusArray != NULL )
4516 {
4517 /* Generate the (binary) data. */
4518 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004519
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004520 /* For percentage calculations. */
4521 ulTotalTime /= 100UL;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004522
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004523 /* Avoid divide by zero errors. */
4524 if( ulTotalTime > 0UL )
4525 {
4526 /* Create a human readable table from the binary data. */
4527 for( x = 0; x < uxArraySize; x++ )
4528 {
4529 /* What percentage of the total run time has the task used?
4530 This will always be rounded down to the nearest integer.
4531 ulTotalRunTimeDiv100 has already been divided by 100. */
4532 ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004533
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004534 /* Write the task name to the string, padding with
4535 spaces so it can be printed in tabular form more
4536 easily. */
4537 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004538
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004539 if( ulStatsAsPercentage > 0UL )
4540 {
4541 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4542 {
4543 sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
4544 }
4545 #else
4546 {
4547 /* sizeof( int ) == sizeof( long ) so a smaller
4548 printf() library can be used. */
4549 sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
4550 }
4551 #endif
4552 }
4553 else
4554 {
4555 /* If the percentage is zero here then the task has
4556 consumed less than 1% of the total run time. */
4557 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4558 {
4559 sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
4560 }
4561 #else
4562 {
4563 /* sizeof( int ) == sizeof( long ) so a smaller
4564 printf() library can be used. */
4565 sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
4566 }
4567 #endif
4568 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004569
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004570 pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
4571 }
4572 }
4573 else
4574 {
4575 mtCOVERAGE_TEST_MARKER();
4576 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004577
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004578 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4579 is 0 then vPortFree() will be #defined to nothing. */
4580 vPortFree( pxTaskStatusArray );
4581 }
4582 else
4583 {
4584 mtCOVERAGE_TEST_MARKER();
4585 }
4586 }
4587
 4588#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004589/*-----------------------------------------------------------*/
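	/* Illustrative usage only (not part of the kernel): vTaskGetRunTimeStats()
	only produces meaningful output when the application supplies a run time
	counter that ticks considerably faster than the RTOS tick.  A hypothetical
	FreeRTOSConfig.h fragment, assuming a board-specific timer initialised by
	vHypotheticalTimerInit() and read by ulHypotheticalHiResTimer():

		#define configGENERATE_RUN_TIME_STATS             1
		#define configUSE_STATS_FORMATTING_FUNCTIONS      1
		#define configUSE_TRACE_FACILITY                  1
		#define portCONFIGURE_TIMER_FOR_RUN_TIME_STATS()  vHypotheticalTimerInit()
		#define portGET_RUN_TIME_COUNTER_VALUE()          ulHypotheticalHiResTimer()

	With those settings in place the function can be called with a buffer large
	enough to hold roughly one 40 character line per task:

		static char cStatsBuffer[ 40 * 10 ];
		vTaskGetRunTimeStats( cStatsBuffer );
	*/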
4590
4591TickType_t uxTaskResetEventItemValue( void )
4592{
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004593TickType_t uxReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004594
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004595 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004596
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004597 /* Reset the event list item to its normal value - so it can be used with
4598 queues and semaphores. */
4599 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004600
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004601 return uxReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004602}
4603/*-----------------------------------------------------------*/
4604
4605#if ( configUSE_MUTEXES == 1 )
4606
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004607 TaskHandle_t pvTaskIncrementMutexHeldCount( void )
4608 {
4609 /* If xSemaphoreCreateMutex() is called before any tasks have been created
4610 then pxCurrentTCB will be NULL. */
4611 if( pxCurrentTCB != NULL )
4612 {
4613 ( pxCurrentTCB->uxMutexesHeld )++;
4614 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004615
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004616 return pxCurrentTCB;
4617 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004618
4619#endif /* configUSE_MUTEXES */
4620/*-----------------------------------------------------------*/
4621
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004622#if( configUSE_TASK_NOTIFICATIONS == 1 )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004623
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004624 uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
4625 {
4626 uint32_t ulReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004627
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004628 taskENTER_CRITICAL();
4629 {
4630 /* Only block if the notification count is not already non-zero. */
4631 if( pxCurrentTCB->ulNotifiedValue == 0UL )
4632 {
4633 /* Mark this task as waiting for a notification. */
4634 pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004635
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004636 if( xTicksToWait > ( TickType_t ) 0 )
4637 {
4638 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
4639 traceTASK_NOTIFY_TAKE_BLOCK();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004640
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004641 /* All ports are written to allow a yield in a critical
4642 section (some will yield immediately, others wait until the
4643 critical section exits) - but it is not something that
4644 application code should ever do. */
4645 portYIELD_WITHIN_API();
4646 }
4647 else
4648 {
4649 mtCOVERAGE_TEST_MARKER();
4650 }
4651 }
4652 else
4653 {
4654 mtCOVERAGE_TEST_MARKER();
4655 }
4656 }
4657 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004658
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004659 taskENTER_CRITICAL();
4660 {
4661 traceTASK_NOTIFY_TAKE();
4662 ulReturn = pxCurrentTCB->ulNotifiedValue;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004663
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004664 if( ulReturn != 0UL )
4665 {
4666 if( xClearCountOnExit != pdFALSE )
4667 {
4668 pxCurrentTCB->ulNotifiedValue = 0UL;
4669 }
4670 else
4671 {
4672 pxCurrentTCB->ulNotifiedValue = ulReturn - ( uint32_t ) 1;
4673 }
4674 }
4675 else
4676 {
4677 mtCOVERAGE_TEST_MARKER();
4678 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004679
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004680 pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
4681 }
4682 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004683
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004684 return ulReturn;
4685 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004686
4687#endif /* configUSE_TASK_NOTIFICATIONS */
4688/*-----------------------------------------------------------*/
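	/* Illustrative usage only (not part of the kernel): ulTaskNotifyTake() is
	most often paired with xTaskNotifyGive()/vTaskNotifyGiveFromISR() as a
	lightweight alternative to a binary or counting semaphore.  A hypothetical
	sketch in which an interrupt defers processing to a task (the ISR,
	vHandlerTask and vProcessDeferredWork() are application defined):

		static TaskHandle_t xHandlerTask = NULL;

		void vHypotheticalISR( void )
		{
		BaseType_t xHigherPriorityTaskWoken = pdFALSE;

			vTaskNotifyGiveFromISR( xHandlerTask, &xHigherPriorityTaskWoken );
			portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
		}

		void vHandlerTask( void *pvParameters )
		{
			for( ;; )
			{
				if( ulTaskNotifyTake( pdTRUE, pdMS_TO_TICKS( 500 ) ) != 0UL )
				{
					vProcessDeferredWork();
				}
			}
		}
	*/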
4689
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004690#if( configUSE_TASK_NOTIFICATIONS == 1 )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004691
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004692 BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait )
4693 {
4694 BaseType_t xReturn;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004695
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004696 taskENTER_CRITICAL();
4697 {
4698 /* Only block if a notification is not already pending. */
4699 if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
4700 {
4701 /* Clear bits in the task's notification value as bits may get
4702 set by the notifying task or interrupt. This can be used to
4703 clear the value to zero. */
4704 pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnEntry;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004705
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004706 /* Mark this task as waiting for a notification. */
4707 pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004708
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004709 if( xTicksToWait > ( TickType_t ) 0 )
4710 {
4711 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
4712 traceTASK_NOTIFY_WAIT_BLOCK();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004713
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004714 /* All ports are written to allow a yield in a critical
4715 section (some will yield immediately, others wait until the
4716 critical section exits) - but it is not something that
4717 application code should ever do. */
4718 portYIELD_WITHIN_API();
4719 }
4720 else
4721 {
4722 mtCOVERAGE_TEST_MARKER();
4723 }
4724 }
4725 else
4726 {
4727 mtCOVERAGE_TEST_MARKER();
4728 }
4729 }
4730 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004731
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004732 taskENTER_CRITICAL();
4733 {
4734 traceTASK_NOTIFY_WAIT();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004735
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004736 if( pulNotificationValue != NULL )
4737 {
4738 /* Output the current notification value, which may or may not
4739 have changed. */
4740 *pulNotificationValue = pxCurrentTCB->ulNotifiedValue;
4741 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004742
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004743			/* If ucNotifyState is taskNOTIFICATION_RECEIVED then either the task never entered the
4744 blocked state (because a notification was already pending) or the
4745 task unblocked because of a notification. Otherwise the task
4746 unblocked because of a timeout. */
4747 if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
4748 {
4749 /* A notification was not received. */
4750 xReturn = pdFALSE;
4751 }
4752 else
4753 {
4754 /* A notification was already pending or a notification was
4755 received while the task was waiting. */
4756 pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnExit;
4757 xReturn = pdTRUE;
4758 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004759
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004760 pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
4761 }
4762 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004763
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004764 return xReturn;
4765 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004766
4767#endif /* configUSE_TASK_NOTIFICATIONS */
4768/*-----------------------------------------------------------*/
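	/* Illustrative usage only (not part of the kernel): xTaskNotifyWait()
	combined with xTaskNotify( ..., eSetBits ) can act as a lightweight,
	single receiver alternative to an event group.  A hypothetical sketch using
	two application-defined bits (xWaitingTaskHandle is assumed to hold the
	receiving task's handle, vProcessRxEvent() is a placeholder).  The sending
	side simply sets a bit:

		xTaskNotify( xWaitingTaskHandle, hypotheticalRX_BIT, eSetBits );

	The receiving task waits for, then clears, the bits it understands:

		#define hypotheticalRX_BIT    0x01UL
		#define hypotheticalTX_BIT    0x02UL

		void vWaitingTask( void *pvParameters )
		{
		uint32_t ulNotifiedValue;

			for( ;; )
			{
				if( xTaskNotifyWait( 0UL,
									 hypotheticalRX_BIT | hypotheticalTX_BIT,
									 &ulNotifiedValue,
									 portMAX_DELAY ) == pdTRUE )
				{
					if( ( ulNotifiedValue & hypotheticalRX_BIT ) != 0UL )
					{
						vProcessRxEvent();
					}
				}
			}
		}
	*/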
4769
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004770#if( configUSE_TASK_NOTIFICATIONS == 1 )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004771
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004772 BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue )
4773 {
4774 TCB_t * pxTCB;
4775 BaseType_t xReturn = pdPASS;
4776 uint8_t ucOriginalNotifyState;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004777
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004778 configASSERT( xTaskToNotify );
4779 pxTCB = xTaskToNotify;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004780
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004781 taskENTER_CRITICAL();
4782 {
4783 if( pulPreviousNotificationValue != NULL )
4784 {
4785 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
4786 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004787
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004788 ucOriginalNotifyState = pxTCB->ucNotifyState;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004789
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004790 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004791
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004792 switch( eAction )
4793 {
4794 case eSetBits :
4795 pxTCB->ulNotifiedValue |= ulValue;
4796 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004797
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004798 case eIncrement :
4799 ( pxTCB->ulNotifiedValue )++;
4800 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004801
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004802 case eSetValueWithOverwrite :
4803 pxTCB->ulNotifiedValue = ulValue;
4804 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004805
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004806 case eSetValueWithoutOverwrite :
4807 if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
4808 {
4809 pxTCB->ulNotifiedValue = ulValue;
4810 }
4811 else
4812 {
4813 /* The value could not be written to the task. */
4814 xReturn = pdFAIL;
4815 }
4816 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004817
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004818 case eNoAction:
4819 /* The task is being notified without its notify value being
4820 updated. */
4821 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004822
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004823 default:
4824 /* Should not get here if all enums are handled.
4825 Artificially force an assert by testing a value the
4826 compiler can't assume is const. */
4827 configASSERT( pxTCB->ulNotifiedValue == ~0U );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004828
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004829 break;
4830 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004831
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004832 traceTASK_NOTIFY();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004833
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004834 /* If the task is in the blocked state specifically to wait for a
4835 notification then unblock it now. */
4836 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
4837 {
4838 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
4839 prvAddTaskToReadyList( pxTCB );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004840
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004841 /* The task should not have been on an event list. */
4842 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004843
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004844 #if( configUSE_TICKLESS_IDLE != 0 )
4845 {
4846 /* If a task is blocked waiting for a notification then
4847 xNextTaskUnblockTime might be set to the blocked task's time
4848 out time. If the task is unblocked for a reason other than
4849 a timeout xNextTaskUnblockTime is normally left unchanged,
4850 because it will automatically get reset to a new value when
4851 the tick count equals xNextTaskUnblockTime. However if
4852 tickless idling is used it might be more important to enter
4853 sleep mode at the earliest possible time - so reset
4854 xNextTaskUnblockTime here to ensure it is updated at the
4855 earliest possible time. */
4856 prvResetNextTaskUnblockTime();
4857 }
4858 #endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004859
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004860 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
4861 {
4862 /* The notified task has a priority above the currently
4863 executing task so a yield is required. */
4864 taskYIELD_IF_USING_PREEMPTION();
4865 }
4866 else
4867 {
4868 mtCOVERAGE_TEST_MARKER();
4869 }
4870 }
4871 else
4872 {
4873 mtCOVERAGE_TEST_MARKER();
4874 }
4875 }
4876 taskEXIT_CRITICAL();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004877
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004878 return xReturn;
4879 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004880
4881#endif /* configUSE_TASK_NOTIFICATIONS */
4882/*-----------------------------------------------------------*/
4883
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004884#if( configUSE_TASK_NOTIFICATIONS == 1 )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004885
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004886 BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken )
4887 {
4888 TCB_t * pxTCB;
4889 uint8_t ucOriginalNotifyState;
4890 BaseType_t xReturn = pdPASS;
4891 UBaseType_t uxSavedInterruptStatus;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004892
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004893 configASSERT( xTaskToNotify );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004894
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004895 /* RTOS ports that support interrupt nesting have the concept of a
4896 maximum system call (or maximum API call) interrupt priority.
 4897 Interrupts that are above the maximum system call priority are kept
4898 permanently enabled, even when the RTOS kernel is in a critical section,
4899 but cannot make any calls to FreeRTOS API functions. If configASSERT()
4900 is defined in FreeRTOSConfig.h then
4901 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
4902 failure if a FreeRTOS API function is called from an interrupt that has
4903 been assigned a priority above the configured maximum system call
4904 priority. Only FreeRTOS functions that end in FromISR can be called
4905 from interrupts that have been assigned a priority at or (logically)
4906 below the maximum system call interrupt priority. FreeRTOS maintains a
4907 separate interrupt safe API to ensure interrupt entry is as fast and as
4908 simple as possible. More information (albeit Cortex-M specific) is
4909 provided on the following link:
4910 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
4911 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004912
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004913 pxTCB = xTaskToNotify;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004914
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004915 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
4916 {
4917 if( pulPreviousNotificationValue != NULL )
4918 {
4919 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
4920 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004921
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004922 ucOriginalNotifyState = pxTCB->ucNotifyState;
4923 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004924
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004925 switch( eAction )
4926 {
4927 case eSetBits :
4928 pxTCB->ulNotifiedValue |= ulValue;
4929 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004930
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004931 case eIncrement :
4932 ( pxTCB->ulNotifiedValue )++;
4933 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004934
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004935 case eSetValueWithOverwrite :
4936 pxTCB->ulNotifiedValue = ulValue;
4937 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004938
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004939 case eSetValueWithoutOverwrite :
4940 if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
4941 {
4942 pxTCB->ulNotifiedValue = ulValue;
4943 }
4944 else
4945 {
4946 /* The value could not be written to the task. */
4947 xReturn = pdFAIL;
4948 }
4949 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004950
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004951 case eNoAction :
4952 /* The task is being notified without its notify value being
4953 updated. */
4954 break;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004955
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004956 default:
4957 /* Should not get here if all enums are handled.
4958 Artificially force an assert by testing a value the
4959 compiler can't assume is const. */
4960 configASSERT( pxTCB->ulNotifiedValue == ~0U );
4961 break;
4962 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004963
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004964 traceTASK_NOTIFY_FROM_ISR();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004965
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004966 /* If the task is in the blocked state specifically to wait for a
4967 notification then unblock it now. */
4968 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
4969 {
4970 /* The task should not have been on an event list. */
4971 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004972
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004973 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
4974 {
4975 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
4976 prvAddTaskToReadyList( pxTCB );
4977 }
4978 else
4979 {
4980 /* The delayed and ready lists cannot be accessed, so hold
4981 this task pending until the scheduler is resumed. */
4982 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
4983 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004984
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004985 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
4986 {
4987 /* The notified task has a priority above the currently
4988 executing task so a yield is required. */
4989 if( pxHigherPriorityTaskWoken != NULL )
4990 {
4991 *pxHigherPriorityTaskWoken = pdTRUE;
4992 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08004993
xiaohu.huang4f321fb2024-03-22 14:50:29 +08004994 /* Mark that a yield is pending in case the user is not
4995 using the "xHigherPriorityTaskWoken" parameter to an ISR
4996 safe FreeRTOS function. */
4997 xYieldPending = pdTRUE;
4998 }
4999 else
5000 {
5001 mtCOVERAGE_TEST_MARKER();
5002 }
5003 }
5004 }
5005 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005006
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005007 return xReturn;
5008 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005009
5010#endif /* configUSE_TASK_NOTIFICATIONS */
5011/*-----------------------------------------------------------*/
5012
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005013#if( configUSE_TASK_NOTIFICATIONS == 1 )
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005014
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005015 void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken )
5016 {
5017 TCB_t * pxTCB;
5018 uint8_t ucOriginalNotifyState;
5019 UBaseType_t uxSavedInterruptStatus;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005020
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005021 configASSERT( xTaskToNotify );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005022
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005023 /* RTOS ports that support interrupt nesting have the concept of a
5024 maximum system call (or maximum API call) interrupt priority.
 5025 Interrupts that are above the maximum system call priority are kept
5026 permanently enabled, even when the RTOS kernel is in a critical section,
5027 but cannot make any calls to FreeRTOS API functions. If configASSERT()
5028 is defined in FreeRTOSConfig.h then
5029 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
5030 failure if a FreeRTOS API function is called from an interrupt that has
5031 been assigned a priority above the configured maximum system call
5032 priority. Only FreeRTOS functions that end in FromISR can be called
5033 from interrupts that have been assigned a priority at or (logically)
5034 below the maximum system call interrupt priority. FreeRTOS maintains a
5035 separate interrupt safe API to ensure interrupt entry is as fast and as
5036 simple as possible. More information (albeit Cortex-M specific) is
5037 provided on the following link:
5038 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
5039 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005040
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005041 pxTCB = xTaskToNotify;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005042
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005043 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
5044 {
5045 ucOriginalNotifyState = pxTCB->ucNotifyState;
5046 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005047
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005048 /* 'Giving' is equivalent to incrementing a count in a counting
5049 semaphore. */
5050 ( pxTCB->ulNotifiedValue )++;
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005051
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005052 traceTASK_NOTIFY_GIVE_FROM_ISR();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005053
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005054 /* If the task is in the blocked state specifically to wait for a
5055 notification then unblock it now. */
5056 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
5057 {
5058 /* The task should not have been on an event list. */
5059 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005060
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005061 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
5062 {
5063 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
5064 prvAddTaskToReadyList( pxTCB );
5065 }
5066 else
5067 {
5068 /* The delayed and ready lists cannot be accessed, so hold
5069 this task pending until the scheduler is resumed. */
5070 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
5071 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005072
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005073 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
5074 {
5075 /* The notified task has a priority above the currently
5076 executing task so a yield is required. */
5077 if( pxHigherPriorityTaskWoken != NULL )
5078 {
5079 *pxHigherPriorityTaskWoken = pdTRUE;
5080 }
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08005081
xiaohu.huang4f321fb2024-03-22 14:50:29 +08005082 /* Mark that a yield is pending in case the user is not
5083 using the "xHigherPriorityTaskWoken" parameter in an ISR
5084 safe FreeRTOS function. */
5085 xYieldPending = pdTRUE;
5086 }
5087 else
5088 {
5089 mtCOVERAGE_TEST_MARKER();
5090 }
5091 }
5092 }
5093 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
5094 }
5095
5096#endif /* configUSE_TASK_NOTIFICATIONS */
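
/* Illustrative usage sketch (not part of the kernel): vTaskNotifyGiveFromISR()
is the interrupt-safe counterpart of xTaskNotifyGive() and behaves like giving
a counting semaphore.  The ISR and handler names below are hypothetical
application code.

    void vExampleTransferCompleteISR( void )
    {
    BaseType_t xHigherPriorityTaskWoken = pdFALSE;

        Unblock the task that is waiting for the transfer to complete.
        vTaskNotifyGiveFromISR( xHandlerTask, &xHigherPriorityTaskWoken );

        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }

    void vHandlerTask( void *pvParameters )
    {
        for( ;; )
        {
            Block until the ISR gives a notification; pdTRUE clears the
            notification count back to zero on exit.
            if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) != 0 )
            {
                Process the completed transfer here.
            }
        }
    }
*/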

/*-----------------------------------------------------------*/
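
/* Illustrative configuration sketch (not part of the kernel): the interrupt
priority rules described in the comment block inside vTaskNotifyGiveFromISR()
above are enforced against configMAX_SYSCALL_INTERRUPT_PRIORITY.  On Cortex-M
ports this is typically derived in FreeRTOSConfig.h as shown below; the numeric
values are examples only and depend on how many priority bits the device
implements.

    #define configPRIO_BITS                                  3
    #define configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY     2
    #define configMAX_SYSCALL_INTERRUPT_PRIORITY \
            ( configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY << ( 8 - configPRIO_BITS ) )

Only ISRs whose priority is numerically at or above that value (i.e. logically
at or below the maximum system call priority) may call the FromISR API. */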

#if( configUSE_TASK_NOTIFICATIONS == 1 )

    BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask )
    {
    TCB_t *pxTCB;
    BaseType_t xReturn;

        /* If null is passed in here then it is the calling task that is having
        its notification state cleared. */
        pxTCB = prvGetTCBFromHandle( xTask );

        taskENTER_CRITICAL();
        {
            if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED )
            {
                pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
                xReturn = pdPASS;
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        taskEXIT_CRITICAL();

        return xReturn;
    }
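
    /* Illustrative usage sketch (not part of the kernel): clearing a stale
    pending notification before starting a new operation, so a later
    ulTaskNotifyTake()/xTaskNotifyWait() only sees fresh events.  The handle
    and function below are hypothetical application code; passing NULL clears
    the calling task's own notification state instead.

        void vStartTransaction( TaskHandle_t xWorkerTask )
        {
            ( void ) xTaskNotifyStateClear( xWorkerTask );
        }
    */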

#endif /* configUSE_TASK_NOTIFICATIONS */
/*-----------------------------------------------------------*/

#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
    TickType_t xTaskGetIdleRunTimeCounter( void )
    {
        return xIdleTaskHandle->ulRunTimeCounter;
    }
#endif
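
/* Illustrative usage sketch (not part of the kernel): an approximate idle-time
percentage can be derived by sampling the idle counter against the same
run-time counter that generates the statistics (the application-supplied
portGET_RUN_TIME_COUNTER_VALUE() macro).  The function and static variables
below are hypothetical application code and assume a single idle task.

    static uint32_t ulLastTotalTime = 0, ulLastIdleTime = 0;

    void vSampleIdlePercentage( uint32_t *pulIdlePercent )
    {
    uint32_t ulTotalTime = ( uint32_t ) portGET_RUN_TIME_COUNTER_VALUE();
    uint32_t ulIdleTime = ( uint32_t ) xTaskGetIdleRunTimeCounter();

        *pulIdlePercent = 0;

        if( ulTotalTime != ulLastTotalTime )
        {
            *pulIdlePercent = ( ( ulIdleTime - ulLastIdleTime ) * 100UL )
                              / ( ulTotalTime - ulLastTotalTime );
        }

        ulLastTotalTime = ulTotalTime;
        ulLastIdleTime = ulIdleTime;
    }
*/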
/*-----------------------------------------------------------*/

static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely )
{
TickType_t xTimeToWake;
const TickType_t xConstTickCount = xTickCount;

    #if( INCLUDE_xTaskAbortDelay == 1 )
    {
        /* About to enter a delayed list, so ensure the ucDelayAborted flag is
        reset to pdFALSE so it can be detected as having been set to pdTRUE
        when the task leaves the Blocked state. */
        pxCurrentTCB->ucDelayAborted = pdFALSE;
    }
    #endif

    /* Remove the task from the ready list before adding it to the blocked list
    as the same list item is used for both lists. */
    if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
    {
        /* The current task must be in a ready list, so there is no need to
        check, and the port reset macro can be called directly. */
        portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    #if ( INCLUDE_vTaskSuspend == 1 )
    {
        if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
        {
            /* Add the task to the suspended task list instead of a delayed task
            list to ensure it is not woken by a timing event.  It will block
            indefinitely. */
            vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) );
        }
        else
        {
            /* Calculate the time at which the task should be woken if the event
            does not occur.  This may overflow but this doesn't matter, the
            kernel will manage it correctly. */
            xTimeToWake = xConstTickCount + xTicksToWait;

            /* The list item will be inserted in wake time order. */
            listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );

            if( xTimeToWake < xConstTickCount )
            {
                /* Wake time has overflowed.  Place this item in the overflow
                list. */
                vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
            }
            else
            {
                /* The wake time has not overflowed, so the current block list
                is used. */
                vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );

                /* If the task entering the blocked state was placed at the
                head of the list of blocked tasks then xNextTaskUnblockTime
                needs to be updated too. */
                if( xTimeToWake < xNextTaskUnblockTime )
                {
                    xNextTaskUnblockTime = xTimeToWake;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
    }
    #else /* INCLUDE_vTaskSuspend */
    {
        /* Calculate the time at which the task should be woken if the event
        does not occur.  This may overflow but this doesn't matter, the kernel
        will manage it correctly. */
        xTimeToWake = xConstTickCount + xTicksToWait;

        /* The list item will be inserted in wake time order. */
        listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );

        if( xTimeToWake < xConstTickCount )
        {
            /* Wake time has overflowed.  Place this item in the overflow list. */
            vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
        }
        else
        {
            /* The wake time has not overflowed, so the current block list is used. */
            vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );

            /* If the task entering the blocked state was placed at the head of the
            list of blocked tasks then xNextTaskUnblockTime needs to be updated
            too. */
            if( xTimeToWake < xNextTaskUnblockTime )
            {
                xNextTaskUnblockTime = xTimeToWake;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
        ( void ) xCanBlockIndefinitely;
    }
    #endif /* INCLUDE_vTaskSuspend */
}
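
/* Worked example of the wake time calculation above (illustrative only): with
16-bit ticks (configUSE_16_BIT_TICKS == 1), xConstTickCount = 0xFFF0 and
xTicksToWait = 0x0020 give xTimeToWake = 0x0010 after the unsigned wrap-around.
Because 0x0010 < 0xFFF0 the task is placed on pxOverflowDelayedTaskList, which
becomes the active delayed list once the tick count itself overflows, so the
task still wakes 0x20 ticks later. */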

/* Code below here allows additional code to be inserted into this source file,
especially where access to file scope functions and data is needed (for example
when performing module tests). */

#ifdef FREERTOS_MODULE_TEST
    #include "tasks_test_access_functions.h"
#endif


#if( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )

    #include "freertos_tasks_c_additions.h"

    #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
        static void freertos_tasks_c_additions_init( void )
        {
            FREERTOS_TASKS_C_ADDITIONS_INIT();
        }
    #endif

#endif

#if ENABLE_KASAN
/* KASAN (Kernel Address Sanitizer) support (vendor extension): each task keeps
a kasan_depth nesting counter.  kasan_disable_current() increments it and
kasan_enable_current() decrements it; checks are considered enabled for the
current task only while the counter is at or below zero. */
void kasan_enable_current(void)
{
    if (pxCurrentTCB)
        pxCurrentTCB->kasan_depth--;
}

void kasan_disable_current(void)
{
    if (pxCurrentTCB)
        pxCurrentTCB->kasan_depth++;
}

int kasan_current_enabled(void)
{
    if (pxCurrentTCB)
        return (pxCurrentTCB->kasan_depth <= 0);
    return 0;
}
#endif
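
/* Illustrative usage sketch (vendor extension, not part of upstream FreeRTOS):
assuming the KASAN runtime consults kasan_current_enabled() before reporting,
checks can be suppressed for the calling task around an access that is known to
trip the sanitizer.  The buffer names below are hypothetical.

    kasan_disable_current();
    memcpy( pvShadowedDestination, pvSource, xLength );
    kasan_enable_current();
*/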

/* Include implementation source code that depends on this file's
file scope (static) functions and data. */
#include "aml_tasks_ext.c"