1/*
2 * FreeRTOS Kernel V10.2.1
3 * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a copy of
6 * this software and associated documentation files (the "Software"), to deal in
7 * the Software without restriction, including without limitation the rights to
8 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 * the Software, and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in all
13 * copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * http://www.FreeRTOS.org
23 * http://aws.amazon.com/freertos
24 *
25 * 1 tab == 4 spaces!
26 */
27
28/* Standard includes. */
29#include <stdlib.h>
30#include <string.h>
31
32/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
33all the API functions to use the MPU wrappers. That should only be done when
34task.h is included from an application file. */
35#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
36
37/* FreeRTOS includes. */
38#include "FreeRTOS.h"
39#include "task.h"
40#include "timers.h"
41#include "stack_macros.h"
42#if CONFIG_FTRACE
43#include "ftrace.h"
44#endif
45
46/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
47because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
48for the header files above, but not in this file, in order to generate the
49correct privileged Vs unprivileged linkage and placement. */
50#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
51
52/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
53functions but without including stdio.h here. */
54#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
55 /* At the bottom of this file are two optional functions that can be used
56 to generate human readable text from the raw data generated by the
57 uxTaskGetSystemState() function. Note the formatting functions are provided
58 for convenience only, and are NOT considered part of the kernel. */
59 #include <stdio.h>
60#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
61
62#if( configUSE_PREEMPTION == 0 )
63 /* If the cooperative scheduler is being used then a yield should not be
64 performed just because a higher priority task has been woken. */
65 #define taskYIELD_IF_USING_PREEMPTION()
66#else
67 #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
68#endif
69
70/* Values that can be assigned to the ucNotifyState member of the TCB. */
71#define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 )
72#define taskWAITING_NOTIFICATION ( ( uint8_t ) 1 )
73#define taskNOTIFICATION_RECEIVED ( ( uint8_t ) 2 )
74
75/*
76 * The value used to fill the stack of a task when the task is created. This
77 * is used purely for checking the high water mark for tasks.
78 */
79#define tskSTACK_FILL_BYTE ( 0xa5U )
80
81/* Bits used to record how a task's stack and TCB were allocated. */
82#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )
83#define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )
84#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )
85
86/* If any of the following are set then task stacks are filled with a known
87value so the high water mark can be determined. If none of the following are
88set then don't fill the stack so there is no unnecessary dependency on memset. */
89#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
90 #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 1
91#else
92 #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 0
93#endif
94
95/*
96 * Macros used by vTaskList to indicate which state a task is in.
97 */
98#define tskRUNNING_CHAR ( 'X' )
99#define tskBLOCKED_CHAR ( 'B' )
100#define tskREADY_CHAR ( 'R' )
101#define tskDELETED_CHAR ( 'D' )
102#define tskSUSPENDED_CHAR ( 'S' )
103
104/*
105 * Some kernel aware debuggers require that the data they need access to is
106 * global, rather than file scope.
107 */
108#ifdef portREMOVE_STATIC_QUALIFIER
109 #define static
110#endif
111
112/* The name allocated to the Idle task. This can be overridden by defining
113configIDLE_TASK_NAME in FreeRTOSConfig.h. */
114#ifndef configIDLE_TASK_NAME
115 #define configIDLE_TASK_NAME "IDLE"
116#endif
117
118#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
119
120 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
121 performed in a generic way that is not optimised to any particular
122 microcontroller architecture. */
123
124 /* uxTopReadyPriority holds the priority of the highest priority ready
125 state task. */
126 #define taskRECORD_READY_PRIORITY( uxPriority ) \
127 { \
128 if( ( uxPriority ) > uxTopReadyPriority ) \
129 { \
130 uxTopReadyPriority = ( uxPriority ); \
131 } \
132 } /* taskRECORD_READY_PRIORITY */
133
134 /*-----------------------------------------------------------*/
135
136 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
137 { \
138 UBaseType_t uxTopPriority = uxTopReadyPriority; \
139 \
140 /* Find the highest priority queue that contains ready tasks. */ \
141 while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \
142 { \
143 configASSERT( uxTopPriority ); \
144 --uxTopPriority; \
145 } \
146 \
147 /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
148 the same priority get an equal share of the processor time. */ \
149 listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
150 uxTopReadyPriority = uxTopPriority; \
151 } /* taskSELECT_HIGHEST_PRIORITY_TASK */
152
153 /*-----------------------------------------------------------*/
154
155 /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
156 they are only required when a port optimised method of task selection is
157 being used. */
158 #define taskRESET_READY_PRIORITY( uxPriority )
159 #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )
160
161#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
162
163 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
164 performed in a way that is tailored to the particular microcontroller
165 architecture being used. */
166
167 /* A port optimised version is provided. Call the port defined macros. */
168 #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )
169
170 /*-----------------------------------------------------------*/
171
172 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
173 { \
174 UBaseType_t uxTopPriority; \
175 \
176 /* Find the highest priority list that contains ready tasks. */ \
177 portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \
178 configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
179 listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
180 } /* taskSELECT_HIGHEST_PRIORITY_TASK() */
181
182 /*-----------------------------------------------------------*/
183
184 /* A port optimised version is provided, call it only if the TCB being reset
185 is being referenced from a ready list. If it is referenced from a delayed
186 or suspended list then it won't be in a ready list. */
187 #define taskRESET_READY_PRIORITY( uxPriority ) \
188 { \
189 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
190 { \
191 portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
192 } \
193 }
194
195#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
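/* Illustration only (not part of this file): on a 32-bit port with
configMAX_PRIORITIES no greater than 32, the port supplied hooks used above are
typically built from a ready-priority bit map and a count leading zeros
instruction.  A sketch, assuming a GCC-style __builtin_clz() is available -
real ports differ in detail:

	#define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) \
		( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )

	#define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) \
		( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )

	#define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) \
		uxTopPriority = ( 31UL - ( uint32_t ) __builtin_clz( ( uxReadyPriorities ) ) )
*/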
196
197/*-----------------------------------------------------------*/
198
199/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
200count overflows. */
201#define taskSWITCH_DELAYED_LISTS() \
202{ \
203 List_t *pxTemp; \
204 \
205 /* The delayed tasks list should be empty when the lists are switched. */ \
206 configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \
207 \
208 pxTemp = pxDelayedTaskList; \
209 pxDelayedTaskList = pxOverflowDelayedTaskList; \
210 pxOverflowDelayedTaskList = pxTemp; \
211 xNumOfOverflows++; \
212 prvResetNextTaskUnblockTime(); \
213}
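/* Illustration of why two delayed lists are needed: with 16-bit ticks, a task
that blocks for 0x20 ticks when xTickCount is 0xFFF0 has a wake time that wraps
to 0x0010.  As the wake time is numerically lower than the current count the
task is placed on pxOverflowDelayedTaskList, and it only becomes eligible to
unblock after the tick count itself wraps and this macro swaps the two list
pointers. */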
214
215/*-----------------------------------------------------------*/
216
217/*
218 * Place the task represented by pxTCB into the appropriate ready list for
219 * the task. It is inserted at the end of the list.
220 */
221#define prvAddTaskToReadyList( pxTCB ) \
222 traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
223 taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
224 vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
225 tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
226/*-----------------------------------------------------------*/
227
228/*
229 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
230 * where NULL is used to indicate that the handle of the currently executing
231 * task should be used in place of the parameter. This macro simply checks to
232 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
233 */
234#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) )
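/* For example, vTaskDelete( NULL ) and uxTaskPriorityGet( NULL ) below both
resolve to the calling task's TCB through this macro. */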
235
236/* The item value of the event list item is normally used to hold the priority
237of the task to which it belongs (coded to allow it to be held in reverse
238priority order). However, it is occasionally borrowed for other purposes. It
239is important its value is not updated due to a task priority change while it is
240being used for another purpose. The following bit definition is used to inform
241the scheduler that the value should not be changed - in which case it is the
242responsibility of whichever module is using the value to ensure it gets set back
243to its original value when it is released. */
244#if( configUSE_16_BIT_TICKS == 1 )
245 #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U
246#else
247 #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL
248#endif
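/* Illustration of the reverse priority coding described above:
prvInitialiseNewTask() sets the event list item value to
( configMAX_PRIORITIES - uxPriority ), so with configMAX_PRIORITIES set to 5 a
priority 4 task stores 1 and a priority 0 task stores 5.  Event lists are kept
in ascending item value order, which therefore places higher priority tasks
nearer the front of the list. */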
249
250/*
251 * Task control block. A task control block (TCB) is allocated for each task,
252 * and stores task state information, including a pointer to the task's context
253 * (the task's run time environment, including register values)
254 */
255typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */
256{
257	volatile StackType_t	*pxTopOfStack;	/*< Points to the location of the last item placed on the task's stack.  THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
258
259 #if ( portUSING_MPU_WRAPPERS == 1 )
260 xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
261 #endif
262
263	ListItem_t			xStateListItem;	/*< The list that the state list item of a task is referenced from denotes the state of that task (Ready, Blocked, Suspended ). */
264 ListItem_t xEventListItem; /*< Used to reference a task from an event list. */
265 UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */
266 StackType_t *pxStack; /*< Points to the start of the stack. */
267	StackType_t		uStackDepth;		/*< The depth, in words, of the stack supplied when the task was created. */
268	char			pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
269
270 #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
271 StackType_t *pxEndOfStack; /*< Points to the highest valid address for the stack. */
272 #endif
273
274 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
275 UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
276 #endif
277
278 #if ( configUSE_TRACE_FACILITY == 1 )
279 UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
280 UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
281 #endif
282
283 #if ( configUSE_MUTEXES == 1 )
284 UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
285 UBaseType_t uxMutexesHeld;
286 #endif
287
288 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
289 TaskHookFunction_t pxTaskTag;
290 #endif
291
292 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
293 void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
294 #endif
295
296 #if( configGENERATE_RUN_TIME_STATS == 1 )
297 uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
298 #endif
299
300 #if ( configUSE_NEWLIB_REENTRANT == 1 )
301 /* Allocate a Newlib reent structure that is specific to this task.
302 Note Newlib support has been included by popular demand, but is not
303 used by the FreeRTOS maintainers themselves. FreeRTOS is not
304 responsible for resulting newlib operation. User must be familiar with
305 newlib and must provide system-wide implementations of the necessary
306 stubs. Be warned that (at the time of writing) the current newlib design
307 implements a system-wide malloc() that must be provided with locks. */
308 struct _reent xNewLib_reent;
309 #endif
310
311 #if( configUSE_TASK_NOTIFICATIONS == 1 )
312 volatile uint32_t ulNotifiedValue;
313 volatile uint8_t ucNotifyState;
314 #endif
315
316 /* See the comments in FreeRTOS.h with the definition of
317 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
318 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
319		uint8_t	ucStaticallyAllocated; 		/*< Set to pdTRUE if the task is statically allocated to ensure no attempt is made to free the memory. */
320 #endif
321
322 #if( INCLUDE_xTaskAbortDelay == 1 )
323 uint8_t ucDelayAborted;
324 #endif
325
326 #if( configUSE_POSIX_ERRNO == 1 )
327 int iTaskErrno;
328 #endif
329	#if ( configUSE_TASK_START_HOOK == 1 )
330 void *pxTaskFun;
331 void *pxTaskPara;
332 #endif
333 #if ENABLE_KASAN
334 int kasan_depth;
335 #endif
336} tskTCB;
337
338/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
339below to enable the use of older kernel aware debuggers. */
340typedef tskTCB TCB_t;
341
342/*lint -save -e956 A manual analysis and inspection has been used to determine
343which static variables must be declared volatile. */
344PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;
345
346/* Lists for ready and blocked tasks. --------------------
347xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
348doing so breaks some kernel aware debuggers and debuggers that rely on removing
349the static qualifier. */
350PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */
351PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
352PRIVILEGED_DATA static List_t xDelayedTaskList2;						/*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count). */
353PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
354PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
355PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
356
357#if( INCLUDE_vTaskDelete == 1 )
358
359 PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */
360 PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
361
362#endif
363
364#if ( INCLUDE_vTaskSuspend == 1 )
365
366 PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
367
368#endif
369
370/* Global POSIX errno. Its value is changed upon context switching to match
371the errno of the currently running task. */
372#if ( configUSE_POSIX_ERRNO == 1 )
373 int FreeRTOS_errno = 0;
374#endif
375
376/* Other file private variables. --------------------------------*/
377PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
378PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
379PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
380PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
381PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;
382PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;
383PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
384PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
385PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
386PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
387
388/* Context switches are held pending while the scheduler is suspended. Also,
389interrupts must not manipulate the xStateListItem of a TCB, or any of the
390lists the xStateListItem can be referenced from, if the scheduler is suspended.
391If an interrupt needs to unblock a task while the scheduler is suspended then it
392moves the task's event list item into the xPendingReadyList, ready for the
393kernel to move the task from the pending ready list into the real ready list
394when the scheduler is unsuspended. The pending ready list itself can only be
395accessed from a critical section. */
396PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE;
397
398#if ( configGENERATE_RUN_TIME_STATS == 1 )
399
400 /* Do not move these variables to function scope as doing so prevents the
401 code working with debuggers that need to remove the static qualifier. */
402 PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */
403 PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */
404
405#endif
406
407/*lint -restore */
408
409/*-----------------------------------------------------------*/
410
411/* Callback function prototypes. --------------------------*/
412#if( configCHECK_FOR_STACK_OVERFLOW > 0 )
413
414 extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
415
416#endif
417
418#if( configUSE_TICK_HOOK > 0 )
419
420 extern void vApplicationTickHook( void ); /*lint !e526 Symbol not defined as it is an application callback. */
421
422#endif
423
424#if( configSUPPORT_STATIC_ALLOCATION == 1 )
425
426 extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */
427
428#endif
429
430/* File private functions. --------------------------------*/
431
432/**
433 * Utility function that simply returns pdTRUE if the task referenced by xTask is
434 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
435 * is in any other state.
436 */
437#if ( INCLUDE_vTaskSuspend == 1 )
438
439 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
440
441#endif /* INCLUDE_vTaskSuspend */
442
443/*
444 * Utility to ready all the lists used by the scheduler. This is called
445 * automatically upon the creation of the first task.
446 */
447static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
448
449/*
450 * The idle task, which like all tasks is implemented as a never ending loop.
451 * The idle task is automatically created and added to the ready lists upon
452 * creation of the first user task.
453 *
454 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
455 * language extensions. The equivalent prototype for this function is:
456 *
457 * void prvIdleTask( void *pvParameters );
458 *
459 */
460static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters );
461
462/*
463 * Utility to free all memory allocated by the scheduler to hold a TCB,
464 * including the stack pointed to by the TCB.
465 *
466 * This does not free memory allocated by the task itself (i.e. memory
467 * allocated by calls to pvPortMalloc from within the task's application code).
468 */
469#if ( INCLUDE_vTaskDelete == 1 )
470
471 static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION;
472
473#endif
474
475/*
476 * Used only by the idle task. This checks to see if anything has been placed
477 * in the list of tasks waiting to be deleted. If so the task is cleaned up
478 * and its TCB deleted.
479 */
480static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
481
482/*
483 * The currently executing task is entering the Blocked state. Add the task to
484 * either the current or the overflow delayed task list.
485 */
486static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;
487
488/*
489 * Fills a TaskStatus_t structure with information on each task that is
490 * referenced from the pxList list (which may be a ready list, a delayed list,
491 * a suspended list, etc.).
492 *
493 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
494 * NORMAL APPLICATION CODE.
495 */
496#if ( configUSE_TRACE_FACILITY == 1 )
497
498 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION;
499
500#endif
501
502/*
503 * Searches pxList for a task with name pcNameToQuery - returning a handle to
504 * the task if it is found, or NULL if the task is not found.
505 */
506#if ( INCLUDE_xTaskGetHandle == 1 )
507
508 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
509
510#endif
511
512/*
513 * When a task is created, the stack of the task is filled with a known value.
514 * This function determines the 'high water mark' of the task stack by
515 * determining how much of the stack remains at the original preset value.
516 */
517#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
518
519 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
520
521#endif
522
523/*
524 * Return the amount of time, in ticks, that will pass before the kernel will
525 * next move a task from the Blocked state to the Running state.
526 *
527 * This conditional compilation should use inequality to 0, not equality to 1.
528 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
529 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
530 * set to a value other than 1.
531 */
532#if ( configUSE_TICKLESS_IDLE != 0 )
533
534 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
535
536#endif
537
538/*
539 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
540 * will exit the Blocked state.
541 */
542static void prvResetNextTaskUnblockTime( void );
543
544#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
545
546 /*
547 * Helper function used to pad task names with spaces when printing out
548 * human readable tables of task information.
549 */
550 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) PRIVILEGED_FUNCTION;
551
552#endif
553
554/*
555 * Called after a TCB_t structure has been allocated either statically or
556 * dynamically to fill in the structure's members.
557 */
558static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
559 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
560 const uint32_t ulStackDepth,
561									void * pvParameters,
562									UBaseType_t uxPriority,
563 TaskHandle_t * const pxCreatedTask,
564 TCB_t *pxNewTCB,
565 const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;
566
567/*
568 * Called after a new task has been created and initialised to place the task
569 * under the control of the scheduler.
570 */
571static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION;
572
573/*
574 * freertos_tasks_c_additions_init() should only be called if the user definable
575 * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
576 * called by the function.
577 */
578#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
579
580 static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
581
582#endif
583
584/*-----------------------------------------------------------*/
585
586#if( configSUPPORT_STATIC_ALLOCATION == 1 )
587
588 TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
589 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
590 const uint32_t ulStackDepth,
591 void * const pvParameters,
592 UBaseType_t uxPriority,
593 StackType_t * const puxStackBuffer,
594 StaticTask_t * const pxTaskBuffer )
595 {
596 TCB_t *pxNewTCB;
597 TaskHandle_t xReturn;
598
599 configASSERT( puxStackBuffer != NULL );
600 configASSERT( pxTaskBuffer != NULL );
601
602 #if( configASSERT_DEFINED == 1 )
603 {
604 /* Sanity check that the size of the structure used to declare a
605 variable of type StaticTask_t equals the size of the real task
606 structure. */
607 volatile size_t xSize = sizeof( StaticTask_t );
608 configASSERT( xSize == sizeof( TCB_t ) );
609 ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
610 }
611 #endif /* configASSERT_DEFINED */
612
613
614 if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
615 {
616 /* The memory used for the task's TCB and stack are passed into this
617 function - use them. */
618 pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
619 pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
620
621 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
622 {
623 /* Tasks can be created statically or dynamically, so note this
624 task was created statically in case the task is later deleted. */
625 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
626 }
627 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
628
629 prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL );
630 prvAddNewTaskToReadyList( pxNewTCB );
631 }
632 else
633 {
634 xReturn = NULL;
635 }
636
637 return xReturn;
638 }
639
640#endif /* SUPPORT_STATIC_ALLOCATION */
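/* Example usage (illustrative sketch only - the buffer sizes and the task
function are application assumptions, not part of this file):

	#define exampleSTACK_SIZE	200
	static StackType_t xStack[ exampleSTACK_SIZE ];
	static StaticTask_t xTaskBuffer;

	static void vExampleTask( void *pvParameters )
	{
		for( ;; )
		{
			// Task code goes here.
		}
	}

	void vCreateExampleTask( void )
	{
		TaskHandle_t xHandle;

		xHandle = xTaskCreateStatic( vExampleTask, "Example", exampleSTACK_SIZE,
									 NULL, tskIDLE_PRIORITY + 1, xStack, &xTaskBuffer );
		configASSERT( xHandle != NULL );
	}
*/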
641/*-----------------------------------------------------------*/
642
643#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
644
645 BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
646 {
647 TCB_t *pxNewTCB;
648 BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
649
650 configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
651 configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );
652
653 if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
654 {
655 /* Allocate space for the TCB. Where the memory comes from depends
656 on the implementation of the port malloc function and whether or
657 not static allocation is being used. */
658 pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;
659
660 /* Store the stack location in the TCB. */
661 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
662
663 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
664 {
665 /* Tasks can be created statically or dynamically, so note this
666 task was created statically in case the task is later deleted. */
667 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
668 }
669 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
670
671 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
672 pxTaskDefinition->pcName,
673 ( uint32_t ) pxTaskDefinition->usStackDepth,
674 pxTaskDefinition->pvParameters,
675 pxTaskDefinition->uxPriority,
676 pxCreatedTask, pxNewTCB,
677 pxTaskDefinition->xRegions );
678
679 prvAddNewTaskToReadyList( pxNewTCB );
680 xReturn = pdPASS;
681 }
682
683 return xReturn;
684 }
685
686#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
687/*-----------------------------------------------------------*/
688
689#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
690
691 BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
692 {
693 TCB_t *pxNewTCB;
694 BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
695
696 configASSERT( pxTaskDefinition->puxStackBuffer );
697
698 if( pxTaskDefinition->puxStackBuffer != NULL )
699 {
700 /* Allocate space for the TCB. Where the memory comes from depends
701 on the implementation of the port malloc function and whether or
702 not static allocation is being used. */
703 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
704
705 if( pxNewTCB != NULL )
706 {
707 /* Store the stack location in the TCB. */
708 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
709
710 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
711 {
712 /* Tasks can be created statically or dynamically, so note
713 this task had a statically allocated stack in case it is
714 later deleted. The TCB was allocated dynamically. */
715 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
716 }
717 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
718
719 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
720 pxTaskDefinition->pcName,
721 ( uint32_t ) pxTaskDefinition->usStackDepth,
722 pxTaskDefinition->pvParameters,
723 pxTaskDefinition->uxPriority,
724 pxCreatedTask, pxNewTCB,
725 pxTaskDefinition->xRegions );
726
727 prvAddNewTaskToReadyList( pxNewTCB );
728 xReturn = pdPASS;
729 }
730 }
731
732 return xReturn;
733 }
734
735#endif /* portUSING_MPU_WRAPPERS */
736/*-----------------------------------------------------------*/
737
738#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
739
740 BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
741 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
742 const configSTACK_DEPTH_TYPE usStackDepth,
743 void * const pvParameters,
744 UBaseType_t uxPriority,
745 TaskHandle_t * const pxCreatedTask )
746 {
747 TCB_t *pxNewTCB;
748 BaseType_t xReturn;
749
750
751		/* If the stack grows down then allocate the stack then the TCB so the stack
752 does not grow into the TCB. Likewise if the stack grows up then allocate
753 the TCB then the stack. */
754 #if( portSTACK_GROWTH > 0 )
755 {
756 /* Allocate space for the TCB. Where the memory comes from depends on
757 the implementation of the port malloc function and whether or not static
758 allocation is being used. */
759 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
760
761 if( pxNewTCB != NULL )
762 {
763 /* Allocate space for the stack used by the task being created.
764 The base of the stack memory stored in the TCB so the task can
765 be deleted later if required. */
766 pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
767
768 if( pxNewTCB->pxStack == NULL )
769 {
770 /* Could not allocate the stack. Delete the allocated TCB. */
771 vPortFree( pxNewTCB );
772 pxNewTCB = NULL;
773 }
774 }
775 }
776 #else /* portSTACK_GROWTH */
777 {
778 StackType_t *pxStack;
779
780 /* Allocate space for the stack used by the task being created. */
781 pxStack = pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */
782
783 if( pxStack != NULL )
784 {
785 /* Allocate space for the TCB. */
786 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */
787
788 if( pxNewTCB != NULL )
789 {
790 /* Store the stack location in the TCB. */
791 pxNewTCB->pxStack = pxStack;
792 }
793 else
794 {
795 /* The stack cannot be used as the TCB was not created. Free
796 it again. */
797 vPortFree( pxStack );
798 }
799 }
800 else
801 {
802 pxNewTCB = NULL;
803 }
804 }
805 #endif /* portSTACK_GROWTH */
806
807 if( pxNewTCB != NULL )
808 {
809 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */
810 {
811 /* Tasks can be created statically or dynamically, so note this
812 task was created dynamically in case it is later deleted. */
813 pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
814 }
815 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
816
817 prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
818 prvAddNewTaskToReadyList( pxNewTCB );
819 xReturn = pdPASS;
820 }
821 else
822 {
823 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
824 }
825
826 return xReturn;
827 }
828
829#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
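/* Example usage (illustrative sketch only - the task function and its name are
application assumptions):

	static void vSensorTask( void *pvParameters )
	{
		for( ;; )
		{
			// Application code goes here.
		}
	}

	void vStartSensorTask( void )
	{
		TaskHandle_t xHandle = NULL;

		if( xTaskCreate( vSensorTask, "Sensor", configMINIMAL_STACK_SIZE,
						 NULL, tskIDLE_PRIORITY + 1, &xHandle ) != pdPASS )
		{
			// Not enough heap remained to allocate the TCB or stack.
		}
	}
*/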
830/*-----------------------------------------------------------*/
831#if ( configUSE_TASK_START_HOOK == 1 )
832static void prvTaskFunWrp( void *para)
833{
834 TCB_t *pxNewTCB = (TCB_t *)para;
835 {
836
837		extern void vApplicationTaskStartHook( void );
838 vApplicationTaskStartHook();
839 }
840 ((TaskFunction_t)pxNewTCB->pxTaskFun)(pxNewTCB->pxTaskPara);
841}
842#endif
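/* When configUSE_TASK_START_HOOK is 1 the wrapper above runs once, in the
context of each task, before the task's own function is entered.  A minimal
application supplied hook might look like the following sketch (the body is an
assumption - only the prototype is dictated by the extern declaration above):

	void vApplicationTaskStartHook( void )
	{
		// One-off, per-task start of life processing, e.g. trace output.
	}
*/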
843static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
844 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
845 const uint32_t ulStackDepth,
846									void * pvParameters,
847									UBaseType_t uxPriority,
848 TaskHandle_t * const pxCreatedTask,
849 TCB_t *pxNewTCB,
850 const MemoryRegion_t * const xRegions )
851{
852StackType_t *pxTopOfStack;
853UBaseType_t x;
854
855 #if( portUSING_MPU_WRAPPERS == 1 )
856 /* Should the task be created in privileged mode? */
857 BaseType_t xRunPrivileged;
858 if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
859 {
860 xRunPrivileged = pdTRUE;
861 }
862 else
863 {
864 xRunPrivileged = pdFALSE;
865 }
866 uxPriority &= ~portPRIVILEGE_BIT;
867 #endif /* portUSING_MPU_WRAPPERS == 1 */
868
869	#if ENABLE_KASAN
870 pxNewTCB->kasan_depth = 0;
871 #endif
872
873 #if ( configUSE_TASK_START_HOOK == 1 )
874 pxNewTCB->pxTaskFun = pxTaskCode;
875 pxNewTCB->pxTaskPara = pvParameters;
876 pxTaskCode = prvTaskFunWrp;
877 pvParameters = pxNewTCB;
878 #endif
879
880 pxNewTCB->uStackDepth = ulStackDepth;
881	/* Avoid dependency on memset() if it is not required. */
882 #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
883 {
884 /* Fill the stack with a known value to assist debugging. */
885 ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
886 }
887 #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */
888
889 /* Calculate the top of stack address. This depends on whether the stack
890 grows from high memory to low (as per the 80x86) or vice versa.
891 portSTACK_GROWTH is used to make the result positive or negative as required
892 by the port. */
893 #if( portSTACK_GROWTH < 0 )
894 {
895 pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
896 pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */
897
898 /* Check the alignment of the calculated top of stack is correct. */
899 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
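		/* Worked example, assuming portBYTE_ALIGNMENT is 8 (so
		portBYTE_ALIGNMENT_MASK is 0x0007): if the address calculated above is
		0x20001FF4, masking with ~0x0007 rounds the top of stack down to
		0x20001FF0, which satisfies the assert. */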
900
901 #if( configRECORD_STACK_HIGH_ADDRESS == 1 )
902 {
903 /* Also record the stack's high address, which may assist
904 debugging. */
905 pxNewTCB->pxEndOfStack = pxTopOfStack;
906 }
907 #endif /* configRECORD_STACK_HIGH_ADDRESS */
908 }
909 #else /* portSTACK_GROWTH */
910 {
911 pxTopOfStack = pxNewTCB->pxStack;
912
913 /* Check the alignment of the stack buffer is correct. */
914 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
915
916 /* The other extreme of the stack space is required if stack checking is
917 performed. */
918 pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
919 }
920 #endif /* portSTACK_GROWTH */
921
922 /* Store the task name in the TCB. */
923 if( pcName != NULL )
924 {
925 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
926 {
927 pxNewTCB->pcTaskName[ x ] = pcName[ x ];
928
929 /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
930 configMAX_TASK_NAME_LEN characters just in case the memory after the
931 string is not accessible (extremely unlikely). */
932 if( pcName[ x ] == ( char ) 0x00 )
933 {
934 break;
935 }
936 else
937 {
938 mtCOVERAGE_TEST_MARKER();
939 }
940 }
941
942 /* Ensure the name string is terminated in the case that the string length
943 was greater or equal to configMAX_TASK_NAME_LEN. */
944 pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
945 }
946 else
947 {
948 /* The task has not been given a name, so just ensure there is a NULL
949 terminator when it is read out. */
950 pxNewTCB->pcTaskName[ 0 ] = 0x00;
951 }
952
953 /* This is used as an array index so must ensure it's not too large. First
954 remove the privilege bit if one is present. */
955 if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
956 {
957 uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
958 }
959 else
960 {
961 mtCOVERAGE_TEST_MARKER();
962 }
963
964 pxNewTCB->uxPriority = uxPriority;
965 #if ( configUSE_MUTEXES == 1 )
966 {
967 pxNewTCB->uxBasePriority = uxPriority;
968 pxNewTCB->uxMutexesHeld = 0;
969 }
970 #endif /* configUSE_MUTEXES */
971
972 vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
973 vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
974
975 /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
976 back to the containing TCB from a generic item in a list. */
977 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );
978
979 /* Event lists are always in priority order. */
980 listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
981 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
982
983 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
984 {
985 pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
986 }
987 #endif /* portCRITICAL_NESTING_IN_TCB */
988
989 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
990 {
991 pxNewTCB->pxTaskTag = NULL;
992 }
993 #endif /* configUSE_APPLICATION_TASK_TAG */
994
995 #if ( configGENERATE_RUN_TIME_STATS == 1 )
996 {
997 pxNewTCB->ulRunTimeCounter = 0UL;
998 }
999 #endif /* configGENERATE_RUN_TIME_STATS */
1000
1001 #if ( portUSING_MPU_WRAPPERS == 1 )
1002 {
1003 vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
1004 }
1005 #else
1006 {
1007 /* Avoid compiler warning about unreferenced parameter. */
1008 ( void ) xRegions;
1009 }
1010 #endif
1011
1012 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
1013 {
1014 for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
1015 {
1016 pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL;
1017 }
1018 }
1019 #endif
1020
1021 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
1022 {
1023 pxNewTCB->ulNotifiedValue = 0;
1024 pxNewTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
1025 }
1026 #endif
1027
1028 #if ( configUSE_NEWLIB_REENTRANT == 1 )
1029 {
1030 /* Initialise this task's Newlib reent structure. */
1031 _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );
1032 }
1033 #endif
1034
1035 #if( INCLUDE_xTaskAbortDelay == 1 )
1036 {
1037 pxNewTCB->ucDelayAborted = pdFALSE;
1038 }
1039 #endif
1040
1041 /* Initialize the TCB stack to look as if the task was already running,
1042 but had been interrupted by the scheduler. The return address is set
1043 to the start of the task function. Once the stack has been initialised
1044 the top of stack variable is updated. */
1045 #if( portUSING_MPU_WRAPPERS == 1 )
1046 {
1047 /* If the port has capability to detect stack overflow,
1048 pass the stack end address to the stack initialization
1049 function as well. */
1050 #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1051 {
1052 #if( portSTACK_GROWTH < 0 )
1053 {
1054 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );
1055 }
1056 #else /* portSTACK_GROWTH */
1057 {
1058 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );
1059 }
1060 #endif /* portSTACK_GROWTH */
1061 }
1062 #else /* portHAS_STACK_OVERFLOW_CHECKING */
1063 {
1064 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
1065 }
1066 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1067 }
1068 #else /* portUSING_MPU_WRAPPERS */
1069 {
1070 /* If the port has capability to detect stack overflow,
1071 pass the stack end address to the stack initialization
1072 function as well. */
1073 #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1074 {
1075 #if( portSTACK_GROWTH < 0 )
1076 {
1077 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
1078 }
1079 #else /* portSTACK_GROWTH */
1080 {
1081 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
1082 }
1083 #endif /* portSTACK_GROWTH */
1084 }
1085 #else /* portHAS_STACK_OVERFLOW_CHECKING */
1086 {
1087 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
1088 }
1089 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1090 }
1091 #endif /* portUSING_MPU_WRAPPERS */
1092
1093 if( pxCreatedTask != NULL )
1094 {
1095 /* Pass the handle out in an anonymous way. The handle can be used to
1096 change the created task's priority, delete the created task, etc.*/
1097 *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
1098 }
1099 else
1100 {
1101 mtCOVERAGE_TEST_MARKER();
1102 }
1103}
1104/*-----------------------------------------------------------*/
1105#ifdef CONFIG_DMALLOC
1106extern struct MemLeak MemLeak_t[CONFIG_DMALLOC_SIZE];
1107#endif
1108
1109static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
1110{
1111 /* Ensure interrupts don't access the task lists while the lists are being
1112 updated. */
1113 taskENTER_CRITICAL();
1114 {
1115 uxCurrentNumberOfTasks++;
1116 if( pxCurrentTCB == NULL )
1117 {
1118 /* There are no other tasks, or all the other tasks are in
1119 the suspended state - make this the current task. */
1120 pxCurrentTCB = pxNewTCB;
1121
1122 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
1123 {
1124 /* This is the first task to be created so do the preliminary
1125 initialisation required. We will not recover if this call
1126 fails, but we will report the failure. */
1127 prvInitialiseTaskLists();
1128 }
1129 else
1130 {
1131 mtCOVERAGE_TEST_MARKER();
1132 }
1133 }
1134 else
1135 {
1136 /* If the scheduler is not already running, make this task the
1137 current task if it is the highest priority task to be created
1138 so far. */
1139 if( xSchedulerRunning == pdFALSE )
1140 {
1141 if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority )
1142 {
1143 pxCurrentTCB = pxNewTCB;
1144 }
1145 else
1146 {
1147 mtCOVERAGE_TEST_MARKER();
1148 }
1149 }
1150 else
1151 {
1152 mtCOVERAGE_TEST_MARKER();
1153 }
1154 }
1155
1156#ifdef CONFIG_DMALLOC
1157		int i = 0;
1158
1159		if ( xSchedulerRunning != pdFALSE ) {
1160			for (i = 1; i < CONFIG_DMALLOC_SIZE; i ++) {
1161				if (MemLeak_t[i].Flag == 0) {
1162 uxTaskNumber = i;
1163 break;
1164 }
1165 }
1166
1167			configASSERT( i < CONFIG_DMALLOC_SIZE );
1168		} else {
1169			uxTaskNumber ++;
1170		}
1171		MemLeak_t[uxTaskNumber].Flag = 1;
1172#else
1173		uxTaskNumber++;
1174#endif
1175
1176 #if ( configUSE_TRACE_FACILITY == 1 )
1177 {
1178 /* Add a counter into the TCB for tracing only. */
1179 pxNewTCB->uxTCBNumber = uxTaskNumber;
1180 }
1181 #endif /* configUSE_TRACE_FACILITY */
1182 traceTASK_CREATE( pxNewTCB );
1183
1184 prvAddTaskToReadyList( pxNewTCB );
1185
1186 portSETUP_TCB( pxNewTCB );
1187 }
1188 taskEXIT_CRITICAL();
1189
1190 if( xSchedulerRunning != pdFALSE )
1191 {
1192 /* If the created task is of a higher priority than the current task
1193 then it should run now. */
1194 if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority )
1195 {
1196 taskYIELD_IF_USING_PREEMPTION();
1197 }
1198 else
1199 {
1200 mtCOVERAGE_TEST_MARKER();
1201 }
1202 }
1203 else
1204 {
1205 mtCOVERAGE_TEST_MARKER();
1206 }
1207}
1208/*-----------------------------------------------------------*/
1209
1210#if ( INCLUDE_vTaskDelete == 1 )
1211
1212 void vTaskDelete( TaskHandle_t xTaskToDelete )
1213 {
1214 TCB_t *pxTCB;
1215
1216 taskENTER_CRITICAL();
1217 {
1218 /* If null is passed in here then it is the calling task that is
1219 being deleted. */
1220 pxTCB = prvGetTCBFromHandle( xTaskToDelete );
1221
1222 /* Remove task from the ready list. */
1223 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1224 {
1225 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1226 }
1227 else
1228 {
1229 mtCOVERAGE_TEST_MARKER();
1230 }
1231
1232 /* Is the task waiting on an event also? */
1233 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1234 {
1235 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1236 }
1237 else
1238 {
1239 mtCOVERAGE_TEST_MARKER();
1240 }
1241
1242 /* Increment the uxTaskNumber also so kernel aware debuggers can
1243 detect that the task lists need re-generating. This is done before
1244 portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
1245 not return. */
1246 uxTaskNumber++;
1247
1248 if( pxTCB == pxCurrentTCB )
1249 {
1250 /* A task is deleting itself. This cannot complete within the
1251 task itself, as a context switch to another task is required.
1252 Place the task in the termination list. The idle task will
1253 check the termination list and free up any memory allocated by
1254 the scheduler for the TCB and stack of the deleted task. */
1255 vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );
1256
1257			/* Increment the uxDeletedTasksWaitingCleanUp variable so the idle task knows
1258 there is a task that has been deleted and that it should therefore
1259 check the xTasksWaitingTermination list. */
1260 ++uxDeletedTasksWaitingCleanUp;
1261
1262 /* The pre-delete hook is primarily for the Windows simulator,
1263 in which Windows specific clean up operations are performed,
1264 after which it is not possible to yield away from this task -
1265 hence xYieldPending is used to latch that a context switch is
1266 required. */
1267 portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
1268 }
1269 else
1270 {
1271 --uxCurrentNumberOfTasks;
1272 prvDeleteTCB( pxTCB );
1273
1274 /* Reset the next expected unblock time in case it referred to
1275 the task that has just been deleted. */
1276 prvResetNextTaskUnblockTime();
1277 }
1278
1279 traceTASK_DELETE( pxTCB );
1280#ifdef CONFIG_DMALLOC
1281			MemLeak_t[pxTCB->uxTCBNumber].Flag = 0;
1282 MemLeak_t[pxTCB->uxTCBNumber].TaskNum = 0;
1283 MemLeak_t[pxTCB->uxTCBNumber].WantSize = 0;
1284 MemLeak_t[pxTCB->uxTCBNumber].WantTotalSize = 0;
1285 MemLeak_t[pxTCB->uxTCBNumber].MallocCount = 0;
1286 if (MemLeak_t[pxTCB->uxTCBNumber].TaskName)
1287 memset(MemLeak_t[pxTCB->uxTCBNumber].TaskName, 0, 20);
1288 MemLeak_t[pxTCB->uxTCBNumber].FreeSize = 0;
1289 MemLeak_t[pxTCB->uxTCBNumber].FreeTotalSize = 0;
1290 MemLeak_t[pxTCB->uxTCBNumber].FreeCount = 0;
1291#endif
1292	}
1293 taskEXIT_CRITICAL();
1294
1295 /* Force a reschedule if it is the currently running task that has just
1296 been deleted. */
1297 if( xSchedulerRunning != pdFALSE )
1298 {
1299 if( pxTCB == pxCurrentTCB )
1300 {
1301 configASSERT( uxSchedulerSuspended == 0 );
1302 portYIELD_WITHIN_API();
1303 }
1304 else
1305 {
1306 mtCOVERAGE_TEST_MARKER();
1307 }
1308 }
1309 }
1310
1311#endif /* INCLUDE_vTaskDelete */
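/* Example usage (illustrative sketch only - the task is an application
assumption): a task that performs one job and then deletes itself.  The idle
task later frees the TCB and stack via prvCheckTasksWaitingTermination().

	static void vOneShotTask( void *pvParameters )
	{
		// Perform the one-off work here...

		// Passing NULL deletes the calling task.
		vTaskDelete( NULL );
	}
*/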
1312/*-----------------------------------------------------------*/
1313
1314#if ( INCLUDE_vTaskDelayUntil == 1 )
1315
1316 void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
1317 {
1318 TickType_t xTimeToWake;
1319 BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
1320
1321 configASSERT( pxPreviousWakeTime );
1322 configASSERT( ( xTimeIncrement > 0U ) );
1323 configASSERT( uxSchedulerSuspended == 0 );
1324
1325 vTaskSuspendAll();
1326 {
1327 /* Minor optimisation. The tick count cannot change in this
1328 block. */
1329 const TickType_t xConstTickCount = xTickCount;
1330
1331 /* Generate the tick time at which the task wants to wake. */
1332 xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
1333
1334 if( xConstTickCount < *pxPreviousWakeTime )
1335 {
1336 /* The tick count has overflowed since this function was
1337			last called. In this case the only time we should ever
1338 actually delay is if the wake time has also overflowed,
1339 and the wake time is greater than the tick time. When this
1340 is the case it is as if neither time had overflowed. */
1341 if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
1342 {
1343 xShouldDelay = pdTRUE;
1344 }
1345 else
1346 {
1347 mtCOVERAGE_TEST_MARKER();
1348 }
1349 }
1350 else
1351 {
1352 /* The tick time has not overflowed. In this case we will
1353 delay if either the wake time has overflowed, and/or the
1354 tick time is less than the wake time. */
1355 if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
1356 {
1357 xShouldDelay = pdTRUE;
1358 }
1359 else
1360 {
1361 mtCOVERAGE_TEST_MARKER();
1362 }
1363 }
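			/* Worked example of the wrap-around handling above, assuming
			16-bit ticks: if *pxPreviousWakeTime is 0xFFF0 and xTimeIncrement
			is 0x0020 then xTimeToWake wraps to 0x0010.  If xConstTickCount is
			still 0xFFF8 the ( xTimeToWake < *pxPreviousWakeTime ) test sets
			xShouldDelay; if the count has already wrapped to, say, 0x0012,
			neither branch sets it and the task runs again immediately so it
			can catch up. */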
1364
1365 /* Update the wake time ready for the next call. */
1366 *pxPreviousWakeTime = xTimeToWake;
1367
1368 if( xShouldDelay != pdFALSE )
1369 {
1370 traceTASK_DELAY_UNTIL( xTimeToWake );
1371
1372 /* prvAddCurrentTaskToDelayedList() needs the block time, not
1373 the time to wake, so subtract the current tick count. */
1374 prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
1375 }
1376 else
1377 {
1378 mtCOVERAGE_TEST_MARKER();
1379 }
1380 }
1381 xAlreadyYielded = xTaskResumeAll();
1382
1383		/* Force a reschedule if xTaskResumeAll has not already done so, as we may
1384 have put ourselves to sleep. */
1385 if( xAlreadyYielded == pdFALSE )
1386 {
1387 portYIELD_WITHIN_API();
1388 }
1389 else
1390 {
1391 mtCOVERAGE_TEST_MARKER();
1392 }
1393 }
1394
1395#endif /* INCLUDE_vTaskDelayUntil */
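/* Example usage (illustrative sketch only): a task that unblocks every 50
ticks regardless of how long each iteration of its own processing takes:

	static void vPeriodicTask( void *pvParameters )
	{
		TickType_t xLastWakeTime = xTaskGetTickCount();
		const TickType_t xPeriod = ( TickType_t ) 50;

		for( ;; )
		{
			vTaskDelayUntil( &xLastWakeTime, xPeriod );
			// Periodic processing goes here.
		}
	}
*/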
1396/*-----------------------------------------------------------*/
1397
1398#if ( INCLUDE_vTaskDelay == 1 )
1399
1400 void vTaskDelay( const TickType_t xTicksToDelay )
1401 {
1402 BaseType_t xAlreadyYielded = pdFALSE;
1403
1404 /* A delay time of zero just forces a reschedule. */
1405 if( xTicksToDelay > ( TickType_t ) 0U )
1406 {
1407 configASSERT( uxSchedulerSuspended == 0 );
1408 vTaskSuspendAll();
1409 {
1410 traceTASK_DELAY();
1411
1412 /* A task that is removed from the event list while the
1413 scheduler is suspended will not get placed in the ready
1414 list or removed from the blocked list until the scheduler
1415 is resumed.
1416
1417 This task cannot be in an event list as it is the currently
1418 executing task. */
1419 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
1420 }
1421 xAlreadyYielded = xTaskResumeAll();
1422 }
1423 else
1424 {
1425 mtCOVERAGE_TEST_MARKER();
1426 }
1427
1428		/* Force a reschedule if xTaskResumeAll has not already done so, as we may
1429 have put ourselves to sleep. */
1430 if( xAlreadyYielded == pdFALSE )
1431 {
1432 portYIELD_WITHIN_API();
1433 }
1434 else
1435 {
1436 mtCOVERAGE_TEST_MARKER();
1437 }
1438 }
1439
1440#endif /* INCLUDE_vTaskDelay */
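/* Example usage (illustrative sketch only): unlike vTaskDelayUntil(), the
delay here is measured from the time vTaskDelay() is called, so it does not by
itself produce a fixed execution period:

	// Block for roughly 500ms - pdMS_TO_TICKS() converts milliseconds to ticks.
	vTaskDelay( pdMS_TO_TICKS( 500 ) );
*/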
1441/*-----------------------------------------------------------*/
1442
1443#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
1444
1445 eTaskState eTaskGetState( TaskHandle_t xTask )
1446 {
1447 eTaskState eReturn;
1448 List_t const * pxStateList, *pxDelayedList, *pxOverflowedDelayedList;
1449 const TCB_t * const pxTCB = xTask;
1450
1451 configASSERT( pxTCB );
1452
1453 if( pxTCB == pxCurrentTCB )
1454 {
1455 /* The task calling this function is querying its own state. */
1456 eReturn = eRunning;
1457 }
1458 else
1459 {
1460 taskENTER_CRITICAL();
1461 {
1462 pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
1463 pxDelayedList = pxDelayedTaskList;
1464 pxOverflowedDelayedList = pxOverflowDelayedTaskList;
1465 }
1466 taskEXIT_CRITICAL();
1467
1468 if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
1469 {
1470 /* The task being queried is referenced from one of the Blocked
1471 lists. */
1472 eReturn = eBlocked;
1473 }
1474
1475 #if ( INCLUDE_vTaskSuspend == 1 )
1476 else if( pxStateList == &xSuspendedTaskList )
1477 {
1478 /* The task being queried is referenced from the suspended
1479 list. Is it genuinely suspended or is it blocked
1480 indefinitely? */
1481 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
1482 {
1483 #if( configUSE_TASK_NOTIFICATIONS == 1 )
1484 {
1485 /* The task does not appear on the event list item of
1486 and of the RTOS objects, but could still be in the
1487						any of the RTOS objects, but could still be in the
1488 rather than waiting on an object. */
1489 if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
1490 {
1491 eReturn = eBlocked;
1492 }
1493 else
1494 {
1495 eReturn = eSuspended;
1496 }
1497 }
1498 #else
1499 {
1500 eReturn = eSuspended;
1501 }
1502 #endif
1503 }
1504 else
1505 {
1506 eReturn = eBlocked;
1507 }
1508 }
1509 #endif
1510
1511 #if ( INCLUDE_vTaskDelete == 1 )
1512 else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
1513 {
1514 /* The task being queried is referenced from the deleted
1515 tasks list, or it is not referenced from any lists at
1516 all. */
1517 eReturn = eDeleted;
1518 }
1519 #endif
1520
1521 else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
1522 {
1523 /* If the task is not in any other state, it must be in the
1524 Ready (including pending ready) state. */
1525 eReturn = eReady;
1526 }
1527 }
1528
1529 return eReturn;
1530 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1531
1532#endif /* INCLUDE_eTaskGetState */
1533/*-----------------------------------------------------------*/
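/* Illustrative usage sketch only - not part of the kernel.  xWorkerHandle is a
   hypothetical handle obtained from xTaskCreate(); assumes INCLUDE_eTaskGetState
   is set to 1 in FreeRTOSConfig.h.

	void vCheckWorker( TaskHandle_t xWorkerHandle )
	{
	eTaskState eState;

		eState = eTaskGetState( xWorkerHandle );

		if( eState == eBlocked )
		{
			// The worker is waiting on a delay, queue, semaphore or notification.
		}
		else if( eState == eSuspended )
		{
			// The worker was suspended, or is blocked with an infinite timeout.
		}
	}
*/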
1534
1535#if ( INCLUDE_uxTaskPriorityGet == 1 )
1536
1537 UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
1538 {
1539 TCB_t const *pxTCB;
1540 UBaseType_t uxReturn;
1541
1542 taskENTER_CRITICAL();
1543 {
1544 /* If null is passed in here then it is the priority of the task
1545 that called uxTaskPriorityGet() that is being queried. */
1546 pxTCB = prvGetTCBFromHandle( xTask );
1547 uxReturn = pxTCB->uxPriority;
1548 }
1549 taskEXIT_CRITICAL();
1550
1551 return uxReturn;
1552 }
1553
1554#endif /* INCLUDE_uxTaskPriorityGet */
1555/*-----------------------------------------------------------*/
1556
1557#if ( INCLUDE_uxTaskPriorityGet == 1 )
1558
1559 UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
1560 {
1561 TCB_t const *pxTCB;
1562 UBaseType_t uxReturn, uxSavedInterruptState;
1563
1564 /* RTOS ports that support interrupt nesting have the concept of a
1565 maximum system call (or maximum API call) interrupt priority.
1566	Interrupts that are above the maximum system call priority are kept
1567 permanently enabled, even when the RTOS kernel is in a critical section,
1568 but cannot make any calls to FreeRTOS API functions. If configASSERT()
1569 is defined in FreeRTOSConfig.h then
1570 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1571 failure if a FreeRTOS API function is called from an interrupt that has
1572 been assigned a priority above the configured maximum system call
1573 priority. Only FreeRTOS functions that end in FromISR can be called
1574 from interrupts that have been assigned a priority at or (logically)
1575 below the maximum system call interrupt priority. FreeRTOS maintains a
1576 separate interrupt safe API to ensure interrupt entry is as fast and as
1577 simple as possible. More information (albeit Cortex-M specific) is
1578 provided on the following link:
1579 https://www.freertos.org/RTOS-Cortex-M3-M4.html */
1580 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1581
1582 uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR();
1583 {
1584 /* If null is passed in here then it is the priority of the calling
1585 task that is being queried. */
1586 pxTCB = prvGetTCBFromHandle( xTask );
1587 uxReturn = pxTCB->uxPriority;
1588 }
1589 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState );
1590
1591 return uxReturn;
1592 }
1593
1594#endif /* INCLUDE_uxTaskPriorityGet */
1595/*-----------------------------------------------------------*/
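/* Illustrative usage sketch only - not part of the kernel.  Shows the FromISR
   variant being used inside a hypothetical interrupt handler; the handler name
   and the xMonitoredTask handle are application-side assumptions.

	extern TaskHandle_t xMonitoredTask;

	void vExampleInterruptHandler( void )
	{
	UBaseType_t uxPriority;

		// Safe to call here because it is the interrupt-safe version of the API.
		uxPriority = uxTaskPriorityGetFromISR( xMonitoredTask );
		( void ) uxPriority;
	}
*/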
1596
1597#if ( INCLUDE_vTaskPrioritySet == 1 )
1598
1599 void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
1600 {
1601 TCB_t *pxTCB;
1602 UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
1603 BaseType_t xYieldRequired = pdFALSE;
1604
1605 configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );
1606
1607 /* Ensure the new priority is valid. */
1608 if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
1609 {
1610 uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
1611 }
1612 else
1613 {
1614 mtCOVERAGE_TEST_MARKER();
1615 }
1616
1617 taskENTER_CRITICAL();
1618 {
1619 /* If null is passed in here then it is the priority of the calling
1620 task that is being changed. */
1621 pxTCB = prvGetTCBFromHandle( xTask );
1622
1623 traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
1624
1625 #if ( configUSE_MUTEXES == 1 )
1626 {
1627 uxCurrentBasePriority = pxTCB->uxBasePriority;
1628 }
1629 #else
1630 {
1631 uxCurrentBasePriority = pxTCB->uxPriority;
1632 }
1633 #endif
1634
1635 if( uxCurrentBasePriority != uxNewPriority )
1636 {
1637 /* The priority change may have readied a task of higher
1638 priority than the calling task. */
1639 if( uxNewPriority > uxCurrentBasePriority )
1640 {
1641 if( pxTCB != pxCurrentTCB )
1642 {
1643 /* The priority of a task other than the currently
1644 running task is being raised. Is the priority being
1645 raised above that of the running task? */
1646 if( uxNewPriority >= pxCurrentTCB->uxPriority )
1647 {
1648 xYieldRequired = pdTRUE;
1649 }
1650 else
1651 {
1652 mtCOVERAGE_TEST_MARKER();
1653 }
1654 }
1655 else
1656 {
1657 /* The priority of the running task is being raised,
1658 but the running task must already be the highest
1659 priority task able to run so no yield is required. */
1660 }
1661 }
1662 else if( pxTCB == pxCurrentTCB )
1663 {
1664 /* Setting the priority of the running task down means
1665 there may now be another task of higher priority that
1666 is ready to execute. */
1667 xYieldRequired = pdTRUE;
1668 }
1669 else
1670 {
1671 /* Setting the priority of any other task down does not
1672 require a yield as the running task must be above the
1673 new priority of the task being modified. */
1674 }
1675
1676 /* Remember the ready list the task might be referenced from
1677 before its uxPriority member is changed so the
1678 taskRESET_READY_PRIORITY() macro can function correctly. */
1679 uxPriorityUsedOnEntry = pxTCB->uxPriority;
1680
1681 #if ( configUSE_MUTEXES == 1 )
1682 {
1683 /* Only change the priority being used if the task is not
1684 currently using an inherited priority. */
1685 if( pxTCB->uxBasePriority == pxTCB->uxPriority )
1686 {
1687 pxTCB->uxPriority = uxNewPriority;
1688 }
1689 else
1690 {
1691 mtCOVERAGE_TEST_MARKER();
1692 }
1693
1694 /* The base priority gets set whatever. */
1695 pxTCB->uxBasePriority = uxNewPriority;
1696 }
1697 #else
1698 {
1699 pxTCB->uxPriority = uxNewPriority;
1700 }
1701 #endif
1702
1703 /* Only reset the event list item value if the value is not
1704 being used for anything else. */
1705 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
1706 {
1707 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1708 }
1709 else
1710 {
1711 mtCOVERAGE_TEST_MARKER();
1712 }
1713
1714 /* If the task is in the blocked or suspended list we need do
1715 nothing more than change its priority variable. However, if
1716 the task is in a ready list it needs to be removed and placed
1717 in the list appropriate to its new priority. */
1718 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
1719 {
1720 /* The task is currently in its ready list - remove before
1721				adding it to its new ready list. As we are in a critical
1722 section we can do this even if the scheduler is suspended. */
1723 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1724 {
1725 /* It is known that the task is in its ready list so
1726 there is no need to check again and the port level
1727 reset macro can be called directly. */
1728 portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
1729 }
1730 else
1731 {
1732 mtCOVERAGE_TEST_MARKER();
1733 }
1734 prvAddTaskToReadyList( pxTCB );
1735 }
1736 else
1737 {
1738 mtCOVERAGE_TEST_MARKER();
1739 }
1740
1741 if( xYieldRequired != pdFALSE )
1742 {
1743 taskYIELD_IF_USING_PREEMPTION();
1744 }
1745 else
1746 {
1747 mtCOVERAGE_TEST_MARKER();
1748 }
1749
1750 /* Remove compiler warning about unused variables when the port
1751 optimised task selection is not being used. */
1752 ( void ) uxPriorityUsedOnEntry;
1753 }
1754 }
1755 taskEXIT_CRITICAL();
1756 }
1757
1758#endif /* INCLUDE_vTaskPrioritySet */
1759/*-----------------------------------------------------------*/
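/* Illustrative usage sketch only - not part of the kernel.  Demonstrates a
   raise-then-restore pattern using uxTaskPriorityGet() and vTaskPrioritySet();
   the handle and the +1 boost are hypothetical and the new priority must stay
   below configMAX_PRIORITIES.

	void vBoostTaskTemporarily( TaskHandle_t xTask )
	{
	UBaseType_t uxOriginalPriority;

		// Remember the current (base) priority.
		uxOriginalPriority = uxTaskPriorityGet( xTask );

		// Raise the task's priority - may cause an immediate switch to xTask.
		vTaskPrioritySet( xTask, uxOriginalPriority + 1 );

		// ... activity that should complete before other work continues ...

		// Restore the original priority.
		vTaskPrioritySet( xTask, uxOriginalPriority );
	}
*/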
1760
1761#if ( INCLUDE_vTaskSuspend == 1 )
1762
1763 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
1764 {
1765 TCB_t *pxTCB;
1766
1767 taskENTER_CRITICAL();
1768 {
1769 /* If null is passed in here then it is the running task that is
1770 being suspended. */
1771 pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
1772
1773 traceTASK_SUSPEND( pxTCB );
1774
1775 /* Remove task from the ready/delayed list and place in the
1776 suspended list. */
1777 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1778 {
1779 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1780 }
1781 else
1782 {
1783 mtCOVERAGE_TEST_MARKER();
1784 }
1785
1786 /* Is the task waiting on an event also? */
1787 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1788 {
1789 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1790 }
1791 else
1792 {
1793 mtCOVERAGE_TEST_MARKER();
1794 }
1795
1796 vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
1797
1798 #if( configUSE_TASK_NOTIFICATIONS == 1 )
1799 {
1800 if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
1801 {
1802 /* The task was blocked to wait for a notification, but is
1803 now suspended, so no notification was received. */
1804 pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
1805 }
1806 }
1807 #endif
1808 }
1809 taskEXIT_CRITICAL();
1810
1811 if( xSchedulerRunning != pdFALSE )
1812 {
1813 /* Reset the next expected unblock time in case it referred to the
1814 task that is now in the Suspended state. */
1815 taskENTER_CRITICAL();
1816 {
1817 prvResetNextTaskUnblockTime();
1818 }
1819 taskEXIT_CRITICAL();
1820 }
1821 else
1822 {
1823 mtCOVERAGE_TEST_MARKER();
1824 }
1825
1826 if( pxTCB == pxCurrentTCB )
1827 {
1828 if( xSchedulerRunning != pdFALSE )
1829 {
1830 /* The current task has just been suspended. */
1831 configASSERT( uxSchedulerSuspended == 0 );
1832 portYIELD_WITHIN_API();
1833 }
1834 else
1835 {
1836 /* The scheduler is not running, but the task that was pointed
1837 to by pxCurrentTCB has just been suspended and pxCurrentTCB
1838 must be adjusted to point to a different task. */
1839 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
1840 {
1841 /* No other tasks are ready, so set pxCurrentTCB back to
1842 NULL so when the next task is created pxCurrentTCB will
1843 be set to point to it no matter what its relative priority
1844 is. */
1845 pxCurrentTCB = NULL;
1846 }
1847 else
1848 {
1849 vTaskSwitchContext();
1850 }
1851 }
1852 }
1853 else
1854 {
1855 mtCOVERAGE_TEST_MARKER();
1856 }
1857 }
1858
1859#endif /* INCLUDE_vTaskSuspend */
1860/*-----------------------------------------------------------*/
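/* Illustrative usage sketch only - not part of the kernel.  A task can suspend
   itself by passing NULL, as sketched below; it will not run again until some
   other task or ISR calls vTaskResume()/xTaskResumeFromISR() on its handle.

	void vWaitForResume( void *pvParameters )
	{
		( void ) pvParameters;

		for( ;; )
		{
			// Suspend the calling task; execution continues here only after a resume.
			vTaskSuspend( NULL );

			// Process whatever the resuming task prepared.
		}
	}
*/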
1861
1862#if ( INCLUDE_vTaskSuspend == 1 )
1863
1864 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
1865 {
1866 BaseType_t xReturn = pdFALSE;
1867 const TCB_t * const pxTCB = xTask;
1868
1869 /* Accesses xPendingReadyList so must be called from a critical
1870 section. */
1871
1872 /* It does not make sense to check if the calling task is suspended. */
1873 configASSERT( xTask );
1874
1875 /* Is the task being resumed actually in the suspended list? */
1876 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
1877 {
1878 /* Has the task already been resumed from within an ISR? */
1879 if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
1880 {
1881 /* Is it in the suspended list because it is in the Suspended
1882				state, or because it is blocked with no timeout? */
1883 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */
1884 {
1885 xReturn = pdTRUE;
1886 }
1887 else
1888 {
1889 mtCOVERAGE_TEST_MARKER();
1890 }
1891 }
1892 else
1893 {
1894 mtCOVERAGE_TEST_MARKER();
1895 }
1896 }
1897 else
1898 {
1899 mtCOVERAGE_TEST_MARKER();
1900 }
1901
1902 return xReturn;
1903 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1904
1905#endif /* INCLUDE_vTaskSuspend */
1906/*-----------------------------------------------------------*/
1907
1908#if ( INCLUDE_vTaskSuspend == 1 )
1909
1910 void vTaskResume( TaskHandle_t xTaskToResume )
1911 {
1912 TCB_t * const pxTCB = xTaskToResume;
1913
1914 /* It does not make sense to resume the calling task. */
1915 configASSERT( xTaskToResume );
1916
1917 /* The parameter cannot be NULL as it is impossible to resume the
1918 currently executing task. */
1919 if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) )
1920 {
1921 taskENTER_CRITICAL();
1922 {
1923 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
1924 {
1925 traceTASK_RESUME( pxTCB );
1926
1927 /* The ready list can be accessed even if the scheduler is
1928 suspended because this is inside a critical section. */
1929 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
1930 prvAddTaskToReadyList( pxTCB );
1931
1932 /* A higher priority task may have just been resumed. */
1933 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
1934 {
1935 /* This yield may not cause the task just resumed to run,
1936 but will leave the lists in the correct state for the
1937 next yield. */
1938 taskYIELD_IF_USING_PREEMPTION();
1939 }
1940 else
1941 {
1942 mtCOVERAGE_TEST_MARKER();
1943 }
1944 }
1945 else
1946 {
1947 mtCOVERAGE_TEST_MARKER();
1948 }
1949 }
1950 taskEXIT_CRITICAL();
1951 }
1952 else
1953 {
1954 mtCOVERAGE_TEST_MARKER();
1955 }
1956 }
1957
1958#endif /* INCLUDE_vTaskSuspend */
1959
1960/*-----------------------------------------------------------*/
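/* Illustrative usage sketch only - not part of the kernel.  xWaiterHandle is a
   hypothetical handle to a task that suspended itself as in the earlier sketch;
   resuming it from task context may cause an immediate context switch if the
   resumed task has the higher priority and preemption is enabled.

	extern TaskHandle_t xWaiterHandle;

	void vKickWaiter( void )
	{
		// Make the suspended task ready to run again.
		vTaskResume( xWaiterHandle );
	}
*/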
1961
1962#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
1963
1964 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
1965 {
1966 BaseType_t xYieldRequired = pdFALSE;
1967 TCB_t * const pxTCB = xTaskToResume;
1968 UBaseType_t uxSavedInterruptStatus;
1969
1970 configASSERT( xTaskToResume );
1971
1972 /* RTOS ports that support interrupt nesting have the concept of a
1973 maximum system call (or maximum API call) interrupt priority.
1974	Interrupts that are above the maximum system call priority are kept
1975 permanently enabled, even when the RTOS kernel is in a critical section,
1976 but cannot make any calls to FreeRTOS API functions. If configASSERT()
1977 is defined in FreeRTOSConfig.h then
1978 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1979 failure if a FreeRTOS API function is called from an interrupt that has
1980 been assigned a priority above the configured maximum system call
1981 priority. Only FreeRTOS functions that end in FromISR can be called
1982 from interrupts that have been assigned a priority at or (logically)
1983 below the maximum system call interrupt priority. FreeRTOS maintains a
1984 separate interrupt safe API to ensure interrupt entry is as fast and as
1985 simple as possible. More information (albeit Cortex-M specific) is
1986 provided on the following link:
1987 https://www.freertos.org/RTOS-Cortex-M3-M4.html */
1988 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1989
1990 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1991 {
1992 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
1993 {
1994 traceTASK_RESUME_FROM_ISR( pxTCB );
1995
1996 /* Check the ready lists can be accessed. */
1997 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
1998 {
1999 /* Ready lists can be accessed so move the task from the
2000 suspended list to the ready list directly. */
2001 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
2002 {
2003 xYieldRequired = pdTRUE;
2004 }
2005 else
2006 {
2007 mtCOVERAGE_TEST_MARKER();
2008 }
2009
2010 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2011 prvAddTaskToReadyList( pxTCB );
2012 }
2013 else
2014 {
2015 /* The delayed or ready lists cannot be accessed so the task
2016 is held in the pending ready list until the scheduler is
2017 unsuspended. */
2018 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
2019 }
2020 }
2021 else
2022 {
2023 mtCOVERAGE_TEST_MARKER();
2024 }
2025 }
2026 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
2027
2028 return xYieldRequired;
2029 }
2030
2031#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
2032/*-----------------------------------------------------------*/
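/* Illustrative usage sketch only - not part of the kernel.  The handler name
   and xWaiterHandle are application-side assumptions, and the yield macro
   (portYIELD_FROM_ISR()/portEND_SWITCHING_ISR()) is port specific - check the
   port in use.

	extern TaskHandle_t xWaiterHandle;

	void vExampleISR( void )
	{
	BaseType_t xYieldRequired;

		// Resume the suspended task from interrupt context.
		xYieldRequired = xTaskResumeFromISR( xWaiterHandle );

		// Request a context switch on interrupt exit if the resumed task
		// should run next.
		portYIELD_FROM_ISR( xYieldRequired );
	}
*/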
2033
2034void vTaskStartScheduler( void )
2035{
2036BaseType_t xReturn;
2037
2038 /* Add the idle task at the lowest priority. */
2039 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
2040 {
2041 StaticTask_t *pxIdleTaskTCBBuffer = NULL;
2042 StackType_t *pxIdleTaskStackBuffer = NULL;
2043 uint32_t ulIdleTaskStackSize;
2044
2045 /* The Idle task is created using user provided RAM - obtain the
2046 address of the RAM then create the idle task. */
2047 vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
2048 xIdleTaskHandle = xTaskCreateStatic( prvIdleTask,
2049 configIDLE_TASK_NAME,
2050 ulIdleTaskStackSize,
2051 ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */
2052 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
2053 pxIdleTaskStackBuffer,
2054 pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2055
2056 if( xIdleTaskHandle != NULL )
2057 {
2058 xReturn = pdPASS;
2059 }
2060 else
2061 {
2062 xReturn = pdFAIL;
2063 }
2064 }
2065 #else
2066 {
2067 /* The Idle task is being created using dynamically allocated RAM. */
2068 xReturn = xTaskCreate( prvIdleTask,
2069 configIDLE_TASK_NAME,
2070 configMINIMAL_STACK_SIZE,
2071 ( void * ) NULL,
2072 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
2073 &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2074 }
2075 #endif /* configSUPPORT_STATIC_ALLOCATION */
2076
2077 #if ( configUSE_TIMERS == 1 )
2078 {
2079 if( xReturn == pdPASS )
2080 {
2081 xReturn = xTimerCreateTimerTask();
2082 }
2083 else
2084 {
2085 mtCOVERAGE_TEST_MARKER();
2086 }
2087 }
2088 #endif /* configUSE_TIMERS */
2089
2090 if( xReturn == pdPASS )
2091 {
2092 /* freertos_tasks_c_additions_init() should only be called if the user
2093 definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
2094 the only macro called by the function. */
2095 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
2096 {
2097 freertos_tasks_c_additions_init();
2098 }
2099 #endif
2100
2101 /* Interrupts are turned off here, to ensure a tick does not occur
2102 before or during the call to xPortStartScheduler(). The stacks of
2103 the created tasks contain a status word with interrupts switched on
2104 so interrupts will automatically get re-enabled when the first task
2105 starts to run. */
2106 portDISABLE_INTERRUPTS();
2107
2108 #if ( configUSE_NEWLIB_REENTRANT == 1 )
2109 {
2110 /* Switch Newlib's _impure_ptr variable to point to the _reent
2111 structure specific to the task that will run first. */
2112 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
2113 }
2114 #endif /* configUSE_NEWLIB_REENTRANT */
2115
2116 xNextTaskUnblockTime = portMAX_DELAY;
2117 xSchedulerRunning = pdTRUE;
2118 xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
2119
2120 /* If configGENERATE_RUN_TIME_STATS is defined then the following
2121 macro must be defined to configure the timer/counter used to generate
2122 the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS
2123 is set to 0 and the following line fails to build then ensure you do not
2124 have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
2125 FreeRTOSConfig.h file. */
2126 portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
2127
2128 traceTASK_SWITCHED_IN();
2129
2130 /* Setting up the timer tick is hardware specific and thus in the
2131 portable interface. */
2132 if( xPortStartScheduler() != pdFALSE )
2133 {
2134 /* Should not reach here as if the scheduler is running the
2135 function will not return. */
2136 }
2137 else
2138 {
2139 /* Should only reach here if a task calls xTaskEndScheduler(). */
2140 }
2141 }
2142 else
2143 {
2144 /* This line will only be reached if the kernel could not be started,
2145 because there was not enough FreeRTOS heap to create the idle task
2146 or the timer task. */
2147 configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
2148 }
2149
2150 /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
2151 meaning xIdleTaskHandle is not used anywhere else. */
2152 ( void ) xIdleTaskHandle;
2153}
2154/*-----------------------------------------------------------*/
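/* Illustrative usage sketch only - not part of the kernel.  A minimal main()
   under the assumption that dynamic allocation is enabled; the task function
   (the hypothetical vPollingTask from an earlier sketch), stack depth and
   priority are application choices.

	extern void vPollingTask( void *pvParameters );

	int main( void )
	{
		// Create at least one application task before starting the scheduler.
		xTaskCreate( vPollingTask, "Poll", configMINIMAL_STACK_SIZE, NULL,
					 tskIDLE_PRIORITY + 1, NULL );

		// Hand control to FreeRTOS.  This call only returns if the idle or
		// timer task could not be created, or xTaskEndScheduler() is called.
		vTaskStartScheduler();

		for( ;; );
	}
*/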
2155
2156void vTaskEndScheduler( void )
2157{
2158 /* Stop the scheduler interrupts and call the portable scheduler end
2159 routine so the original ISRs can be restored if necessary. The port
2160	layer must ensure the interrupt enable bit is left in the correct state. */
2161 portDISABLE_INTERRUPTS();
2162 xSchedulerRunning = pdFALSE;
2163 vPortEndScheduler();
2164}
2165/*----------------------------------------------------------*/
2166
2167void vTaskSuspendAll( void )
2168{
2169 /* A critical section is not required as the variable is of type
2170 BaseType_t. Please read Richard Barry's reply in the following link to a
2171 post in the FreeRTOS support forum before reporting this as a bug! -
2172 http://goo.gl/wu4acr */
2173 ++uxSchedulerSuspended;
2174 portMEMORY_BARRIER();
2175}
2176/*----------------------------------------------------------*/
2177
2178#if ( configUSE_TICKLESS_IDLE != 0 )
2179
2180 static TickType_t prvGetExpectedIdleTime( void )
2181 {
2182 TickType_t xReturn;
2183 UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
2184
2185 /* uxHigherPriorityReadyTasks takes care of the case where
2186 configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
2187 task that are in the Ready state, even though the idle task is
2188 running. */
2189 #if( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
2190 {
2191 if( uxTopReadyPriority > tskIDLE_PRIORITY )
2192 {
2193 uxHigherPriorityReadyTasks = pdTRUE;
2194 }
2195 }
2196 #else
2197 {
2198 const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;
2199
2200 /* When port optimised task selection is used the uxTopReadyPriority
2201 variable is used as a bit map. If bits other than the least
2202 significant bit are set then there are tasks that have a priority
2203 above the idle priority that are in the Ready state. This takes
2204 care of the case where the co-operative scheduler is in use. */
2205 if( uxTopReadyPriority > uxLeastSignificantBit )
2206 {
2207 uxHigherPriorityReadyTasks = pdTRUE;
2208 }
2209 }
2210 #endif
2211
2212 if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
2213 {
2214 xReturn = 0;
2215 }
2216 else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
2217 {
2218 /* There are other idle priority tasks in the ready state. If
2219 time slicing is used then the very next tick interrupt must be
2220 processed. */
2221 xReturn = 0;
2222 }
2223 else if( uxHigherPriorityReadyTasks != pdFALSE )
2224 {
2225 /* There are tasks in the Ready state that have a priority above the
2226 idle priority. This path can only be reached if
2227 configUSE_PREEMPTION is 0. */
2228 xReturn = 0;
2229 }
2230 else
2231 {
2232 xReturn = xNextTaskUnblockTime - xTickCount;
2233 }
2234
2235 return xReturn;
2236 }
2237
2238#endif /* configUSE_TICKLESS_IDLE */
2239/*----------------------------------------------------------*/
2240
2241BaseType_t xTaskResumeAll( void )
2242{
2243TCB_t *pxTCB = NULL;
2244BaseType_t xAlreadyYielded = pdFALSE;
2245
2246 /* If uxSchedulerSuspended is zero then this function does not match a
2247 previous call to vTaskSuspendAll(). */
2248 configASSERT( uxSchedulerSuspended );
2249
2250 /* It is possible that an ISR caused a task to be removed from an event
2251 list while the scheduler was suspended. If this was the case then the
2252 removed task will have been added to the xPendingReadyList. Once the
2253 scheduler has been resumed it is safe to move all the pending ready
2254 tasks from this list into their appropriate ready list. */
2255 taskENTER_CRITICAL();
2256 {
2257 --uxSchedulerSuspended;
2258
2259 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
2260 {
2261 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
2262 {
2263 /* Move any readied tasks from the pending list into the
2264 appropriate ready list. */
2265 while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
2266 {
2267 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2268 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2269 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2270 prvAddTaskToReadyList( pxTCB );
2271
2272 /* If the moved task has a priority higher than the current
2273 task then a yield must be performed. */
2274 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
2275 {
2276 xYieldPending = pdTRUE;
2277 }
2278 else
2279 {
2280 mtCOVERAGE_TEST_MARKER();
2281 }
2282 }
2283
2284 if( pxTCB != NULL )
2285 {
2286 /* A task was unblocked while the scheduler was suspended,
2287 which may have prevented the next unblock time from being
2288 re-calculated, in which case re-calculate it now. Mainly
2289 important for low power tickless implementations, where
2290 this can prevent an unnecessary exit from low power
2291 state. */
2292 prvResetNextTaskUnblockTime();
2293 }
2294
2295 /* If any ticks occurred while the scheduler was suspended then
2296 they should be processed now. This ensures the tick count does
2297 not slip, and that any delayed tasks are resumed at the correct
2298 time. */
2299 {
2300 UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. */
2301
2302 if( uxPendedCounts > ( UBaseType_t ) 0U )
2303 {
2304 do
2305 {
2306 if( xTaskIncrementTick() != pdFALSE )
2307 {
2308 xYieldPending = pdTRUE;
2309 }
2310 else
2311 {
2312 mtCOVERAGE_TEST_MARKER();
2313 }
2314 --uxPendedCounts;
2315 } while( uxPendedCounts > ( UBaseType_t ) 0U );
2316
2317 uxPendedTicks = 0;
2318 }
2319 else
2320 {
2321 mtCOVERAGE_TEST_MARKER();
2322 }
2323 }
2324
2325 if( xYieldPending != pdFALSE )
2326 {
2327 #if( configUSE_PREEMPTION != 0 )
2328 {
2329 xAlreadyYielded = pdTRUE;
2330 }
2331 #endif
2332 taskYIELD_IF_USING_PREEMPTION();
2333 }
2334 else
2335 {
2336 mtCOVERAGE_TEST_MARKER();
2337 }
2338 }
2339 }
2340 else
2341 {
2342 mtCOVERAGE_TEST_MARKER();
2343 }
2344 }
2345 taskEXIT_CRITICAL();
2346
2347 return xAlreadyYielded;
2348}
2349/*-----------------------------------------------------------*/
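/* Illustrative usage sketch only - not part of the kernel.  Shows the
   vTaskSuspendAll()/xTaskResumeAll() pair protecting a longer operation from
   other tasks without disabling interrupts; the function and the data it
   touches are hypothetical.  Most FreeRTOS API functions should not be called
   while the scheduler is suspended.

	void vUpdateSharedStructure( void )
	{
		vTaskSuspendAll();
		{
			// Other tasks cannot run here, but interrupts remain enabled,
			// so only task-level shared data should be modified.
		}
		( void ) xTaskResumeAll();
	}
*/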
2350
2351TickType_t xTaskGetTickCount( void )
2352{
2353TickType_t xTicks;
2354
2355 /* Critical section required if running on a 16 bit processor. */
2356 portTICK_TYPE_ENTER_CRITICAL();
2357 {
2358 xTicks = xTickCount;
2359 }
2360 portTICK_TYPE_EXIT_CRITICAL();
2361
2362 return xTicks;
2363}
2364/*-----------------------------------------------------------*/
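/* Illustrative usage sketch only - not part of the kernel.  Measures elapsed
   time in ticks and converts it to milliseconds with portTICK_PERIOD_MS; the
   function name and the operation being timed are hypothetical.

	void vMeasureSomething( void )
	{
	TickType_t xStart, xElapsed;

		xStart = xTaskGetTickCount();

		// ... the operation being timed ...

		xElapsed = xTaskGetTickCount() - xStart;

		// Convert ticks to milliseconds for reporting.
		( void ) ( xElapsed * portTICK_PERIOD_MS );
	}
*/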
2365
2366TickType_t xTaskGetTickCountFromISR( void )
2367{
2368TickType_t xReturn;
2369UBaseType_t uxSavedInterruptStatus;
2370
2371 /* RTOS ports that support interrupt nesting have the concept of a maximum
2372 system call (or maximum API call) interrupt priority. Interrupts that are
2373 above the maximum system call priority are kept permanently enabled, even
2374 when the RTOS kernel is in a critical section, but cannot make any calls to
2375 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2376 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2377 failure if a FreeRTOS API function is called from an interrupt that has been
2378 assigned a priority above the configured maximum system call priority.
2379 Only FreeRTOS functions that end in FromISR can be called from interrupts
2380 that have been assigned a priority at or (logically) below the maximum
2381 system call interrupt priority. FreeRTOS maintains a separate interrupt
2382 safe API to ensure interrupt entry is as fast and as simple as possible.
2383 More information (albeit Cortex-M specific) is provided on the following
2384 link: https://www.freertos.org/RTOS-Cortex-M3-M4.html */
2385 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2386
2387 uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
2388 {
2389 xReturn = xTickCount;
2390 }
2391 portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
2392
2393 return xReturn;
2394}
2395/*-----------------------------------------------------------*/
2396
2397UBaseType_t uxTaskGetNumberOfTasks( void )
2398{
2399 /* A critical section is not required because the variables are of type
2400 BaseType_t. */
2401 return uxCurrentNumberOfTasks;
2402}
2403/*-----------------------------------------------------------*/
2404
2405char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2406{
2407TCB_t *pxTCB;
2408
2409 /* If null is passed in here then the name of the calling task is being
2410 queried. */
2411 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
2412#ifdef CONFIG_DMALLOC
2413	if (pxTCB == NULL)
2414		return NULL;
2415#else
2416	configASSERT( pxTCB );
2417#endif
2418	return &( pxTCB->pcTaskName[ 0 ] );
2419}
2420
2421#if ( INCLUDE_xTaskGetHandle == 1 )
2422
2423 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] )
2424 {
2425 TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL;
2426 UBaseType_t x;
2427 char cNextChar;
2428 BaseType_t xBreakLoop;
2429
2430 /* This function is called with the scheduler suspended. */
2431
2432 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
2433 {
2434 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2435
2436 do
2437 {
2438 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2439
2440 /* Check each character in the name looking for a match or
2441 mismatch. */
2442 xBreakLoop = pdFALSE;
2443 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
2444 {
2445 cNextChar = pxNextTCB->pcTaskName[ x ];
2446
2447 if( cNextChar != pcNameToQuery[ x ] )
2448 {
2449 /* Characters didn't match. */
2450 xBreakLoop = pdTRUE;
2451 }
2452 else if( cNextChar == ( char ) 0x00 )
2453 {
2454 /* Both strings terminated, a match must have been
2455 found. */
2456 pxReturn = pxNextTCB;
2457 xBreakLoop = pdTRUE;
2458 }
2459 else
2460 {
2461 mtCOVERAGE_TEST_MARKER();
2462 }
2463
2464 if( xBreakLoop != pdFALSE )
2465 {
2466 break;
2467 }
2468 }
2469
2470 if( pxReturn != NULL )
2471 {
2472 /* The handle has been found. */
2473 break;
2474 }
2475
2476 } while( pxNextTCB != pxFirstTCB );
2477 }
2478 else
2479 {
2480 mtCOVERAGE_TEST_MARKER();
2481 }
2482
2483 return pxReturn;
2484 }
2485
2486#endif /* INCLUDE_xTaskGetHandle */
2487/*-----------------------------------------------------------*/
2488
2489#if ( INCLUDE_xTaskGetHandle == 1 )
2490
2491 TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2492 {
2493 UBaseType_t uxQueue = configMAX_PRIORITIES;
2494 TCB_t* pxTCB;
2495
2496 /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
2497 configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
2498
2499 vTaskSuspendAll();
2500 {
2501 /* Search the ready lists. */
2502 do
2503 {
2504 uxQueue--;
2505 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );
2506
2507 if( pxTCB != NULL )
2508 {
2509 /* Found the handle. */
2510 break;
2511 }
2512
2513 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2514
2515 /* Search the delayed lists. */
2516 if( pxTCB == NULL )
2517 {
2518 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
2519 }
2520
2521 if( pxTCB == NULL )
2522 {
2523 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
2524 }
2525
2526 #if ( INCLUDE_vTaskSuspend == 1 )
2527 {
2528 if( pxTCB == NULL )
2529 {
2530 /* Search the suspended list. */
2531 pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
2532 }
2533 }
2534 #endif
2535
2536 #if( INCLUDE_vTaskDelete == 1 )
2537 {
2538 if( pxTCB == NULL )
2539 {
2540 /* Search the deleted list. */
2541 pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
2542 }
2543 }
2544 #endif
2545 }
2546 ( void ) xTaskResumeAll();
2547
2548 return pxTCB;
2549 }
2550
2551#endif /* INCLUDE_xTaskGetHandle */
2552/*-----------------------------------------------------------*/
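/* Illustrative usage sketch only - not part of the kernel.  Looks a task up by
   name and reads its name back; assumes INCLUDE_xTaskGetHandle is 1 and that a
   task named "Poll" (hypothetical) exists.  The lookup walks every list, so it
   is intended for debug or one-off use rather than fast paths.

	void vFindTaskByName( void )
	{
	TaskHandle_t xHandle;

		xHandle = xTaskGetHandle( "Poll" );

		if( xHandle != NULL )
		{
			// pcTaskGetName( NULL ) would instead return the calling task's name.
			const char *pcName = pcTaskGetName( xHandle );
			( void ) pcName;
		}
	}
*/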
2553
2554#if ( configUSE_TRACE_FACILITY == 1 )
2555
2556 UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
2557 {
2558 UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
2559
2560 vTaskSuspendAll();
2561 {
2562 /* Is there a space in the array for each task in the system? */
2563 if( uxArraySize >= uxCurrentNumberOfTasks )
2564 {
2565			/* Fill in a TaskStatus_t structure with information on each
2566 task in the Ready state. */
2567 do
2568 {
2569 uxQueue--;
2570 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
2571
2572 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2573
2574			/* Fill in a TaskStatus_t structure with information on each
2575 task in the Blocked state. */
2576 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
2577 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
2578
2579 #if( INCLUDE_vTaskDelete == 1 )
2580 {
2581				/* Fill in a TaskStatus_t structure with information on
2582 each task that has been deleted but not yet cleaned up. */
2583 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
2584 }
2585 #endif
2586
2587 #if ( INCLUDE_vTaskSuspend == 1 )
2588 {
2589				/* Fill in a TaskStatus_t structure with information on
2590 each task in the Suspended state. */
2591 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
2592 }
2593 #endif
2594
2595 #if ( configGENERATE_RUN_TIME_STATS == 1)
2596 {
2597 if( pulTotalRunTime != NULL )
2598 {
2599 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2600 portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
2601 #else
2602					*pulTotalRunTime = (uint32_t)portGET_RUN_TIME_COUNTER_VALUE();
2603				#endif
2604 }
2605 }
2606 #else
2607 {
2608 if( pulTotalRunTime != NULL )
2609 {
2610 *pulTotalRunTime = 0;
2611 }
2612 }
2613 #endif
2614 }
2615 else
2616 {
2617 mtCOVERAGE_TEST_MARKER();
2618 }
2619 }
2620 ( void ) xTaskResumeAll();
2621
2622 return uxTask;
2623 }
2624
2625#endif /* configUSE_TRACE_FACILITY */
2626/*----------------------------------------------------------*/
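/* Illustrative usage sketch only - not part of the kernel.  Sizes the status
   array from uxTaskGetNumberOfTasks() and allocates it with pvPortMalloc();
   assumes configUSE_TRACE_FACILITY is 1 and that a FreeRTOS heap implementation
   is available.  The function name is hypothetical.

	void vSnapshotTasks( void )
	{
	TaskStatus_t *pxStatusArray;
	UBaseType_t uxArraySize, uxReturned, x;
	uint32_t ulTotalRunTime;

		uxArraySize = uxTaskGetNumberOfTasks();
		pxStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t ) );

		if( pxStatusArray != NULL )
		{
			uxReturned = uxTaskGetSystemState( pxStatusArray, uxArraySize, &ulTotalRunTime );

			for( x = 0; x < uxReturned; x++ )
			{
				// pxStatusArray[ x ].pcTaskName, .eCurrentState,
				// .uxCurrentPriority, .ulRunTimeCounter etc. describe one task.
			}

			vPortFree( pxStatusArray );
		}
	}
*/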
2627
2628#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
2629
2630 TaskHandle_t xTaskGetIdleTaskHandle( void )
2631 {
2632 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
2633 started, then xIdleTaskHandle will be NULL. */
2634 configASSERT( ( xIdleTaskHandle != NULL ) );
2635 return xIdleTaskHandle;
2636 }
2637
2638#endif /* INCLUDE_xTaskGetIdleTaskHandle */
2639/*----------------------------------------------------------*/
2640
2641/* This conditional compilation should use inequality to 0, not equality to 1.
2642This is to ensure vTaskStepTick() is available when user defined low power mode
2643implementations require configUSE_TICKLESS_IDLE to be set to a value other than
26441. */
2645#if ( configUSE_TICKLESS_IDLE != 0 )
2646
2647 void vTaskStepTick( const TickType_t xTicksToJump )
2648 {
2649 /* Correct the tick count value after a period during which the tick
2650 was suppressed. Note this does *not* call the tick hook function for
2651 each stepped tick. */
2652 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
2653 xTickCount += xTicksToJump;
2654 traceINCREASE_TICK_COUNT( xTicksToJump );
2655 }
2656
2657#endif /* configUSE_TICKLESS_IDLE */
2658/*----------------------------------------------------------*/
2659
2660#if ( INCLUDE_xTaskAbortDelay == 1 )
2661
2662 BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
2663 {
2664 TCB_t *pxTCB = xTask;
2665 BaseType_t xReturn;
2666
2667 configASSERT( pxTCB );
2668
2669 vTaskSuspendAll();
2670 {
2671 /* A task can only be prematurely removed from the Blocked state if
2672 it is actually in the Blocked state. */
2673 if( eTaskGetState( xTask ) == eBlocked )
2674 {
2675 xReturn = pdPASS;
2676
2677 /* Remove the reference to the task from the blocked list. An
2678 interrupt won't touch the xStateListItem because the
2679 scheduler is suspended. */
2680 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2681
2682 /* Is the task waiting on an event also? If so remove it from
2683 the event list too. Interrupts can touch the event list item,
2684 even though the scheduler is suspended, so a critical section
2685 is used. */
2686 taskENTER_CRITICAL();
2687 {
2688 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2689 {
2690 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2691 pxTCB->ucDelayAborted = pdTRUE;
2692 }
2693 else
2694 {
2695 mtCOVERAGE_TEST_MARKER();
2696 }
2697 }
2698 taskEXIT_CRITICAL();
2699
2700 /* Place the unblocked task into the appropriate ready list. */
2701 prvAddTaskToReadyList( pxTCB );
2702
2703 /* A task being unblocked cannot cause an immediate context
2704 switch if preemption is turned off. */
2705 #if ( configUSE_PREEMPTION == 1 )
2706 {
2707 /* Preemption is on, but a context switch should only be
2708 performed if the unblocked task has a priority that is
2709 equal to or higher than the currently executing task. */
2710 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
2711 {
2712 /* Pend the yield to be performed when the scheduler
2713 is unsuspended. */
2714 xYieldPending = pdTRUE;
2715 }
2716 else
2717 {
2718 mtCOVERAGE_TEST_MARKER();
2719 }
2720 }
2721 #endif /* configUSE_PREEMPTION */
2722 }
2723 else
2724 {
2725 xReturn = pdFAIL;
2726 }
2727 }
2728 ( void ) xTaskResumeAll();
2729
2730 return xReturn;
2731 }
2732
2733#endif /* INCLUDE_xTaskAbortDelay */
2734/*----------------------------------------------------------*/
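/* Illustrative usage sketch only - not part of the kernel.  xWorkerHandle is a
   hypothetical handle to a task currently blocked in vTaskDelay() or on an
   event; assumes INCLUDE_xTaskAbortDelay is set to 1.

	extern TaskHandle_t xWorkerHandle;

	void vCancelWorkerDelay( void )
	{
		// Returns pdPASS only if the task really was in the Blocked state.
		if( xTaskAbortDelay( xWorkerHandle ) == pdPASS )
		{
			// The worker is now Ready and its blocking call will return early,
			// reporting a timeout to the caller.
		}
	}
*/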
2735
2736BaseType_t xTaskIncrementTick( void )
2737{
2738TCB_t * pxTCB;
2739TickType_t xItemValue;
2740BaseType_t xSwitchRequired = pdFALSE;
2741
2742 /* Called by the portable layer each time a tick interrupt occurs.
2743 Increments the tick then checks to see if the new tick value will cause any
2744 tasks to be unblocked. */
2745 traceTASK_INCREMENT_TICK( xTickCount );
2746 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
2747 {
2748 /* Minor optimisation. The tick count cannot change in this
2749 block. */
2750 const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
2751
2752 /* Increment the RTOS tick, switching the delayed and overflowed
2753 delayed lists if it wraps to 0. */
2754 xTickCount = xConstTickCount;
2755
2756 if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
2757 {
2758 taskSWITCH_DELAYED_LISTS();
2759 }
2760 else
2761 {
2762 mtCOVERAGE_TEST_MARKER();
2763 }
2764
2765 /* See if this tick has made a timeout expire. Tasks are stored in
2766 the queue in the order of their wake time - meaning once one task
2767 has been found whose block time has not expired there is no need to
2768 look any further down the list. */
2769 if( xConstTickCount >= xNextTaskUnblockTime )
2770 {
2771 for( ;; )
2772 {
2773 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
2774 {
2775 /* The delayed list is empty. Set xNextTaskUnblockTime
2776 to the maximum possible value so it is extremely
2777 unlikely that the
2778 if( xTickCount >= xNextTaskUnblockTime ) test will pass
2779 next time through. */
2780 xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2781 break;
2782 }
2783 else
2784 {
2785 /* The delayed list is not empty, get the value of the
2786 item at the head of the delayed list. This is the time
2787 at which the task at the head of the delayed list must
2788 be removed from the Blocked state. */
2789 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2790 xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
2791
2792 if( xConstTickCount < xItemValue )
2793 {
2794 /* It is not time to unblock this item yet, but the
2795 item value is the time at which the task at the head
2796 of the blocked list must be removed from the Blocked
2797 state - so record the item value in
2798 xNextTaskUnblockTime. */
2799 xNextTaskUnblockTime = xItemValue;
2800					break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
2801 }
2802 else
2803 {
2804 mtCOVERAGE_TEST_MARKER();
2805 }
2806
2807 /* It is time to remove the item from the Blocked state. */
2808 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2809
2810 /* Is the task waiting on an event also? If so remove
2811 it from the event list. */
2812 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2813 {
2814 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2815 }
2816 else
2817 {
2818 mtCOVERAGE_TEST_MARKER();
2819 }
2820
2821 /* Place the unblocked task into the appropriate ready
2822 list. */
2823 prvAddTaskToReadyList( pxTCB );
2824
2825 /* A task being unblocked cannot cause an immediate
2826 context switch if preemption is turned off. */
2827 #if ( configUSE_PREEMPTION == 1 )
2828 {
2829 /* Preemption is on, but a context switch should
2830 only be performed if the unblocked task has a
2831 priority that is equal to or higher than the
2832 currently executing task. */
2833 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
2834 {
2835 xSwitchRequired = pdTRUE;
2836 }
2837 else
2838 {
2839 mtCOVERAGE_TEST_MARKER();
2840 }
2841 }
2842 #endif /* configUSE_PREEMPTION */
2843 }
2844 }
2845 }
2846
2847 /* Tasks of equal priority to the currently running task will share
2848 processing time (time slice) if preemption is on, and the application
2849 writer has not explicitly turned time slicing off. */
2850 #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
2851 {
2852 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 )
2853 {
2854 xSwitchRequired = pdTRUE;
2855 }
2856 else
2857 {
2858 mtCOVERAGE_TEST_MARKER();
2859 }
2860 }
2861 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
2862
2863 #if ( configUSE_TICK_HOOK == 1 )
2864 {
2865 /* Guard against the tick hook being called when the pended tick
2866 count is being unwound (when the scheduler is being unlocked). */
2867 if( uxPendedTicks == ( UBaseType_t ) 0U )
2868 {
2869 vApplicationTickHook();
2870 }
2871 else
2872 {
2873 mtCOVERAGE_TEST_MARKER();
2874 }
2875 }
2876 #endif /* configUSE_TICK_HOOK */
2877 }
2878 else
2879 {
2880 ++uxPendedTicks;
2881
2882 /* The tick hook gets called at regular intervals, even if the
2883 scheduler is locked. */
2884 #if ( configUSE_TICK_HOOK == 1 )
2885 {
2886 vApplicationTickHook();
2887 }
2888 #endif
2889 }
2890
2891 #if ( configUSE_PREEMPTION == 1 )
2892 {
2893 if( xYieldPending != pdFALSE )
2894 {
2895 xSwitchRequired = pdTRUE;
2896 }
2897 else
2898 {
2899 mtCOVERAGE_TEST_MARKER();
2900 }
2901 }
2902 #endif /* configUSE_PREEMPTION */
2903
2904 return xSwitchRequired;
2905}
2906/*-----------------------------------------------------------*/
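/* Illustrative sketch only - not part of the kernel and not a real port.  A
   port's tick interrupt handler typically does something like the following;
   the handler name is hypothetical and the yield macro varies between ports.

	void vPortExampleTickHandler( void )
	{
		// Increment the RTOS tick.  A non-zero return value means a context
		// switch should be requested before leaving the interrupt.
		if( xTaskIncrementTick() != pdFALSE )
		{
			portYIELD_FROM_ISR( pdTRUE );
		}
	}
*/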
2907
2908#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2909
2910 void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
2911 {
2912 TCB_t *xTCB;
2913
2914 /* If xTask is NULL then it is the task hook of the calling task that is
2915 getting set. */
2916 if( xTask == NULL )
2917 {
2918 xTCB = ( TCB_t * ) pxCurrentTCB;
2919 }
2920 else
2921 {
2922 xTCB = xTask;
2923 }
2924
2925 /* Save the hook function in the TCB. A critical section is required as
2926 the value can be accessed from an interrupt. */
2927 taskENTER_CRITICAL();
2928 {
2929 xTCB->pxTaskTag = pxHookFunction;
2930 }
2931 taskEXIT_CRITICAL();
2932 }
2933
2934#endif /* configUSE_APPLICATION_TASK_TAG */
2935/*-----------------------------------------------------------*/
2936
2937#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2938
2939 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
2940 {
2941 TCB_t *pxTCB;
2942 TaskHookFunction_t xReturn;
2943
2944		/* If xTask is NULL then the hook (tag) of the calling task is being returned. */
2945 pxTCB = prvGetTCBFromHandle( xTask );
2946
2947		/* Read the hook function from the TCB. A critical section is required as
2948 the value can be accessed from an interrupt. */
2949 taskENTER_CRITICAL();
2950 {
2951 xReturn = pxTCB->pxTaskTag;
2952 }
2953 taskEXIT_CRITICAL();
2954
2955 return xReturn;
2956 }
2957
2958#endif /* configUSE_APPLICATION_TASK_TAG */
2959/*-----------------------------------------------------------*/
2960
2961#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2962
2963 TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
2964 {
2965 TCB_t *pxTCB;
2966 TaskHookFunction_t xReturn;
2967 UBaseType_t uxSavedInterruptStatus;
2968
2969		/* If xTask is NULL then the hook (tag) of the calling task is being returned. */
2970 pxTCB = prvGetTCBFromHandle( xTask );
2971
2972		/* Read the hook function from the TCB. A critical section is required as
2973 the value can be accessed from an interrupt. */
2974 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
2975 {
2976 xReturn = pxTCB->pxTaskTag;
2977 }
2978 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
2979
2980 return xReturn;
2981 }
2982
2983#endif /* configUSE_APPLICATION_TASK_TAG */
2984/*-----------------------------------------------------------*/
2985
2986#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2987
2988 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
2989 {
2990 TCB_t *xTCB;
2991 BaseType_t xReturn;
2992
2993 /* If xTask is NULL then we are calling our own task hook. */
2994 if( xTask == NULL )
2995 {
2996 xTCB = pxCurrentTCB;
2997 }
2998 else
2999 {
3000 xTCB = xTask;
3001 }
3002
3003 if( xTCB->pxTaskTag != NULL )
3004 {
3005 xReturn = xTCB->pxTaskTag( pvParameter );
3006 }
3007 else
3008 {
3009 xReturn = pdFAIL;
3010 }
3011
3012 return xReturn;
3013 }
3014
3015#endif /* configUSE_APPLICATION_TASK_TAG */
3016/*-----------------------------------------------------------*/
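/* Illustrative usage sketch only - not part of the kernel.  The hook function
   and the parameter value are hypothetical; a tag is simply a function pointer
   (or any value cast to one) stored per task and invoked on demand via
   xTaskCallApplicationTaskHook().  Assumes configUSE_APPLICATION_TASK_TAG is 1.

	static BaseType_t prvExampleTaskHook( void *pvParameter )
	{
		// Interpret pvParameter however the application chooses.
		( void ) pvParameter;
		return pdPASS;
	}

	void vInstallAndCallHook( void )
	{
		// Attach the hook to the calling task (NULL means "this task").
		vTaskSetApplicationTaskTag( NULL, prvExampleTaskHook );

		// Later, invoke the calling task's hook with an arbitrary parameter.
		( void ) xTaskCallApplicationTaskHook( NULL, NULL );
	}
*/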
3017
3018void vTaskSwitchContext( void )
3019{
3020 if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE )
3021 {
3022 /* The scheduler is currently suspended - do not allow a context
3023 switch. */
3024 xYieldPending = pdTRUE;
3025 }
3026 else
3027 {
3028 xYieldPending = pdFALSE;
3029 traceTASK_SWITCHED_OUT();
3030
3031 #if ( configGENERATE_RUN_TIME_STATS == 1 )
3032 {
3033 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
3034 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
3035 #else
3036				ulTotalRunTime = (uint32_t)portGET_RUN_TIME_COUNTER_VALUE();
3037			#endif
3038
3039 /* Add the amount of time the task has been running to the
3040 accumulated time so far. The time the task started running was
3041 stored in ulTaskSwitchedInTime. Note that there is no overflow
3042 protection here so count values are only valid until the timer
3043 overflows. The guard against negative values is to protect
3044 against suspect run time stat counter implementations - which
3045 are provided by the application, not the kernel. */
3046 if( ulTotalRunTime > ulTaskSwitchedInTime )
3047 {
3048 pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );
3049#if CONFIG_FTRACE
3050				vTraceSwitchContext((uint32_t)pxCurrentTCB->uxTCBNumber);
3051#endif
3052			}
3053 else
3054 {
3055 mtCOVERAGE_TEST_MARKER();
3056 }
3057 ulTaskSwitchedInTime = ulTotalRunTime;
3058 }
3059 #endif /* configGENERATE_RUN_TIME_STATS */
3060
3061 /* Check for stack overflow, if configured. */
3062 taskCHECK_FOR_STACK_OVERFLOW();
3063
3064 /* Before the currently running task is switched out, save its errno. */
3065 #if( configUSE_POSIX_ERRNO == 1 )
3066 {
3067 pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
3068 }
3069 #endif
3070
3071 /* Select a new task to run using either the generic C or port
3072 optimised asm code. */
3073 taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3074 traceTASK_SWITCHED_IN();
3075
3076 /* After the new task is switched in, update the global errno. */
3077 #if( configUSE_POSIX_ERRNO == 1 )
3078 {
3079 FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
3080 }
3081 #endif
3082
3083 #if ( configUSE_NEWLIB_REENTRANT == 1 )
3084 {
3085 /* Switch Newlib's _impure_ptr variable to point to the _reent
3086 structure specific to this task. */
3087 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
3088 }
3089 #endif /* configUSE_NEWLIB_REENTRANT */
3090 }
3091}
3092/*-----------------------------------------------------------*/
3093
3094void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
3095{
3096 configASSERT( pxEventList );
3097
3098 /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
3099 SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
3100
3101 /* Place the event list item of the TCB in the appropriate event list.
3102 This is placed in the list in priority order so the highest priority task
3103 is the first to be woken by the event. The queue that contains the event
3104 list is locked, preventing simultaneous access from interrupts. */
3105 vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
3106
3107 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
3108}
3109/*-----------------------------------------------------------*/
3110
3111void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
3112{
3113 configASSERT( pxEventList );
3114
3115 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3116 the event groups implementation. */
3117 configASSERT( uxSchedulerSuspended != 0 );
3118
3119 /* Store the item value in the event list item. It is safe to access the
3120 event list item here as interrupts won't access the event list item of a
3121 task that is not in the Blocked state. */
3122 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
3123
3124 /* Place the event list item of the TCB at the end of the appropriate event
3125 list. It is safe to access the event list here because it is part of an
3126 event group implementation - and interrupts don't access event groups
3127 directly (instead they access them indirectly by pending function calls to
3128 the task level). */
3129 vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
3130
3131 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
3132}
3133/*-----------------------------------------------------------*/
3134
3135#if( configUSE_TIMERS == 1 )
3136
3137 void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
3138 {
3139 configASSERT( pxEventList );
3140
3141 /* This function should not be called by application code hence the
3142 'Restricted' in its name. It is not part of the public API. It is
3143 designed for use by kernel code, and has special calling requirements -
3144 it should be called with the scheduler suspended. */
3145
3146
3147 /* Place the event list item of the TCB in the appropriate event list.
3148	In this case it is assumed that this is the only task that is going to
3149 be waiting on this event list, so the faster vListInsertEnd() function
3150 can be used in place of vListInsert. */
3151 vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
3152
3153 /* If the task should block indefinitely then set the block time to a
3154 value that will be recognised as an indefinite delay inside the
3155 prvAddCurrentTaskToDelayedList() function. */
3156 if( xWaitIndefinitely != pdFALSE )
3157 {
3158 xTicksToWait = portMAX_DELAY;
3159 }
3160
3161 traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
3162 prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
3163 }
3164
3165#endif /* configUSE_TIMERS */
3166/*-----------------------------------------------------------*/
3167
3168BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
3169{
3170TCB_t *pxUnblockedTCB;
3171BaseType_t xReturn;
3172
3173 /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
3174 called from a critical section within an ISR. */
3175
3176 /* The event list is sorted in priority order, so the first in the list can
3177 be removed as it is known to be the highest priority. Remove the TCB from
3178 the delayed list, and add it to the ready list.
3179
3180 If an event is for a queue that is locked then this function will never
3181 get called - the lock count on the queue will get modified instead. This
3182 means exclusive access to the event list is guaranteed here.
3183
3184 This function assumes that a check has already been made to ensure that
3185 pxEventList is not empty. */
3186 pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3187 configASSERT( pxUnblockedTCB );
3188 ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
3189
3190 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
3191 {
3192 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
3193 prvAddTaskToReadyList( pxUnblockedTCB );
3194
3195 #if( configUSE_TICKLESS_IDLE != 0 )
3196 {
3197 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
3198 might be set to the blocked task's time out time. If the task is
3199 unblocked for a reason other than a timeout xNextTaskUnblockTime is
3200 normally left unchanged, because it is automatically reset to a new
3201 value when the tick count equals xNextTaskUnblockTime. However if
3202 tickless idling is used it might be more important to enter sleep mode
3203 at the earliest possible time - so reset xNextTaskUnblockTime here to
3204 ensure it is updated at the earliest possible time. */
3205 prvResetNextTaskUnblockTime();
3206 }
3207 #endif
3208 }
3209 else
3210 {
3211 /* The delayed and ready lists cannot be accessed, so hold this task
3212 pending until the scheduler is resumed. */
3213 vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
3214 }
3215
3216 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
3217 {
3218 /* Return true if the task removed from the event list has a higher
3219 priority than the calling task. This allows the calling task to know if
3220 it should force a context switch now. */
3221 xReturn = pdTRUE;
3222
3223 /* Mark that a yield is pending in case the user is not using the
3224 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
3225 xYieldPending = pdTRUE;
3226 }
3227 else
3228 {
3229 xReturn = pdFALSE;
3230 }
3231
3232 return xReturn;
3233}
3234/*-----------------------------------------------------------*/
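/* Illustrative sketch, not part of the kernel: the "xHigherPriorityTaskWoken"
pattern referred to in the comment above.  An ISR safe API reports whether it
unblocked a task of higher priority than the task that was interrupted, and the
ISR then requests a context switch on exit.  Requires "queue.h"; the queue
handle and handler name are hypothetical, and portYIELD_FROM_ISR() takes a port
specific form.  Compiled out with #if 0 so it has no effect on this file. */
#if 0
extern QueueHandle_t xExampleQueue; /* Assumed to be created with xQueueCreate(). */

void vExampleInterruptHandler( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;
uint32_t ulValueToSend = 1UL;

	/* Send to the queue from the ISR, noting whether a higher priority task
	was unblocked by the send. */
	( void ) xQueueSendToBackFromISR( xExampleQueue, &ulValueToSend, &xHigherPriorityTaskWoken );

	/* If a higher priority task was woken, request a context switch so the
	ISR returns directly to that task rather than the interrupted task. */
	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}
#endif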
3235
3236void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
3237{
3238TCB_t *pxUnblockedTCB;
3239
3240 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3241 the event flags implementation. */
3242 configASSERT( uxSchedulerSuspended != pdFALSE );
3243
3244 /* Store the new item value in the event list. */
3245 listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
3246
3247 	/* Remove the event list item from the event flag. Interrupts do not access
3248 event flags. */
3249 pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3250 configASSERT( pxUnblockedTCB );
3251 ( void ) uxListRemove( pxEventListItem );
3252
3253 /* Remove the task from the delayed list and add it to the ready list. The
3254 scheduler is suspended so interrupts will not be accessing the ready
3255 lists. */
3256 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
3257 prvAddTaskToReadyList( pxUnblockedTCB );
3258
3259 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
3260 {
3261 /* The unblocked task has a priority above that of the calling task, so
3262 a context switch is required. This function is called with the
3263 		scheduler suspended, so xYieldPending is set so that the context switch
3264 		occurs as soon as the scheduler is resumed (unsuspended). */
3265 xYieldPending = pdTRUE;
3266 }
3267}
3268/*-----------------------------------------------------------*/
3269
3270void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
3271{
3272 configASSERT( pxTimeOut );
3273 taskENTER_CRITICAL();
3274 {
3275 pxTimeOut->xOverflowCount = xNumOfOverflows;
3276 pxTimeOut->xTimeOnEntering = xTickCount;
3277 }
3278 taskEXIT_CRITICAL();
3279}
3280/*-----------------------------------------------------------*/
3281
3282void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
3283{
3284 /* For internal use only as it does not use a critical section. */
3285 pxTimeOut->xOverflowCount = xNumOfOverflows;
3286 pxTimeOut->xTimeOnEntering = xTickCount;
3287}
3288/*-----------------------------------------------------------*/
3289
3290BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
3291{
3292BaseType_t xReturn;
3293
3294 configASSERT( pxTimeOut );
3295 configASSERT( pxTicksToWait );
3296
3297 taskENTER_CRITICAL();
3298 {
3299 /* Minor optimisation. The tick count cannot change in this block. */
3300 const TickType_t xConstTickCount = xTickCount;
3301 const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;
3302
3303 #if( INCLUDE_xTaskAbortDelay == 1 )
3304 if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
3305 {
3306 /* The delay was aborted, which is not the same as a time out,
3307 but has the same result. */
3308 pxCurrentTCB->ucDelayAborted = pdFALSE;
3309 xReturn = pdTRUE;
3310 }
3311 else
3312 #endif
3313
3314 #if ( INCLUDE_vTaskSuspend == 1 )
3315 if( *pxTicksToWait == portMAX_DELAY )
3316 {
3317 /* If INCLUDE_vTaskSuspend is set to 1 and the block time
3318 specified is the maximum block time then the task should block
3319 indefinitely, and therefore never time out. */
3320 xReturn = pdFALSE;
3321 }
3322 else
3323 #endif
3324
3325 if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
3326 {
3327 			/* The tick count is greater than the time at which
3328 			vTaskSetTimeOutState() was called, but the tick count has also
3329 			overflowed since then, so it must have wrapped all the way around
3330 			and passed the start time again. The timeout period has therefore
3331 			elapsed. */
3332 xReturn = pdTRUE;
3333 }
3334 else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
3335 {
3336 /* Not a genuine timeout. Adjust parameters for time remaining. */
3337 *pxTicksToWait -= xElapsedTime;
3338 vTaskInternalSetTimeOutState( pxTimeOut );
3339 xReturn = pdFALSE;
3340 }
3341 else
3342 {
3343 *pxTicksToWait = 0;
3344 xReturn = pdTRUE;
3345 }
3346 }
3347 taskEXIT_CRITICAL();
3348
3349 return xReturn;
3350}
3351/*-----------------------------------------------------------*/
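/* Illustrative sketch, not part of the kernel: the usual application/driver
pattern for vTaskSetTimeOutState() and xTaskCheckForTimeOut(), which remains
correct across tick count overflows.  prvExampleDataAvailable() is hypothetical.
Compiled out with #if 0 so it has no effect on this file. */
#if 0
BaseType_t xExampleWaitForData( TickType_t xTicksToWait )
{
TimeOut_t xTimeOut;

	/* Record the time at which the wait started. */
	vTaskSetTimeOutState( &xTimeOut );

	while( prvExampleDataAvailable() == pdFALSE )
	{
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
		{
			/* The full block time has expired without data arriving. */
			return pdFALSE;
		}

		/* Not timed out yet - xTicksToWait now holds the remaining block
		time, so wait a little longer. */
		vTaskDelay( 1 );
	}

	return pdTRUE;
}
#endif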
3352
3353void vTaskMissedYield( void )
3354{
3355 xYieldPending = pdTRUE;
3356}
3357/*-----------------------------------------------------------*/
3358
3359#if ( configUSE_TRACE_FACILITY == 1 )
3360
3361 UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
3362 {
3363 UBaseType_t uxReturn;
3364 TCB_t const *pxTCB;
3365
3366 if( xTask != NULL )
3367 {
3368 pxTCB = xTask;
3369 			uxReturn = pxTCB->uxTCBNumber;
3370 		}
3371 else
3372 {
3373 uxReturn = 0U;
3374 }
3375
3376 return uxReturn;
3377 }
3378
3379#endif /* configUSE_TRACE_FACILITY */
3380/*-----------------------------------------------------------*/
3381
3382#if ( configUSE_TRACE_FACILITY == 1 )
3383
3384 void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
3385 {
3386 TCB_t * pxTCB;
3387
3388 if( xTask != NULL )
3389 {
3390 pxTCB = xTask;
3391 pxTCB->uxTaskNumber = uxHandle;
3392 }
3393 }
3394
3395#endif /* configUSE_TRACE_FACILITY */
3396
3397/*
3398 * -----------------------------------------------------------
3399 * The Idle task.
3400 * ----------------------------------------------------------
3401 *
3402 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
3403 * language extensions. The equivalent prototype for this function is:
3404 *
3405 * void prvIdleTask( void *pvParameters );
3406 *
3407 */
3408static portTASK_FUNCTION( prvIdleTask, pvParameters )
3409{
3410 /* Stop warnings. */
3411 ( void ) pvParameters;
3412
3413 /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
3414 SCHEDULER IS STARTED. **/
3415
3416 	/* A task that has a secure context may delete itself, in which case the
3417 	idle task is responsible for deleting that task's secure context, if
3418 	any - so allocate a secure context for the idle task here. */
3419 portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );
3420
3421 for( ;; )
3422 {
3423 /* See if any tasks have deleted themselves - if so then the idle task
3424 is responsible for freeing the deleted task's TCB and stack. */
3425 prvCheckTasksWaitingTermination();
3426
3427 #if ( configUSE_PREEMPTION == 0 )
3428 {
3429 /* If we are not using preemption we keep forcing a task switch to
3430 see if any other task has become available. If we are using
3431 preemption we don't need to do this as any task becoming available
3432 will automatically get the processor anyway. */
3433 taskYIELD();
3434 }
3435 #endif /* configUSE_PREEMPTION */
3436
3437 #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
3438 {
3439 /* When using preemption tasks of equal priority will be
3440 timesliced. If a task that is sharing the idle priority is ready
3441 to run then the idle task should yield before the end of the
3442 timeslice.
3443
3444 A critical region is not required here as we are just reading from
3445 the list, and an occasional incorrect value will not matter. If
3446 the ready list at the idle priority contains more than one task
3447 then a task other than the idle task is ready to execute. */
3448 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
3449 {
3450 taskYIELD();
3451 }
3452 else
3453 {
3454 mtCOVERAGE_TEST_MARKER();
3455 }
3456 }
3457 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
3458
3459 #if ( configUSE_IDLE_HOOK == 1 )
3460 {
3461 extern void vApplicationIdleHook( void );
3462
3463 /* Call the user defined function from within the idle task. This
3464 allows the application designer to add background functionality
3465 without the overhead of a separate task.
3466 NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
3467 CALL A FUNCTION THAT MIGHT BLOCK. */
3468 vApplicationIdleHook();
3469 }
3470 #endif /* configUSE_IDLE_HOOK */
3471
3472 /* This conditional compilation should use inequality to 0, not equality
3473 to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
3474 user defined low power mode implementations require
3475 configUSE_TICKLESS_IDLE to be set to a value other than 1. */
3476 #if ( configUSE_TICKLESS_IDLE != 0 )
3477 {
3478 TickType_t xExpectedIdleTime;
3479
3480 /* It is not desirable to suspend then resume the scheduler on
3481 each iteration of the idle task. Therefore, a preliminary
3482 test of the expected idle time is performed without the
3483 scheduler suspended. The result here is not necessarily
3484 valid. */
3485 xExpectedIdleTime = prvGetExpectedIdleTime();
3486
3487 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3488 {
3489 vTaskSuspendAll();
3490 {
3491 /* Now the scheduler is suspended, the expected idle
3492 time can be sampled again, and this time its value can
3493 be used. */
3494 configASSERT( xNextTaskUnblockTime >= xTickCount );
3495 xExpectedIdleTime = prvGetExpectedIdleTime();
3496
3497 /* Define the following macro to set xExpectedIdleTime to 0
3498 if the application does not want
3499 portSUPPRESS_TICKS_AND_SLEEP() to be called. */
3500 configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );
3501
3502 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3503 {
3504 traceLOW_POWER_IDLE_BEGIN();
3505 portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
3506 traceLOW_POWER_IDLE_END();
3507 }
3508 else
3509 {
3510 mtCOVERAGE_TEST_MARKER();
3511 }
3512 }
3513 ( void ) xTaskResumeAll();
3514 }
3515 else
3516 {
3517 mtCOVERAGE_TEST_MARKER();
3518 }
3519 }
3520 #endif /* configUSE_TICKLESS_IDLE */
3521 }
3522}
3523/*-----------------------------------------------------------*/
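/* Illustrative sketch, not part of the kernel: a minimal vApplicationIdleHook()
of the kind called from the idle task loop above when configUSE_IDLE_HOOK is 1.
It must never call a blocking API.  The watchdog function is hypothetical.
Compiled out with #if 0 so it has no effect on this file. */
#if 0
static volatile uint32_t ulIdleCycleCount = 0UL;

void vApplicationIdleHook( void )
{
	/* Runs in the idle task's context, so keep it short and never block. */
	ulIdleCycleCount++;
	vExampleKickWatchdog(); /* Hypothetical, non-blocking. */
}
#endif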
3524
3525#if( configUSE_TICKLESS_IDLE != 0 )
3526
3527 eSleepModeStatus eTaskConfirmSleepModeStatus( void )
3528 {
3529 /* The idle task exists in addition to the application tasks. */
3530 const UBaseType_t uxNonApplicationTasks = 1;
3531 eSleepModeStatus eReturn = eStandardSleep;
3532
3533 if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
3534 {
3535 /* A task was made ready while the scheduler was suspended. */
3536 eReturn = eAbortSleep;
3537 }
3538 else if( xYieldPending != pdFALSE )
3539 {
3540 /* A yield was pended while the scheduler was suspended. */
3541 eReturn = eAbortSleep;
3542 }
3543 else
3544 {
3545 /* If all the tasks are in the suspended list (which might mean they
3546 have an infinite block time rather than actually being suspended)
3547 then it is safe to turn all clocks off and just wait for external
3548 interrupts. */
3549 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
3550 {
3551 eReturn = eNoTasksWaitingTimeout;
3552 }
3553 else
3554 {
3555 mtCOVERAGE_TEST_MARKER();
3556 }
3557 }
3558
3559 return eReturn;
3560 }
3561
3562#endif /* configUSE_TICKLESS_IDLE */
3563/*-----------------------------------------------------------*/
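/* Illustrative sketch, not part of the kernel: how an application might veto a
tickless sleep using the configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING() hook
referenced in prvIdleTask() above.  Setting the expected idle time to 0 stops
portSUPPRESS_TICKS_AND_SLEEP() from being called.  xApplicationReadyForSleep()
is hypothetical, and the definition belongs in FreeRTOSConfig.h.  Compiled out
with #if 0 so it has no effect on this file. */
#if 0
#define configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime ) \
	if( xApplicationReadyForSleep() == pdFALSE ) { ( xExpectedIdleTime ) = 0; }
#endif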
3564
3565#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3566
3567 void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
3568 {
3569 TCB_t *pxTCB;
3570
3571 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3572 {
3573 pxTCB = prvGetTCBFromHandle( xTaskToSet );
3574 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
3575 }
3576 }
3577
3578#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3579/*-----------------------------------------------------------*/
3580
3581#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3582
3583 void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex )
3584 {
3585 void *pvReturn = NULL;
3586 TCB_t *pxTCB;
3587
3588 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3589 {
3590 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
3591 pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
3592 }
3593 else
3594 {
3595 pvReturn = NULL;
3596 }
3597
3598 return pvReturn;
3599 }
3600
3601#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3602/*-----------------------------------------------------------*/
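/* Illustrative sketch, not part of the kernel: storing and retrieving a
per-task pointer with the thread local storage functions above.  Assumes
configNUM_THREAD_LOCAL_STORAGE_POINTERS is at least 1; the structure, index and
function names are hypothetical.  Compiled out with #if 0 so it has no effect
on this file. */
#if 0
typedef struct { uint32_t ulLastError; } ExampleTaskContext_t;

static ExampleTaskContext_t xExampleContext;

void vExampleUseThreadLocalStorage( void )
{
ExampleTaskContext_t *pxContext;

	/* Store a pointer in index 0 of the calling task's array (a NULL handle
	means "the calling task"). */
	vTaskSetThreadLocalStoragePointer( NULL, 0, &xExampleContext );

	/* Read the same pointer back later from the same task. */
	pxContext = pvTaskGetThreadLocalStoragePointer( NULL, 0 );
	pxContext->ulLastError = 0UL;
}
#endif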
3603
3604#if ( portUSING_MPU_WRAPPERS == 1 )
3605
3606 void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions )
3607 {
3608 TCB_t *pxTCB;
3609
3610 /* If null is passed in here then we are modifying the MPU settings of
3611 the calling task. */
3612 pxTCB = prvGetTCBFromHandle( xTaskToModify );
3613
3614 vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
3615 }
3616
3617#endif /* portUSING_MPU_WRAPPERS */
3618/*-----------------------------------------------------------*/
3619
3620static void prvInitialiseTaskLists( void )
3621{
3622UBaseType_t uxPriority;
3623
3624 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
3625 {
3626 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
3627 }
3628
3629 vListInitialise( &xDelayedTaskList1 );
3630 vListInitialise( &xDelayedTaskList2 );
3631 vListInitialise( &xPendingReadyList );
3632
3633 #if ( INCLUDE_vTaskDelete == 1 )
3634 {
3635 vListInitialise( &xTasksWaitingTermination );
3636 }
3637 #endif /* INCLUDE_vTaskDelete */
3638
3639 #if ( INCLUDE_vTaskSuspend == 1 )
3640 {
3641 vListInitialise( &xSuspendedTaskList );
3642 }
3643 #endif /* INCLUDE_vTaskSuspend */
3644
3645 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
3646 using list2. */
3647 pxDelayedTaskList = &xDelayedTaskList1;
3648 pxOverflowDelayedTaskList = &xDelayedTaskList2;
3649}
3650/*-----------------------------------------------------------*/
3651
3652static void prvCheckTasksWaitingTermination( void )
3653{
3654
3655 /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/
3656
3657 #if ( INCLUDE_vTaskDelete == 1 )
3658 {
3659 TCB_t *pxTCB;
3660
3661 /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
3662 being called too often in the idle task. */
3663 while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
3664 {
3665 taskENTER_CRITICAL();
3666 {
3667 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3668 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
3669 --uxCurrentNumberOfTasks;
3670 --uxDeletedTasksWaitingCleanUp;
3671 }
3672 taskEXIT_CRITICAL();
3673
3674 prvDeleteTCB( pxTCB );
3675 }
3676 }
3677 #endif /* INCLUDE_vTaskDelete */
3678}
3679/*-----------------------------------------------------------*/
3680
3681#if( configUSE_TRACE_FACILITY == 1 )
3682
3683 void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState )
3684 {
3685 TCB_t *pxTCB;
3686
3687 	/* If xTask is NULL then get the state of the calling task. */
3688 pxTCB = prvGetTCBFromHandle( xTask );
3689
3690 pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
3691 pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName [ 0 ] );
3692 pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
3693 pxTaskStatus->pxStackBase = pxTCB->pxStack;
3694 pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;
3695 	pxTaskStatus->uStackTotal = pxTCB->uStackDepth;
3696
3697 #if ( configUSE_MUTEXES == 1 )
3698 {
3699 pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
3700 }
3701 #else
3702 {
3703 pxTaskStatus->uxBasePriority = 0;
3704 }
3705 #endif
3706
3707 #if ( configGENERATE_RUN_TIME_STATS == 1 )
3708 {
3709 pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
3710 }
3711 #else
3712 {
3713 pxTaskStatus->ulRunTimeCounter = 0;
3714 }
3715 #endif
3716
3717 /* Obtaining the task state is a little fiddly, so is only done if the
3718 value of eState passed into this function is eInvalid - otherwise the
3719 state is just set to whatever is passed in. */
3720 if( eState != eInvalid )
3721 {
3722 if( pxTCB == pxCurrentTCB )
3723 {
3724 pxTaskStatus->eCurrentState = eRunning;
3725 }
3726 else
3727 {
3728 pxTaskStatus->eCurrentState = eState;
3729
3730 #if ( INCLUDE_vTaskSuspend == 1 )
3731 {
3732 /* If the task is in the suspended list then there is a
3733 chance it is actually just blocked indefinitely - so really
3734 it should be reported as being in the Blocked state. */
3735 if( eState == eSuspended )
3736 {
3737 vTaskSuspendAll();
3738 {
3739 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
3740 {
3741 pxTaskStatus->eCurrentState = eBlocked;
3742 }
3743 }
3744 ( void ) xTaskResumeAll();
3745 }
3746 }
3747 #endif /* INCLUDE_vTaskSuspend */
3748 }
3749 }
3750 else
3751 {
3752 pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
3753 }
3754
3755 /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
3756 parameter is provided to allow it to be skipped. */
3757 if( xGetFreeStackSpace != pdFALSE )
3758 {
3759 #if ( portSTACK_GROWTH > 0 )
3760 {
3761 pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
3762 }
3763 #else
3764 {
3765 pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
3766 }
3767 #endif
3768 }
3769 else
3770 {
3771 pxTaskStatus->usStackHighWaterMark = 0;
3772 }
3773 }
3774
3775#endif /* configUSE_TRACE_FACILITY */
3776/*-----------------------------------------------------------*/
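/* Illustrative sketch, not part of the kernel: querying a single task with
vTaskGetInfo() when configUSE_TRACE_FACILITY is 1.  Passing eInvalid asks the
kernel to determine the task state itself, and pdTRUE requests the (slower)
stack high water mark calculation.  The task handle is hypothetical.  Compiled
out with #if 0 so it has no effect on this file. */
#if 0
void vExampleQueryTask( TaskHandle_t xExampleTaskHandle )
{
TaskStatus_t xTaskDetails;

	vTaskGetInfo( xExampleTaskHandle, &xTaskDetails, pdTRUE, eInvalid );

	/* xTaskDetails now holds the task's name, priority, state and stack
	high water mark, among other fields. */
	( void ) xTaskDetails;
}
#endif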
3777
3778#if ( configUSE_TRACE_FACILITY == 1 )
3779
3780 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
3781 {
3782 configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB;
3783 UBaseType_t uxTask = 0;
3784
3785 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
3786 {
3787 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3788
3789 		/* Populate a TaskStatus_t structure within the
3790 pxTaskStatusArray array for each task that is referenced from
3791 pxList. See the definition of TaskStatus_t in task.h for the
3792 meaning of each TaskStatus_t structure member. */
3793 do
3794 {
3795 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3796 vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
3797 uxTask++;
3798 } while( pxNextTCB != pxFirstTCB );
3799 }
3800 else
3801 {
3802 mtCOVERAGE_TEST_MARKER();
3803 }
3804
3805 return uxTask;
3806 }
3807
3808#endif /* configUSE_TRACE_FACILITY */
3809/*-----------------------------------------------------------*/
3810
3811#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
3812
3813 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
3814 {
3815 uint32_t ulCount = 0U;
3816
3817 while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
3818 {
3819 pucStackByte -= portSTACK_GROWTH;
3820 ulCount++;
3821 }
3822
3823 ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */
3824
3825 return ( configSTACK_DEPTH_TYPE ) ulCount;
3826 }
3827
3828#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
3829/*-----------------------------------------------------------*/
3830
3831#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
3832
3833 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
3834 same except for their return type. Using configSTACK_DEPTH_TYPE allows the
3835 user to determine the return type. It gets around the problem of the value
3836 overflowing on 8-bit types without breaking backward compatibility for
3837 applications that expect an 8-bit return type. */
3838 configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
3839 {
3840 TCB_t *pxTCB;
3841 uint8_t *pucEndOfStack;
3842 configSTACK_DEPTH_TYPE uxReturn;
3843
3844 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
3845 the same except for their return type. Using configSTACK_DEPTH_TYPE
3846 allows the user to determine the return type. It gets around the
3847 problem of the value overflowing on 8-bit types without breaking
3848 backward compatibility for applications that expect an 8-bit return
3849 type. */
3850
3851 pxTCB = prvGetTCBFromHandle( xTask );
3852
3853 #if portSTACK_GROWTH < 0
3854 {
3855 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
3856 }
3857 #else
3858 {
3859 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
3860 }
3861 #endif
3862
3863 uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );
3864
3865 return uxReturn;
3866 }
3867
3868#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
3869/*-----------------------------------------------------------*/
3870
3871#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
3872
3873 UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
3874 {
3875 TCB_t *pxTCB;
3876 uint8_t *pucEndOfStack;
3877 UBaseType_t uxReturn;
3878
3879 pxTCB = prvGetTCBFromHandle( xTask );
3880
3881 #if portSTACK_GROWTH < 0
3882 {
3883 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
3884 }
3885 #else
3886 {
3887 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
3888 }
3889 #endif
3890
3891 uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
3892
3893 return uxReturn;
3894 }
3895
3896#endif /* INCLUDE_uxTaskGetStackHighWaterMark */
3897/*-----------------------------------------------------------*/
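/* Illustrative sketch, not part of the kernel: a task checking how close it has
come to overflowing its own stack using uxTaskGetStackHighWaterMark().  A return
value of zero means the stack has at some point been completely used.  The
threshold and reporting function are hypothetical.  Compiled out with #if 0 so
it has no effect on this file. */
#if 0
void vExampleCheckOwnStack( void )
{
UBaseType_t uxUnusedStackWords;

	/* NULL means "the calling task"; the value is the minimum amount of
	stack, in words, that has remained unused since the task started. */
	uxUnusedStackWords = uxTaskGetStackHighWaterMark( NULL );

	if( uxUnusedStackWords < ( UBaseType_t ) 16 )
	{
		vExampleReportLowStack( uxUnusedStackWords ); /* Hypothetical. */
	}
}
#endif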
3898
3899#if ( INCLUDE_vTaskDelete == 1 )
3900
3901 static void prvDeleteTCB( TCB_t *pxTCB )
3902 {
3903 /* This call is required specifically for the TriCore port. It must be
3904 above the vPortFree() calls. The call is also used by ports/demos that
3905 want to allocate and clean RAM statically. */
3906 portCLEAN_UP_TCB( pxTCB );
3907
3908 /* Free up the memory allocated by the scheduler for the task. It is up
3909 to the task to free any memory allocated at the application level. */
3910 #if ( configUSE_NEWLIB_REENTRANT == 1 )
3911 {
3912 _reclaim_reent( &( pxTCB->xNewLib_reent ) );
3913 }
3914 #endif /* configUSE_NEWLIB_REENTRANT */
3915
3916 #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
3917 {
3918 /* The task can only have been allocated dynamically - free both
3919 the stack and TCB. */
3920 vPortFree( pxTCB->pxStack );
3921 vPortFree( pxTCB );
3922 }
3923 #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
3924 {
3925 /* The task could have been allocated statically or dynamically, so
3926 check what was statically allocated before trying to free the
3927 memory. */
3928 if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
3929 {
3930 /* Both the stack and TCB were allocated dynamically, so both
3931 must be freed. */
3932 vPortFree( pxTCB->pxStack );
3933 vPortFree( pxTCB );
3934 }
3935 else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
3936 {
3937 /* Only the stack was statically allocated, so the TCB is the
3938 only memory that must be freed. */
3939 vPortFree( pxTCB );
3940 }
3941 else
3942 {
3943 /* Neither the stack nor the TCB were allocated dynamically, so
3944 nothing needs to be freed. */
3945 configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
3946 mtCOVERAGE_TEST_MARKER();
3947 }
3948 }
3949 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
3950 }
3951
3952#endif /* INCLUDE_vTaskDelete */
3953/*-----------------------------------------------------------*/
3954
3955static void prvResetNextTaskUnblockTime( void )
3956{
3957TCB_t *pxTCB;
3958
3959 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
3960 {
3961 /* The new current delayed list is empty. Set xNextTaskUnblockTime to
3962 the maximum possible value so it is extremely unlikely that the
3963 if( xTickCount >= xNextTaskUnblockTime ) test will pass until
3964 there is an item in the delayed list. */
3965 xNextTaskUnblockTime = portMAX_DELAY;
3966 }
3967 else
3968 {
3969 /* The new current delayed list is not empty, get the value of
3970 the item at the head of the delayed list. This is the time at
3971 which the task at the head of the delayed list should be removed
3972 from the Blocked state. */
3973 ( pxTCB ) = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3974 xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xStateListItem ) );
3975 }
3976}
3977/*-----------------------------------------------------------*/
3978
3979#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
3980
3981 TaskHandle_t xTaskGetCurrentTaskHandle( void )
3982 {
3983 TaskHandle_t xReturn;
3984
3985 /* A critical section is not required as this is not called from
3986 an interrupt and the current TCB will always be the same for any
3987 individual execution thread. */
3988 xReturn = pxCurrentTCB;
3989
3990 return xReturn;
3991 }
3992
3993#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
3994/*-----------------------------------------------------------*/
3995
3996#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
3997
3998 BaseType_t xTaskGetSchedulerState( void )
3999 {
4000 BaseType_t xReturn;
4001
4002 if( xSchedulerRunning == pdFALSE )
4003 {
4004 xReturn = taskSCHEDULER_NOT_STARTED;
4005 }
4006 else
4007 {
4008 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
4009 {
4010 xReturn = taskSCHEDULER_RUNNING;
4011 }
4012 else
4013 {
4014 xReturn = taskSCHEDULER_SUSPENDED;
4015 }
4016 }
4017
4018 return xReturn;
4019 }
4020
4021#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
4022/*-----------------------------------------------------------*/
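/* Illustrative sketch, not part of the kernel: library code that can run before
the scheduler has started, or while it is suspended, can use
xTaskGetSchedulerState() to decide whether blocking is permitted.  The logging
function is hypothetical.  Compiled out with #if 0 so it has no effect on this
file. */
#if 0
void vExampleShortPause( void )
{
	if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING )
	{
		/* Safe to block - the scheduler is running and not suspended. */
		vTaskDelay( pdMS_TO_TICKS( 10 ) );
	}
	else
	{
		/* Blocking is not allowed here, so do nothing (or poll). */
		vExampleLogSkippedDelay(); /* Hypothetical. */
	}
}
#endif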
4023
4024#if ( configUSE_MUTEXES == 1 )
4025
4026 BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
4027 {
4028 TCB_t * const pxMutexHolderTCB = pxMutexHolder;
4029 BaseType_t xReturn = pdFALSE;
4030
4031 /* If the mutex was given back by an interrupt while the queue was
4032 locked then the mutex holder might now be NULL. _RB_ Is this still
4033 needed as interrupts can no longer use mutexes? */
4034 if( pxMutexHolder != NULL )
4035 {
4036 /* If the holder of the mutex has a priority below the priority of
4037 the task attempting to obtain the mutex then it will temporarily
4038 inherit the priority of the task attempting to obtain the mutex. */
4039 if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority )
4040 {
4041 /* Adjust the mutex holder state to account for its new
4042 priority. Only reset the event list item value if the value is
4043 not being used for anything else. */
4044 if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
4045 {
4046 listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4047 }
4048 else
4049 {
4050 mtCOVERAGE_TEST_MARKER();
4051 }
4052
4053 /* If the task being modified is in the ready state it will need
4054 to be moved into a new list. */
4055 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
4056 {
4057 if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
4058 {
4059 taskRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority );
4060 }
4061 else
4062 {
4063 mtCOVERAGE_TEST_MARKER();
4064 }
4065
4066 /* Inherit the priority before being moved into the new list. */
4067 pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
4068 prvAddTaskToReadyList( pxMutexHolderTCB );
4069 }
4070 else
4071 {
4072 /* Just inherit the priority. */
4073 pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
4074 }
4075
4076 traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority );
4077
4078 /* Inheritance occurred. */
4079 xReturn = pdTRUE;
4080 }
4081 else
4082 {
4083 if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority )
4084 {
4085 /* The base priority of the mutex holder is lower than the
4086 priority of the task attempting to take the mutex, but the
4087 current priority of the mutex holder is not lower than the
4088 priority of the task attempting to take the mutex.
4089 Therefore the mutex holder must have already inherited a
4090 priority, but inheritance would have occurred if that had
4091 not been the case. */
4092 xReturn = pdTRUE;
4093 }
4094 else
4095 {
4096 mtCOVERAGE_TEST_MARKER();
4097 }
4098 }
4099 }
4100 else
4101 {
4102 mtCOVERAGE_TEST_MARKER();
4103 }
4104
4105 return xReturn;
4106 }
4107
4108#endif /* configUSE_MUTEXES */
4109/*-----------------------------------------------------------*/
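/* Illustrative sketch, not part of the kernel: the application level pattern
that exercises xTaskPriorityInherit() above.  While a low priority task holds a
mutex, a higher priority task blocking on the same mutex causes the holder to
temporarily inherit the higher priority, bounding the priority inversion until
the mutex is given back.  Requires "semphr.h"; the mutex handle and the shared
resource function are hypothetical.  Compiled out with #if 0 so it has no effect
on this file. */
#if 0
extern SemaphoreHandle_t xExampleMutex; /* Assumed to be created with xSemaphoreCreateMutex(). */

void vExampleLowPriorityTask( void *pvParameters )
{
	( void ) pvParameters;

	for( ;; )
	{
		if( xSemaphoreTake( xExampleMutex, portMAX_DELAY ) == pdTRUE )
		{
			/* Between Take and Give this task may run at an inherited,
			higher, priority if a higher priority task is waiting for the
			same mutex. */
			vExampleAccessSharedResource(); /* Hypothetical. */
			( void ) xSemaphoreGive( xExampleMutex );
		}
	}
}
#endif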
4110
4111#if ( configUSE_MUTEXES == 1 )
4112
4113 BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
4114 {
4115 TCB_t * const pxTCB = pxMutexHolder;
4116 BaseType_t xReturn = pdFALSE;
4117
4118 if( pxMutexHolder != NULL )
4119 {
4120 /* A task can only have an inherited priority if it holds the mutex.
4121 If the mutex is held by a task then it cannot be given from an
4122 interrupt, and if a mutex is given by the holding task then it must
4123 be the running state task. */
4124 configASSERT( pxTCB == pxCurrentTCB );
4125 configASSERT( pxTCB->uxMutexesHeld );
4126 ( pxTCB->uxMutexesHeld )--;
4127
4128 /* Has the holder of the mutex inherited the priority of another
4129 task? */
4130 if( pxTCB->uxPriority != pxTCB->uxBasePriority )
4131 {
4132 /* Only disinherit if no other mutexes are held. */
4133 if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
4134 {
4135 /* A task can only have an inherited priority if it holds
4136 the mutex. If the mutex is held by a task then it cannot be
4137 given from an interrupt, and if a mutex is given by the
4138 holding task then it must be the running state task. Remove
4139 the holding task from the ready list. */
4140 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
4141 {
4142 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4143 }
4144 else
4145 {
4146 mtCOVERAGE_TEST_MARKER();
4147 }
4148
4149 /* Disinherit the priority before adding the task into the
4150 new ready list. */
4151 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
4152 pxTCB->uxPriority = pxTCB->uxBasePriority;
4153
4154 /* Reset the event list item value. It cannot be in use for
4155 any other purpose if this task is running, and it must be
4156 running to give back the mutex. */
4157 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4158 prvAddTaskToReadyList( pxTCB );
4159
4160 /* Return true to indicate that a context switch is required.
4161 This is only actually required in the corner case whereby
4162 multiple mutexes were held and the mutexes were given back
4163 in an order different to that in which they were taken.
4164 If a context switch did not occur when the first mutex was
4165 returned, even if a task was waiting on it, then a context
4166 switch should occur when the last mutex is returned whether
4167 a task is waiting on it or not. */
4168 xReturn = pdTRUE;
4169 }
4170 else
4171 {
4172 mtCOVERAGE_TEST_MARKER();
4173 }
4174 }
4175 else
4176 {
4177 mtCOVERAGE_TEST_MARKER();
4178 }
4179 }
4180 else
4181 {
4182 mtCOVERAGE_TEST_MARKER();
4183 }
4184
4185 return xReturn;
4186 }
4187
4188#endif /* configUSE_MUTEXES */
4189/*-----------------------------------------------------------*/
4190
4191#if ( configUSE_MUTEXES == 1 )
4192
4193 void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask )
4194 {
4195 TCB_t * const pxTCB = pxMutexHolder;
4196 UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
4197 const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
4198
4199 if( pxMutexHolder != NULL )
4200 {
4201 /* If pxMutexHolder is not NULL then the holder must hold at least
4202 one mutex. */
4203 configASSERT( pxTCB->uxMutexesHeld );
4204
4205 /* Determine the priority to which the priority of the task that
4206 holds the mutex should be set. This will be the greater of the
4207 holding task's base priority and the priority of the highest
4208 priority task that is waiting to obtain the mutex. */
4209 if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
4210 {
4211 uxPriorityToUse = uxHighestPriorityWaitingTask;
4212 }
4213 else
4214 {
4215 uxPriorityToUse = pxTCB->uxBasePriority;
4216 }
4217
4218 /* Does the priority need to change? */
4219 if( pxTCB->uxPriority != uxPriorityToUse )
4220 {
4221 /* Only disinherit if no other mutexes are held. This is a
4222 simplification in the priority inheritance implementation. If
4223 the task that holds the mutex is also holding other mutexes then
4224 the other mutexes may have caused the priority inheritance. */
4225 if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
4226 {
4227 /* If a task has timed out because it already holds the
4228 				mutex it was trying to obtain then it cannot have inherited
4229 its own priority. */
4230 configASSERT( pxTCB != pxCurrentTCB );
4231
4232 /* Disinherit the priority, remembering the previous
4233 priority to facilitate determining the subject task's
4234 state. */
4235 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
4236 uxPriorityUsedOnEntry = pxTCB->uxPriority;
4237 pxTCB->uxPriority = uxPriorityToUse;
4238
4239 /* Only reset the event list item value if the value is not
4240 being used for anything else. */
4241 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
4242 {
4243 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4244 }
4245 else
4246 {
4247 mtCOVERAGE_TEST_MARKER();
4248 }
4249
4250 /* If the running task is not the task that holds the mutex
4251 then the task that holds the mutex could be in either the
4252 Ready, Blocked or Suspended states. Only remove the task
4253 from its current state list if it is in the Ready state as
4254 the task's priority is going to change and there is one
4255 Ready list per priority. */
4256 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
4257 {
4258 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
4259 {
4260 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4261 }
4262 else
4263 {
4264 mtCOVERAGE_TEST_MARKER();
4265 }
4266
4267 prvAddTaskToReadyList( pxTCB );
4268 }
4269 else
4270 {
4271 mtCOVERAGE_TEST_MARKER();
4272 }
4273 }
4274 else
4275 {
4276 mtCOVERAGE_TEST_MARKER();
4277 }
4278 }
4279 else
4280 {
4281 mtCOVERAGE_TEST_MARKER();
4282 }
4283 }
4284 else
4285 {
4286 mtCOVERAGE_TEST_MARKER();
4287 }
4288 }
4289
4290#endif /* configUSE_MUTEXES */
4291/*-----------------------------------------------------------*/
4292
4293#if ( portCRITICAL_NESTING_IN_TCB == 1 )
4294
4295 void vTaskEnterCritical( void )
4296 {
4297 portDISABLE_INTERRUPTS();
4298
4299 if( xSchedulerRunning != pdFALSE )
4300 {
4301 ( pxCurrentTCB->uxCriticalNesting )++;
4302
4303 /* This is not the interrupt safe version of the enter critical
4304 function so assert() if it is being called from an interrupt
4305 context. Only API functions that end in "FromISR" can be used in an
4306 interrupt. Only assert if the critical nesting count is 1 to
4307 protect against recursive calls if the assert function also uses a
4308 critical section. */
4309 if( pxCurrentTCB->uxCriticalNesting == 1 )
4310 {
4311 portASSERT_IF_IN_ISR();
4312 }
4313 }
4314 else
4315 {
4316 mtCOVERAGE_TEST_MARKER();
4317 }
4318 }
4319
4320#endif /* portCRITICAL_NESTING_IN_TCB */
4321/*-----------------------------------------------------------*/
4322
4323#if ( portCRITICAL_NESTING_IN_TCB == 1 )
4324
4325 void vTaskExitCritical( void )
4326 {
4327 if( xSchedulerRunning != pdFALSE )
4328 {
4329 if( pxCurrentTCB->uxCriticalNesting > 0U )
4330 {
4331 ( pxCurrentTCB->uxCriticalNesting )--;
4332
4333 if( pxCurrentTCB->uxCriticalNesting == 0U )
4334 {
4335 portENABLE_INTERRUPTS();
4336 }
4337 else
4338 {
4339 mtCOVERAGE_TEST_MARKER();
4340 }
4341 }
4342 else
4343 {
4344 mtCOVERAGE_TEST_MARKER();
4345 }
4346 }
4347 else
4348 {
4349 mtCOVERAGE_TEST_MARKER();
4350 }
4351 }
4352
4353#endif /* portCRITICAL_NESTING_IN_TCB */
4354/*-----------------------------------------------------------*/
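/* Illustrative sketch, not part of the kernel: a task level critical section
protecting a read-modify-write on data that is also accessed elsewhere.
Critical sections nest, so this works whether or not the caller is already
inside one.  The counter is hypothetical.  Compiled out with #if 0 so it has no
effect on this file. */
#if 0
static volatile uint32_t ulExampleSharedCounter = 0UL;

void vExampleIncrementSharedCounter( void )
{
	taskENTER_CRITICAL();
	{
		/* Interrupts (up to the port's masking level) cannot run here, so
		the increment cannot be interleaved with other accesses. */
		ulExampleSharedCounter++;
	}
	taskEXIT_CRITICAL();
}
#endif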
4355
4356#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
4357
4358 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName )
4359 {
4360 size_t x;
4361
4362 /* Start by copying the entire string. */
4363 strcpy( pcBuffer, pcTaskName );
4364
4365 /* Pad the end of the string with spaces to ensure columns line up when
4366 printed out. */
4367 for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ )
4368 {
4369 pcBuffer[ x ] = ' ';
4370 }
4371
4372 /* Terminate. */
4373 pcBuffer[ x ] = ( char ) 0x00;
4374
4375 /* Return the new end of string. */
4376 return &( pcBuffer[ x ] );
4377 }
4378
4379#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
4380/*-----------------------------------------------------------*/
4381
4382#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
4383
4384 void vTaskList( char * pcWriteBuffer )
4385 {
4386 TaskStatus_t *pxTaskStatusArray;
4387 UBaseType_t uxArraySize, x;
4388 char cStatus;
4389 	uint32_t ulTotalTime, ulStatsAsPercentage;
4390
4391
4392 /*
4393 * PLEASE NOTE:
4394 *
4395 * This function is provided for convenience only, and is used by many
4396 * of the demo applications. Do not consider it to be part of the
4397 * scheduler.
4398 *
4399 * vTaskList() calls uxTaskGetSystemState(), then formats part of the
4400 * uxTaskGetSystemState() output into a human readable table that
4401 * displays task names, states and stack usage.
4402 *
4403 * vTaskList() has a dependency on the sprintf() C library function that
4404 * might bloat the code size, use a lot of stack, and provide different
4405 * results on different platforms. An alternative, tiny, third party,
4406 * and limited functionality implementation of sprintf() is provided in
4407 * many of the FreeRTOS/Demo sub-directories in a file called
4408 * printf-stdarg.c (note printf-stdarg.c does not provide a full
4409 * snprintf() implementation!).
4410 *
4411 * It is recommended that production systems call uxTaskGetSystemState()
4412 * directly to get access to raw stats data, rather than indirectly
4413 * through a call to vTaskList().
4414 */
4415
4416
4417 /* Make sure the write buffer does not contain a string. */
4418 *pcWriteBuffer = ( char ) 0x00;
4419
4420 /* Take a snapshot of the number of tasks in case it changes while this
4421 function is executing. */
4422 uxArraySize = uxCurrentNumberOfTasks;
4423
4424 /* Allocate an array index for each task. NOTE! if
4425 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4426 equate to NULL. */
4427 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
4428
4429 if( pxTaskStatusArray != NULL )
4430 {
4431 /* Generate the (binary) data. */
4432 			uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
4433 			ulTotalTime /= 100UL;
4434
4435 /* Create a human readable table from the binary data. */
4436 for( x = 0; x < uxArraySize; x++ )
4437 {
4438 switch( pxTaskStatusArray[ x ].eCurrentState )
4439 {
4440 case eRunning: cStatus = tskRUNNING_CHAR;
4441 break;
4442
4443 case eReady: cStatus = tskREADY_CHAR;
4444 break;
4445
4446 case eBlocked: cStatus = tskBLOCKED_CHAR;
4447 break;
4448
4449 case eSuspended: cStatus = tskSUSPENDED_CHAR;
4450 break;
4451
4452 case eDeleted: cStatus = tskDELETED_CHAR;
4453 break;
4454
4455 case eInvalid: /* Fall through. */
4456 default: /* Should not get here, but it is included
4457 to prevent static checking errors. */
4458 cStatus = ( char ) 0x00;
4459 break;
4460 }
4461
4462 /* Write the task name to the string, padding with spaces so it
4463 can be printed in tabular form more easily. */
4464 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4465
4466
4467 				ulStatsAsPercentage = ulTotalTime == 0 ? 0 : pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;
4468 				/* Write the rest of the string. */
4469 				sprintf( pcWriteBuffer, "\t%u\t%c\t%u\t\t%u\t\t%u\t\t%u\t%u\t\r\n",
4470 ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber,
4471 cStatus,
4472 ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority,
4473 ( unsigned int ) pxTaskStatusArray[ x ].uStackTotal,
4474 ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark,
4475 ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter,
4476 ( unsigned int ) ulStatsAsPercentage);
4477 				pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
4478 }
4479
4480 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4481 is 0 then vPortFree() will be #defined to nothing. */
4482 vPortFree( pxTaskStatusArray );
4483 }
4484 else
4485 {
4486 mtCOVERAGE_TEST_MARKER();
4487 }
4488 }
4489
4490#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
4491/*----------------------------------------------------------*/
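/* Illustrative sketch, not part of the kernel: calling uxTaskGetSystemState()
directly, as the note above recommends for production systems, rather than
formatting text with vTaskList().  The array size and the consumer of the raw
data are hypothetical.  Compiled out with #if 0 so it has no effect on this
file. */
#if 0
#define exampleMAX_TASKS	16

void vExampleGatherTaskStats( void )
{
TaskStatus_t xStatusArray[ exampleMAX_TASKS ];
UBaseType_t uxTasksReported;
uint32_t ulTotalRunTime;

	/* Fills the array with one TaskStatus_t per task and returns the number
	of tasks reported (0 if the array was too small to hold them all). */
	uxTasksReported = uxTaskGetSystemState( xStatusArray, ( UBaseType_t ) exampleMAX_TASKS, &ulTotalRunTime );

	vExampleProcessStats( xStatusArray, uxTasksReported, ulTotalRunTime ); /* Hypothetical. */
}
#endif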
4492
4493#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
4494
4495 void vTaskGetRunTimeStats( char *pcWriteBuffer )
4496 {
4497 TaskStatus_t *pxTaskStatusArray;
4498 UBaseType_t uxArraySize, x;
4499 uint32_t ulTotalTime, ulStatsAsPercentage;
4500
4501 #if( configUSE_TRACE_FACILITY != 1 )
4502 {
4503 #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
4504 }
4505 #endif
4506
4507 /*
4508 * PLEASE NOTE:
4509 *
4510 * This function is provided for convenience only, and is used by many
4511 * of the demo applications. Do not consider it to be part of the
4512 * scheduler.
4513 *
4514 * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
4515 * of the uxTaskGetSystemState() output into a human readable table that
4516 * displays the amount of time each task has spent in the Running state
4517 * in both absolute and percentage terms.
4518 *
4519 * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
4520 * function that might bloat the code size, use a lot of stack, and
4521 * provide different results on different platforms. An alternative,
4522 * tiny, third party, and limited functionality implementation of
4523 * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
4524 * a file called printf-stdarg.c (note printf-stdarg.c does not provide
4525 * a full snprintf() implementation!).
4526 *
4527 * It is recommended that production systems call uxTaskGetSystemState()
4528 * directly to get access to raw stats data, rather than indirectly
4529 * through a call to vTaskGetRunTimeStats().
4530 */
4531
4532 /* Make sure the write buffer does not contain a string. */
4533 *pcWriteBuffer = ( char ) 0x00;
4534
4535 /* Take a snapshot of the number of tasks in case it changes while this
4536 function is executing. */
4537 uxArraySize = uxCurrentNumberOfTasks;
4538
4539 /* Allocate an array index for each task. NOTE! If
4540 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4541 equate to NULL. */
4542 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
4543
4544 if( pxTaskStatusArray != NULL )
4545 {
4546 /* Generate the (binary) data. */
4547 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
4548
4549 /* For percentage calculations. */
4550 ulTotalTime /= 100UL;
4551
4552 /* Avoid divide by zero errors. */
4553 if( ulTotalTime > 0UL )
4554 {
4555 /* Create a human readable table from the binary data. */
4556 for( x = 0; x < uxArraySize; x++ )
4557 {
4558 /* What percentage of the total run time has the task used?
4559 This will always be rounded down to the nearest integer.
4560 				ulTotalTime has already been divided by 100. */
4561 ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;
4562
4563 /* Write the task name to the string, padding with
4564 spaces so it can be printed in tabular form more
4565 easily. */
4566 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4567
4568 if( ulStatsAsPercentage > 0UL )
4569 {
4570 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4571 {
4572 sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
4573 }
4574 #else
4575 {
4576 /* sizeof( int ) == sizeof( long ) so a smaller
4577 printf() library can be used. */
4578 sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
4579 }
4580 #endif
4581 }
4582 else
4583 {
4584 /* If the percentage is zero here then the task has
4585 consumed less than 1% of the total run time. */
4586 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4587 {
4588 sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
4589 }
4590 #else
4591 {
4592 /* sizeof( int ) == sizeof( long ) so a smaller
4593 printf() library can be used. */
4594 sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
4595 }
4596 #endif
4597 }
4598
4599 pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
4600 }
4601 }
4602 else
4603 {
4604 mtCOVERAGE_TEST_MARKER();
4605 }
4606
4607 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4608 is 0 then vPortFree() will be #defined to nothing. */
4609 vPortFree( pxTaskStatusArray );
4610 }
4611 else
4612 {
4613 mtCOVERAGE_TEST_MARKER();
4614 }
4615 }
4616
4617#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
4618/*-----------------------------------------------------------*/
4619
4620TickType_t uxTaskResetEventItemValue( void )
4621{
4622TickType_t uxReturn;
4623
4624 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
4625
4626 /* Reset the event list item to its normal value - so it can be used with
4627 queues and semaphores. */
4628 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4629
4630 return uxReturn;
4631}
4632/*-----------------------------------------------------------*/
4633
4634#if ( configUSE_MUTEXES == 1 )
4635
4636 TaskHandle_t pvTaskIncrementMutexHeldCount( void )
4637 {
4638 /* If xSemaphoreCreateMutex() is called before any tasks have been created
4639 then pxCurrentTCB will be NULL. */
4640 if( pxCurrentTCB != NULL )
4641 {
4642 ( pxCurrentTCB->uxMutexesHeld )++;
4643 }
4644
4645 return pxCurrentTCB;
4646 }
4647
4648#endif /* configUSE_MUTEXES */
4649/*-----------------------------------------------------------*/
4650
4651#if( configUSE_TASK_NOTIFICATIONS == 1 )
4652
4653 uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
4654 {
4655 uint32_t ulReturn;
4656
4657 taskENTER_CRITICAL();
4658 {
4659 /* Only block if the notification count is not already non-zero. */
4660 if( pxCurrentTCB->ulNotifiedValue == 0UL )
4661 {
4662 /* Mark this task as waiting for a notification. */
4663 pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
4664
4665 if( xTicksToWait > ( TickType_t ) 0 )
4666 {
4667 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
4668 traceTASK_NOTIFY_TAKE_BLOCK();
4669
4670 /* All ports are written to allow a yield in a critical
4671 section (some will yield immediately, others wait until the
4672 critical section exits) - but it is not something that
4673 application code should ever do. */
4674 portYIELD_WITHIN_API();
4675 }
4676 else
4677 {
4678 mtCOVERAGE_TEST_MARKER();
4679 }
4680 }
4681 else
4682 {
4683 mtCOVERAGE_TEST_MARKER();
4684 }
4685 }
4686 taskEXIT_CRITICAL();
4687
4688 taskENTER_CRITICAL();
4689 {
4690 traceTASK_NOTIFY_TAKE();
4691 ulReturn = pxCurrentTCB->ulNotifiedValue;
4692
4693 if( ulReturn != 0UL )
4694 {
4695 if( xClearCountOnExit != pdFALSE )
4696 {
4697 pxCurrentTCB->ulNotifiedValue = 0UL;
4698 }
4699 else
4700 {
4701 pxCurrentTCB->ulNotifiedValue = ulReturn - ( uint32_t ) 1;
4702 }
4703 }
4704 else
4705 {
4706 mtCOVERAGE_TEST_MARKER();
4707 }
4708
4709 pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
4710 }
4711 taskEXIT_CRITICAL();
4712
4713 return ulReturn;
4714 }
4715
4716#endif /* configUSE_TASK_NOTIFICATIONS */
4717/*-----------------------------------------------------------*/
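/* Illustrative sketch, not part of the kernel: using a direct to task
notification as a lightweight binary semaphore, the use case ulTaskNotifyTake()
above is designed for.  An interrupt gives the notification with
vTaskNotifyGiveFromISR() and the handler task blocks on ulTaskNotifyTake().  The
task handle and processing function are hypothetical, and portYIELD_FROM_ISR()
takes a port specific form.  Compiled out with #if 0 so it has no effect on this
file. */
#if 0
static TaskHandle_t xExampleHandlerTask = NULL; /* Set when the task is created. */

void vExampleDeferredInterruptHandler( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;

	vTaskNotifyGiveFromISR( xExampleHandlerTask, &xHigherPriorityTaskWoken );
	portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}

void vExampleHandlerTask( void *pvParameters )
{
	( void ) pvParameters;

	for( ;; )
	{
		/* Block until the ISR gives a notification.  pdTRUE clears the
		count back to zero on exit, giving binary semaphore behaviour. */
		if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) != 0UL )
		{
			vExampleProcessEvent(); /* Hypothetical. */
		}
	}
}
#endif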
4718
4719#if( configUSE_TASK_NOTIFICATIONS == 1 )
4720
4721 BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait )
4722 {
4723 BaseType_t xReturn;
4724
4725 taskENTER_CRITICAL();
4726 {
4727 /* Only block if a notification is not already pending. */
4728 if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
4729 {
4730 /* Clear bits in the task's notification value as bits may get
4731 set by the notifying task or interrupt. This can be used to
4732 clear the value to zero. */
4733 pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnEntry;
4734
4735 /* Mark this task as waiting for a notification. */
4736 pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
4737
4738 if( xTicksToWait > ( TickType_t ) 0 )
4739 {
4740 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
4741 traceTASK_NOTIFY_WAIT_BLOCK();
4742
4743 /* All ports are written to allow a yield in a critical
4744 section (some will yield immediately, others wait until the
4745 critical section exits) - but it is not something that
4746 application code should ever do. */
4747 portYIELD_WITHIN_API();
4748 }
4749 else
4750 {
4751 mtCOVERAGE_TEST_MARKER();
4752 }
4753 }
4754 else
4755 {
4756 mtCOVERAGE_TEST_MARKER();
4757 }
4758 }
4759 taskEXIT_CRITICAL();
4760
4761 taskENTER_CRITICAL();
4762 {
4763 traceTASK_NOTIFY_WAIT();
4764
4765 if( pulNotificationValue != NULL )
4766 {
4767 /* Output the current notification value, which may or may not
4768 have changed. */
4769 *pulNotificationValue = pxCurrentTCB->ulNotifiedValue;
4770 }
4771
4772 		/* If ucNotifyState is taskNOTIFICATION_RECEIVED then either the task
4773 		never entered the Blocked state (because a notification was already
4774 		pending) or the task unblocked because of a notification. Otherwise the
4775 		task unblocked because of a timeout. */
4776 if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
4777 {
4778 /* A notification was not received. */
4779 xReturn = pdFALSE;
4780 }
4781 else
4782 {
4783 /* A notification was already pending or a notification was
4784 received while the task was waiting. */
4785 pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnExit;
4786 xReturn = pdTRUE;
4787 }
4788
4789 pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
4790 }
4791 taskEXIT_CRITICAL();
4792
4793 return xReturn;
4794 }
4795
4796#endif /* configUSE_TASK_NOTIFICATIONS */
4797/*-----------------------------------------------------------*/
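/* Illustrative sketch, not part of the kernel: using the notification value as
a small set of event flags.  One context sets bits with xTaskNotify() and
eSetBits; the waiting task collects them with xTaskNotifyWait() and clears the
bits it handles on exit.  The bit definitions, task handle and handlers are
hypothetical.  Compiled out with #if 0 so it has no effect on this file. */
#if 0
#define exampleRX_BIT		( 1UL << 0 )
#define exampleTX_BIT		( 1UL << 1 )

static TaskHandle_t xExampleEventTask = NULL; /* Set when the task is created. */

void vExampleSignalRxComplete( void )
{
	( void ) xTaskNotify( xExampleEventTask, exampleRX_BIT, eSetBits );
}

void vExampleEventTask( void *pvParameters )
{
uint32_t ulNotifiedValue;

	( void ) pvParameters;

	for( ;; )
	{
		/* Clear nothing on entry; clear the bits handled below on exit. */
		if( xTaskNotifyWait( 0UL, ( exampleRX_BIT | exampleTX_BIT ), &ulNotifiedValue, portMAX_DELAY ) == pdTRUE )
		{
			if( ( ulNotifiedValue & exampleRX_BIT ) != 0UL )
			{
				vExampleHandleRx(); /* Hypothetical. */
			}

			if( ( ulNotifiedValue & exampleTX_BIT ) != 0UL )
			{
				vExampleHandleTx(); /* Hypothetical. */
			}
		}
	}
}
#endif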
4798
4799#if( configUSE_TASK_NOTIFICATIONS == 1 )
4800
4801 BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue )
4802 {
4803 TCB_t * pxTCB;
4804 BaseType_t xReturn = pdPASS;
4805 uint8_t ucOriginalNotifyState;
4806
4807 configASSERT( xTaskToNotify );
4808 pxTCB = xTaskToNotify;
4809
4810 taskENTER_CRITICAL();
4811 {
4812 if( pulPreviousNotificationValue != NULL )
4813 {
4814 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
4815 }
4816
4817 ucOriginalNotifyState = pxTCB->ucNotifyState;
4818
4819 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
4820
4821 switch( eAction )
4822 {
4823 case eSetBits :
4824 pxTCB->ulNotifiedValue |= ulValue;
4825 break;
4826
4827 case eIncrement :
4828 ( pxTCB->ulNotifiedValue )++;
4829 break;
4830
4831 case eSetValueWithOverwrite :
4832 pxTCB->ulNotifiedValue = ulValue;
4833 break;
4834
4835 case eSetValueWithoutOverwrite :
4836 if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
4837 {
4838 pxTCB->ulNotifiedValue = ulValue;
4839 }
4840 else
4841 {
4842 /* The value could not be written to the task. */
4843 xReturn = pdFAIL;
4844 }
4845 break;
4846
4847 case eNoAction:
4848 /* The task is being notified without its notify value being
4849 updated. */
4850 break;
4851
4852 default:
4853 /* Should not get here if all enums are handled.
4854 Artificially force an assert by testing a value the
4855 compiler can't assume is const. */
4856 					configASSERT( pxTCB->ulNotifiedValue == ~0U );
4857
4858 break;
4859 }
4860
4861 traceTASK_NOTIFY();
4862
4863 /* If the task is in the blocked state specifically to wait for a
4864 notification then unblock it now. */
4865 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
4866 {
4867 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
4868 prvAddTaskToReadyList( pxTCB );
4869
4870 /* The task should not have been on an event list. */
4871 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
4872
4873 #if( configUSE_TICKLESS_IDLE != 0 )
4874 {
4875 /* If a task is blocked waiting for a notification then
4876 xNextTaskUnblockTime might be set to the blocked task's time
4877 out time. If the task is unblocked for a reason other than
4878 a timeout xNextTaskUnblockTime is normally left unchanged,
4879 because it will automatically get reset to a new value when
4880 the tick count equals xNextTaskUnblockTime. However if
4881 tickless idling is used it might be more important to enter
4882 sleep mode at the earliest possible time - so reset
4883 xNextTaskUnblockTime here to ensure it is updated at the
4884 earliest possible time. */
4885 prvResetNextTaskUnblockTime();
4886 }
4887 #endif
4888
4889 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
4890 {
4891 /* The notified task has a priority above the currently
4892 executing task so a yield is required. */
4893 taskYIELD_IF_USING_PREEMPTION();
4894 }
4895 else
4896 {
4897 mtCOVERAGE_TEST_MARKER();
4898 }
4899 }
4900 else
4901 {
4902 mtCOVERAGE_TEST_MARKER();
4903 }
4904 }
4905 taskEXIT_CRITICAL();
4906
4907 return xReturn;
4908 }
4909
4910#endif /* configUSE_TASK_NOTIFICATIONS */
4911/*-----------------------------------------------------------*/
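/* Illustrative usage sketch only - NOT part of the kernel and not compiled
unless the hypothetical configINCLUDE_TASK_NOTIFY_EXAMPLES macro is defined.
It shows another task setting one of the event bits used by the sketch above
through the xTaskNotify() macro, which resolves to xTaskGenericNotify() with
the eSetBits action. */
#ifdef configINCLUDE_TASK_NOTIFY_EXAMPLES

	static void prvExampleSignalRxEvent( TaskHandle_t xExampleTaskHandle )
	{
		/* The eSetBits action cannot fail, so the return value is not
		checked. */
		( void ) xTaskNotify( xExampleTaskHandle, exampleRX_EVENT_BIT, eSetBits );
	}

#endif /* configINCLUDE_TASK_NOTIFY_EXAMPLES */
/*-----------------------------------------------------------*/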
4912
4913#if( configUSE_TASK_NOTIFICATIONS == 1 )
4914
4915 BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken )
4916 {
4917 TCB_t * pxTCB;
4918 uint8_t ucOriginalNotifyState;
4919 BaseType_t xReturn = pdPASS;
4920 UBaseType_t uxSavedInterruptStatus;
4921
4922 configASSERT( xTaskToNotify );
4923
4924 /* RTOS ports that support interrupt nesting have the concept of a
4925 maximum system call (or maximum API call) interrupt priority.
4926		Interrupts that are above the maximum system call priority are kept
4927 permanently enabled, even when the RTOS kernel is in a critical section,
4928 but cannot make any calls to FreeRTOS API functions. If configASSERT()
4929 is defined in FreeRTOSConfig.h then
4930 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
4931 failure if a FreeRTOS API function is called from an interrupt that has
4932 been assigned a priority above the configured maximum system call
4933 priority. Only FreeRTOS functions that end in FromISR can be called
4934 from interrupts that have been assigned a priority at or (logically)
4935 below the maximum system call interrupt priority. FreeRTOS maintains a
4936 separate interrupt safe API to ensure interrupt entry is as fast and as
4937 simple as possible. More information (albeit Cortex-M specific) is
4938 provided on the following link:
4939 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
4940 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
4941
4942 pxTCB = xTaskToNotify;
4943
4944 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
4945 {
4946 if( pulPreviousNotificationValue != NULL )
4947 {
4948 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
4949 }
4950
4951 ucOriginalNotifyState = pxTCB->ucNotifyState;
4952 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
4953
4954 switch( eAction )
4955 {
4956 case eSetBits :
4957 pxTCB->ulNotifiedValue |= ulValue;
4958 break;
4959
4960 case eIncrement :
4961 ( pxTCB->ulNotifiedValue )++;
4962 break;
4963
4964 case eSetValueWithOverwrite :
4965 pxTCB->ulNotifiedValue = ulValue;
4966 break;
4967
4968 case eSetValueWithoutOverwrite :
4969 if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
4970 {
4971 pxTCB->ulNotifiedValue = ulValue;
4972 }
4973 else
4974 {
4975 /* The value could not be written to the task. */
4976 xReturn = pdFAIL;
4977 }
4978 break;
4979
4980 case eNoAction :
4981 /* The task is being notified without its notify value being
4982 updated. */
4983 break;
4984
4985 default:
4986 /* Should not get here if all enums are handled.
4987 Artificially force an assert by testing a value the
4988 compiler can't assume is const. */
4989					configASSERT( pxTCB->ulNotifiedValue == ~0U );
4990					break;
4991 }
4992
4993 traceTASK_NOTIFY_FROM_ISR();
4994
4995 /* If the task is in the blocked state specifically to wait for a
4996 notification then unblock it now. */
4997 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
4998 {
4999 /* The task should not have been on an event list. */
5000 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
5001
5002 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
5003 {
5004 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
5005 prvAddTaskToReadyList( pxTCB );
5006 }
5007 else
5008 {
5009 /* The delayed and ready lists cannot be accessed, so hold
5010 this task pending until the scheduler is resumed. */
5011 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
5012 }
5013
5014 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
5015 {
5016 /* The notified task has a priority above the currently
5017 executing task so a yield is required. */
5018 if( pxHigherPriorityTaskWoken != NULL )
5019 {
5020 *pxHigherPriorityTaskWoken = pdTRUE;
5021 }
5022
5023 /* Mark that a yield is pending in case the user is not
5024 using the "xHigherPriorityTaskWoken" parameter to an ISR
5025 safe FreeRTOS function. */
5026 xYieldPending = pdTRUE;
5027 }
5028 else
5029 {
5030 mtCOVERAGE_TEST_MARKER();
5031 }
5032 }
5033 }
5034 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
5035
5036 return xReturn;
5037 }
5038
5039#endif /* configUSE_TASK_NOTIFICATIONS */
5040/*-----------------------------------------------------------*/
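/* Illustrative usage sketch only - NOT part of the kernel and not compiled
unless the hypothetical configINCLUDE_TASK_NOTIFY_EXAMPLES macro is defined.
A sketch of an interrupt handler notifying a task with xTaskNotifyFromISR()
(which resolves to xTaskGenericNotifyFromISR()) and then requesting a
context switch if the notified task has the higher priority.  The handler
name and xExampleTaskToNotify are hypothetical, and the sketch assumes the
port provides portYIELD_FROM_ISR(). */
#ifdef configINCLUDE_TASK_NOTIFY_EXAMPLES

	static TaskHandle_t xExampleTaskToNotify = NULL;

	void vExampleInterruptHandler( void )
	{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;

		/* Set the receive event bit from the ISR. */
		( void ) xTaskNotifyFromISR( xExampleTaskToNotify, exampleRX_EVENT_BIT, eSetBits, &xHigherPriorityTaskWoken );

		/* If the notified task has a priority above the interrupted task
		then request a context switch before the interrupt exits. */
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}

#endif /* configINCLUDE_TASK_NOTIFY_EXAMPLES */
/*-----------------------------------------------------------*/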
5041
5042#if( configUSE_TASK_NOTIFICATIONS == 1 )
5043
5044 void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken )
5045 {
5046 TCB_t * pxTCB;
5047 uint8_t ucOriginalNotifyState;
5048 UBaseType_t uxSavedInterruptStatus;
5049
5050 configASSERT( xTaskToNotify );
5051
5052 /* RTOS ports that support interrupt nesting have the concept of a
5053 maximum system call (or maximum API call) interrupt priority.
5054		Interrupts that are above the maximum system call priority are kept
5055 permanently enabled, even when the RTOS kernel is in a critical section,
5056 but cannot make any calls to FreeRTOS API functions. If configASSERT()
5057 is defined in FreeRTOSConfig.h then
5058 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
5059 failure if a FreeRTOS API function is called from an interrupt that has
5060 been assigned a priority above the configured maximum system call
5061 priority. Only FreeRTOS functions that end in FromISR can be called
5062 from interrupts that have been assigned a priority at or (logically)
5063 below the maximum system call interrupt priority. FreeRTOS maintains a
5064 separate interrupt safe API to ensure interrupt entry is as fast and as
5065 simple as possible. More information (albeit Cortex-M specific) is
5066 provided on the following link:
5067 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
5068 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
5069
5070 pxTCB = xTaskToNotify;
5071
5072 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
5073 {
5074 ucOriginalNotifyState = pxTCB->ucNotifyState;
5075 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
5076
5077 /* 'Giving' is equivalent to incrementing a count in a counting
5078 semaphore. */
5079 ( pxTCB->ulNotifiedValue )++;
5080
5081 traceTASK_NOTIFY_GIVE_FROM_ISR();
5082
5083 /* If the task is in the blocked state specifically to wait for a
5084 notification then unblock it now. */
5085 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
5086 {
5087 /* The task should not have been on an event list. */
5088 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
5089
5090 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
5091 {
5092 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
5093 prvAddTaskToReadyList( pxTCB );
5094 }
5095 else
5096 {
5097 /* The delayed and ready lists cannot be accessed, so hold
5098 this task pending until the scheduler is resumed. */
5099 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
5100 }
5101
5102 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
5103 {
5104 /* The notified task has a priority above the currently
5105 executing task so a yield is required. */
5106 if( pxHigherPriorityTaskWoken != NULL )
5107 {
5108 *pxHigherPriorityTaskWoken = pdTRUE;
5109 }
5110
5111 /* Mark that a yield is pending in case the user is not
5112 using the "xHigherPriorityTaskWoken" parameter in an ISR
5113 safe FreeRTOS function. */
5114 xYieldPending = pdTRUE;
5115 }
5116 else
5117 {
5118 mtCOVERAGE_TEST_MARKER();
5119 }
5120 }
5121 }
5122 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
5123 }
5124
5125#endif /* configUSE_TASK_NOTIFICATIONS */
5126
5127/*-----------------------------------------------------------*/
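/* Illustrative usage sketch only - NOT part of the kernel and not compiled
unless the hypothetical configINCLUDE_TASK_NOTIFY_EXAMPLES macro is defined.
Shows the lightweight "binary semaphore" pattern for task notifications: an
ISR gives the notification with vTaskNotifyGiveFromISR() and a deferred
processing task takes it with ulTaskNotifyTake().  The vExample/prvExample
names are hypothetical and portYIELD_FROM_ISR() is assumed to exist in the
port. */
#ifdef configINCLUDE_TASK_NOTIFY_EXAMPLES

	static TaskHandle_t xExampleDeferredHandlerTask = NULL;

	void vExampleGiveFromISR( void )
	{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;

		vTaskNotifyGiveFromISR( xExampleDeferredHandlerTask, &xHigherPriorityTaskWoken );
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}

	static void prvExampleDeferredHandlerTask( void *pvParameters )
	{
		( void ) pvParameters;

		for( ;; )
		{
			/* Passing pdTRUE clears the notification value back to zero on
			exit, so the value behaves as a binary rather than a counting
			semaphore. */
			if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) != 0UL )
			{
				/* Perform the deferred interrupt processing here. */
			}
		}
	}

#endif /* configINCLUDE_TASK_NOTIFY_EXAMPLES */
/*-----------------------------------------------------------*/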
5128
5129#if( configUSE_TASK_NOTIFICATIONS == 1 )
5130
5131 BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask )
5132 {
5133 TCB_t *pxTCB;
5134 BaseType_t xReturn;
5135
5136 /* If null is passed in here then it is the calling task that is having
5137 its notification state cleared. */
5138 pxTCB = prvGetTCBFromHandle( xTask );
5139
5140 taskENTER_CRITICAL();
5141 {
5142 if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED )
5143 {
5144 pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
5145 xReturn = pdPASS;
5146 }
5147 else
5148 {
5149 xReturn = pdFAIL;
5150 }
5151 }
5152 taskEXIT_CRITICAL();
5153
5154 return xReturn;
5155 }
5156
5157#endif /* configUSE_TASK_NOTIFICATIONS */
5158/*-----------------------------------------------------------*/
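/* Illustrative usage sketch only - NOT part of the kernel and not compiled
unless the hypothetical configINCLUDE_TASK_NOTIFY_EXAMPLES macro is defined.
Shows xTaskNotifyStateClear() discarding any notification that is already
pending for the calling task so that only a fresh notification will satisfy
the wait that follows. */
#ifdef configINCLUDE_TASK_NOTIFY_EXAMPLES

	static BaseType_t prvExampleClearThenWait( TickType_t xTicksToWait )
	{
		/* Passing NULL clears the notification state of the calling task. */
		( void ) xTaskNotifyStateClear( NULL );

		/* ...application code would start the operation that later notifies
		this task here... */

		/* Wait for the fresh notification, clearing the value on exit. */
		return xTaskNotifyWait( 0UL, 0xFFFFFFFFUL, NULL, xTicksToWait );
	}

#endif /* configINCLUDE_TASK_NOTIFY_EXAMPLES */
/*-----------------------------------------------------------*/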
5159
5160#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
5161 TickType_t xTaskGetIdleRunTimeCounter( void )
5162 {
5163 return xIdleTaskHandle->ulRunTimeCounter;
5164 }
5165#endif
5166/*-----------------------------------------------------------*/
5167
5168static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely )
5169{
5170TickType_t xTimeToWake;
5171const TickType_t xConstTickCount = xTickCount;
5172
5173 #if( INCLUDE_xTaskAbortDelay == 1 )
5174 {
5175 /* About to enter a delayed list, so ensure the ucDelayAborted flag is
5176 reset to pdFALSE so it can be detected as having been set to pdTRUE
5177 when the task leaves the Blocked state. */
5178 pxCurrentTCB->ucDelayAborted = pdFALSE;
5179 }
5180 #endif
5181
5182 /* Remove the task from the ready list before adding it to the blocked list
5183 as the same list item is used for both lists. */
5184 if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
5185 {
5186 /* The current task must be in a ready list, so there is no need to
5187 check, and the port reset macro can be called directly. */
5188 portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */
5189 }
5190 else
5191 {
5192 mtCOVERAGE_TEST_MARKER();
5193 }
5194
5195 #if ( INCLUDE_vTaskSuspend == 1 )
5196 {
5197 if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
5198 {
5199 /* Add the task to the suspended task list instead of a delayed task
5200 list to ensure it is not woken by a timing event. It will block
5201 indefinitely. */
5202 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) );
5203 }
5204 else
5205 {
5206 /* Calculate the time at which the task should be woken if the event
5207 does not occur. This may overflow but this doesn't matter, the
5208 kernel will manage it correctly. */
5209 xTimeToWake = xConstTickCount + xTicksToWait;
5210
5211 /* The list item will be inserted in wake time order. */
5212 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
5213
5214 if( xTimeToWake < xConstTickCount )
5215 {
5216 /* Wake time has overflowed. Place this item in the overflow
5217 list. */
5218 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
5219 }
5220 else
5221 {
5222 /* The wake time has not overflowed, so the current block list
5223 is used. */
5224 vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
5225
5226 /* If the task entering the blocked state was placed at the
5227 head of the list of blocked tasks then xNextTaskUnblockTime
5228 needs to be updated too. */
5229 if( xTimeToWake < xNextTaskUnblockTime )
5230 {
5231 xNextTaskUnblockTime = xTimeToWake;
5232 }
5233 else
5234 {
5235 mtCOVERAGE_TEST_MARKER();
5236 }
5237 }
5238 }
5239 }
5240 #else /* INCLUDE_vTaskSuspend */
5241 {
5242 /* Calculate the time at which the task should be woken if the event
5243 does not occur. This may overflow but this doesn't matter, the kernel
5244 will manage it correctly. */
5245 xTimeToWake = xConstTickCount + xTicksToWait;
5246
5247 /* The list item will be inserted in wake time order. */
5248 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
5249
5250 if( xTimeToWake < xConstTickCount )
5251 {
5252 /* Wake time has overflowed. Place this item in the overflow list. */
5253 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
5254 }
5255 else
5256 {
5257 /* The wake time has not overflowed, so the current block list is used. */
5258 vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
5259
5260 /* If the task entering the blocked state was placed at the head of the
5261 list of blocked tasks then xNextTaskUnblockTime needs to be updated
5262 too. */
5263 if( xTimeToWake < xNextTaskUnblockTime )
5264 {
5265 xNextTaskUnblockTime = xTimeToWake;
5266 }
5267 else
5268 {
5269 mtCOVERAGE_TEST_MARKER();
5270 }
5271 }
5272
5273 /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
5274 ( void ) xCanBlockIndefinitely;
5275 }
5276 #endif /* INCLUDE_vTaskSuspend */
5277}
5278
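/* Worked example of the wake time overflow handling above (illustrative
only, assuming a 16-bit TickType_t for brevity):  if xConstTickCount is
0xFFF0 and xTicksToWait is 0x0020 then xTimeToWake wraps to 0x0010.  Because
0x0010 is less than 0xFFF0 the task is placed on pxOverflowDelayedTaskList.
The delayed lists are swapped by the tick handler when xTickCount itself
overflows to zero, after which the item value of 0x0010 is compared against
the wrapped tick count in the normal way. */
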
5279/* Code below here allows additional code to be inserted into this source file,
5280especially where access to file scope functions and data is needed (for example
5281when performing module tests). */
5282
5283#ifdef FREERTOS_MODULE_TEST
5284 #include "tasks_test_access_functions.h"
5285#endif
5286
5287
5288#if( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )
5289
5290 #include "freertos_tasks_c_additions.h"
5291
5292 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
5293 static void freertos_tasks_c_additions_init( void )
5294 {
5295 FREERTOS_TASKS_C_ADDITIONS_INIT();
5296 }
5297 #endif
5298
5299#endif
5300
5301#if CONFIG_BACKTRACE
5302void task_stack_range(TaskHandle_t xTask, unsigned long *low, unsigned long *high);
5303void task_stack_range(TaskHandle_t xTask, unsigned long *low, unsigned long *high)
5304{
5305 TCB_t *pxTCB;
5306 pxTCB = prvGetTCBFromHandle( xTask );
5307 *low = (unsigned long)pxTCB->pxStack;
5308 *high = *low + pxTCB->uStackDepth * sizeof(StackType_t);
5309}
5310#endif
5311
5312#if ENABLE_KASAN
5313void kasan_enable_current(void)
5314{
5315 if (pxCurrentTCB)
5316 pxCurrentTCB->kasan_depth--;
5317}
5318
5319void kasan_disable_current(void)
5320{
5321 if (pxCurrentTCB)
5322 pxCurrentTCB->kasan_depth++;
5323}
5324
5325int kasan_current_enabled(void)
5326{
5327 if (pxCurrentTCB)
5328 return (pxCurrentTCB->kasan_depth<=0);
5329 return 0;
5330}
5331#endif
5332
5333/* Include implementation source that depends on this file's internal (file scope) elements. */
5334#include "aml_tasks_ext.c"