1/*
2 * FreeRTOS Kernel V10.2.1
3 * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a copy of
6 * this software and associated documentation files (the "Software"), to deal in
7 * the Software without restriction, including without limitation the rights to
8 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 * the Software, and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in all
13 * copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * http://www.FreeRTOS.org
23 * http://aws.amazon.com/freertos
24 *
25 * 1 tab == 4 spaces!
26 */
27
28/* Standard includes. */
29#include <stdlib.h>
30#include <string.h>
31
32/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
33all the API functions to use the MPU wrappers. That should only be done when
34task.h is included from an application file. */
35#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
36
37/* FreeRTOS includes. */
38#include "FreeRTOS.h"
39#include "task.h"
40#include "timers.h"
41#include "stack_macros.h"
42#if ENABLE_FTRACE
43#include "ftrace.h"
44#endif
45
46/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
47because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
48for the header files above, but not in this file, in order to generate the
49correct privileged Vs unprivileged linkage and placement. */
50#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
51
52/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
53functions but without including stdio.h here. */
54#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
55 /* At the bottom of this file are two optional functions that can be used
56 to generate human readable text from the raw data generated by the
57 uxTaskGetSystemState() function. Note the formatting functions are provided
58 for convenience only, and are NOT considered part of the kernel. */
59 #include <stdio.h>
60#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
61
62#if( configUSE_PREEMPTION == 0 )
63 /* If the cooperative scheduler is being used then a yield should not be
64 performed just because a higher priority task has been woken. */
65 #define taskYIELD_IF_USING_PREEMPTION()
66#else
67 #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
68#endif
69
70/* Values that can be assigned to the ucNotifyState member of the TCB. */
71#define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 )
72#define taskWAITING_NOTIFICATION ( ( uint8_t ) 1 )
73#define taskNOTIFICATION_RECEIVED ( ( uint8_t ) 2 )
74
75/*
76 * The value used to fill the stack of a task when the task is created. This
77 * is used purely for checking the high water mark for tasks.
78 */
79#define tskSTACK_FILL_BYTE ( 0xa5U )
80
81/* Bits used to record how a task's stack and TCB were allocated. */
82#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )
83#define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )
84#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )
85
86/* If any of the following are set then task stacks are filled with a known
87value so the high water mark can be determined. If none of the following are
88set then don't fill the stack so there is no unnecessary dependency on memset. */
89#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
90 #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 1
91#else
92 #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 0
93#endif
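/*
 * Illustrative usage sketch (not part of the kernel source): because new
 * stacks are filled with tskSTACK_FILL_BYTE, an application task can ask how
 * much of that fill pattern has never been overwritten.  The value is
 * returned in words (StackType_t units), not bytes.
 *
 *    void vATask( void *pvParameters )
 *    {
 *    UBaseType_t uxHighWaterMark;
 *
 *        for( ;; )
 *        {
 *            // Smallest amount of stack that has ever remained unused.
 *            uxHighWaterMark = uxTaskGetStackHighWaterMark( NULL );
 *            ( void ) uxHighWaterMark;
 *        }
 *    }
 */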
94
95/*
96 * Macros used by vTaskList to indicate which state a task is in.
97 */
98#define tskRUNNING_CHAR ( 'X' )
99#define tskBLOCKED_CHAR ( 'B' )
100#define tskREADY_CHAR ( 'R' )
101#define tskDELETED_CHAR ( 'D' )
102#define tskSUSPENDED_CHAR ( 'S' )
103
104/*
105 * Some kernel aware debuggers require the data the debugger needs access to be
106 * global, rather than file scope.
107 */
108#ifdef portREMOVE_STATIC_QUALIFIER
109 #define static
110#endif
111
112/* The name allocated to the Idle task. This can be overridden by defining
113configIDLE_TASK_NAME in FreeRTOSConfig.h. */
114#ifndef configIDLE_TASK_NAME
115 #define configIDLE_TASK_NAME "IDLE"
116#endif
117
118#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
119
120 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
121 performed in a generic way that is not optimised to any particular
122 microcontroller architecture. */
123
124 /* uxTopReadyPriority holds the priority of the highest priority ready
125 state task. */
126 #define taskRECORD_READY_PRIORITY( uxPriority ) \
127 { \
128 if( ( uxPriority ) > uxTopReadyPriority ) \
129 { \
130 uxTopReadyPriority = ( uxPriority ); \
131 } \
132 } /* taskRECORD_READY_PRIORITY */
133
134 /*-----------------------------------------------------------*/
135
136 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
137 { \
138 UBaseType_t uxTopPriority = uxTopReadyPriority; \
139 \
140 /* Find the highest priority queue that contains ready tasks. */ \
141 while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \
142 { \
143 configASSERT( uxTopPriority ); \
144 --uxTopPriority; \
145 } \
146 \
147 /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
148 the same priority get an equal share of the processor time. */ \
149 listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
150 uxTopReadyPriority = uxTopPriority; \
151 } /* taskSELECT_HIGHEST_PRIORITY_TASK */
152
153 /*-----------------------------------------------------------*/
154
155 /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
156 they are only required when a port optimised method of task selection is
157 being used. */
158 #define taskRESET_READY_PRIORITY( uxPriority )
159 #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )
160
161#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
162
163 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
164 performed in a way that is tailored to the particular microcontroller
165 architecture being used. */
166
167 /* A port optimised version is provided. Call the port defined macros. */
168 #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )
169
170 /*-----------------------------------------------------------*/
171
172 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
173 { \
174 UBaseType_t uxTopPriority; \
175 \
176 /* Find the highest priority list that contains ready tasks. */ \
177 portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \
178 configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
179 listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
180 } /* taskSELECT_HIGHEST_PRIORITY_TASK() */
181
182 /*-----------------------------------------------------------*/
183
184 /* A port optimised version is provided, call it only if the TCB being reset
185 is being referenced from a ready list. If it is referenced from a delayed
186 or suspended list then it won't be in a ready list. */
187 #define taskRESET_READY_PRIORITY( uxPriority ) \
188 { \
189 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
190 { \
191 portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
192 } \
193 }
194
195#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
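/*
 * Illustrative sketch (not part of this file): a port optimised
 * implementation typically keeps uxTopReadyPriority as a bitmap with one bit
 * per priority, and finds the highest set bit with a count-leading-zeros
 * instruction.  The Cortex-M GCC ports, for example, use macros along these
 * lines (shown here only to clarify the generic code above):
 *
 *    #define portRECORD_READY_PRIORITY( uxPriority, uxReadyPriorities ) \
 *                ( uxReadyPriorities ) |= ( 1UL << ( uxPriority ) )
 *    #define portRESET_READY_PRIORITY( uxPriority, uxReadyPriorities ) \
 *                ( uxReadyPriorities ) &= ~( 1UL << ( uxPriority ) )
 *    #define portGET_HIGHEST_PRIORITY( uxTopPriority, uxReadyPriorities ) \
 *                uxTopPriority = ( 31UL - ( uint32_t ) __builtin_clz( ( uxReadyPriorities ) ) )
 */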
196
197/*-----------------------------------------------------------*/
198
199/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
200count overflows. */
201#define taskSWITCH_DELAYED_LISTS() \
202{ \
203 List_t *pxTemp; \
204 \
205 /* The delayed tasks list should be empty when the lists are switched. */ \
206 configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \
207 \
208 pxTemp = pxDelayedTaskList; \
209 pxDelayedTaskList = pxOverflowDelayedTaskList; \
210 pxOverflowDelayedTaskList = pxTemp; \
211 xNumOfOverflows++; \
212 prvResetNextTaskUnblockTime(); \
213}
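/*
 * Worked example (illustrative only): with a 16 bit TickType_t, if
 * xTickCount is 0xFFF0 and a task blocks for 0x20 ticks then its wake time
 * is 0x0010, which is numerically smaller than the current count because it
 * has wrapped.  Such tasks are placed on pxOverflowDelayedTaskList rather
 * than pxDelayedTaskList.  When xTickCount itself wraps to zero the macro
 * above swaps the two list pointers, so the overflow list becomes the
 * current delayed list and its wake times are once again in the future.
 */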
214
215/*-----------------------------------------------------------*/
216
217/*
218 * Place the task represented by pxTCB into the appropriate ready list for
219 * the task. It is inserted at the end of the list.
220 */
221#define prvAddTaskToReadyList( pxTCB ) \
222 traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
223 taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
224 vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
225 tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
226/*-----------------------------------------------------------*/
227
228/*
229 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
230 * where NULL is used to indicate that the handle of the currently executing
231 * task should be used in place of the parameter. This macro simply checks to
232 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
233 */
234#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) )
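/*
 * Illustrative sketch (not part of the kernel): this NULL-means-self
 * convention is what allows calls such as the following from application
 * code:
 *
 *    uxMyPriority = uxTaskPriorityGet( NULL );  // Query the calling task.
 *    vTaskDelete( NULL );                       // Delete the calling task.
 */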
235
236/* The item value of the event list item is normally used to hold the priority
237of the task to which it belongs (coded to allow it to be held in reverse
238priority order). However, it is occasionally borrowed for other purposes. It
239is important its value is not updated due to a task priority change while it is
240being used for another purpose. The following bit definition is used to inform
241the scheduler that the value should not be changed - in which case it is the
242responsibility of whichever module is using the value to ensure it gets set back
243to its original value when it is released. */
244#if( configUSE_16_BIT_TICKS == 1 )
245 #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U
246#else
247 #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL
248#endif
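/*
 * Worked example (illustrative only): event lists are kept in priority order
 * by storing ( configMAX_PRIORITIES - uxPriority ) as the item value - see
 * prvInitialiseNewTask().  With configMAX_PRIORITIES set to 5, a priority 4
 * task stores 1 and a priority 0 task stores 5, so vListInsert() places the
 * higher priority task nearer the head of the event list, and it is the
 * first task woken when the event occurs.
 */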
249
250/*
251 * Task control block. A task control block (TCB) is allocated for each task,
252 * and stores task state information, including a pointer to the task's context
253 * (the task's run time environment, including register values)
254 */
255typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */
256{
257 volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the task's stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
258
259 #if ( portUSING_MPU_WRAPPERS == 1 )
260 xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
261 #endif
262
263 ListItem_t xStateListItem; /*< The list that the state list item of a task is referenced from denotes the state of that task (Ready, Blocked, Suspended). */
264 ListItem_t xEventListItem; /*< Used to reference a task from an event list. */
265 UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */
266 StackType_t *pxStack; /*< Points to the start of the stack. */
267 StackType_t uStackDepth;
268 char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
269
270 #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
271 StackType_t *pxEndOfStack; /*< Points to the highest valid address for the stack. */
272 #endif
273
274 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
275 UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
276 #endif
277
278 #if ( configUSE_TRACE_FACILITY == 1 )
279 UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
280 UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
281 #endif
282
283 #if ( configUSE_MUTEXES == 1 )
284 UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
285 UBaseType_t uxMutexesHeld;
286 #endif
287
288 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
289 TaskHookFunction_t pxTaskTag;
290 #endif
291
292 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
293 void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
294 #endif
295
296 #if( configGENERATE_RUN_TIME_STATS == 1 )
297 uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
298 #endif
299
300 #if ( configUSE_NEWLIB_REENTRANT == 1 )
301 /* Allocate a Newlib reent structure that is specific to this task.
302 Note Newlib support has been included by popular demand, but is not
303 used by the FreeRTOS maintainers themselves. FreeRTOS is not
304 responsible for resulting newlib operation. User must be familiar with
305 newlib and must provide system-wide implementations of the necessary
306 stubs. Be warned that (at the time of writing) the current newlib design
307 implements a system-wide malloc() that must be provided with locks. */
308 struct _reent xNewLib_reent;
309 #endif
310
311 #if( configUSE_TASK_NOTIFICATIONS == 1 )
312 volatile uint32_t ulNotifiedValue;
313 volatile uint8_t ucNotifyState;
314 #endif
315
316 /* See the comments in FreeRTOS.h with the definition of
317 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
318 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
319 uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is statically allocated, to ensure no attempt is made to free the memory. */
320 #endif
321
322 #if( INCLUDE_xTaskAbortDelay == 1 )
323 uint8_t ucDelayAborted;
324 #endif
325
326 #if( configUSE_POSIX_ERRNO == 1 )
327 int iTaskErrno;
328 #endif
329 #if ( configUSE_TASK_START_HOOK == 1 )
330 void *pxTaskFun;
331 void *pxTaskPara;
332 #endif
333 #if ENABLE_KASAN
334 int kasan_depth;
335 #endif
336} tskTCB;
337
338/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
339below to enable the use of older kernel aware debuggers. */
340typedef tskTCB TCB_t;
341
342/*lint -save -e956 A manual analysis and inspection has been used to determine
343which static variables must be declared volatile. */
344PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;
345
346/* Lists for ready and blocked tasks. --------------------
347xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
348doing so breaks some kernel aware debuggers and debuggers that rely on removing
349the static qualifier. */
350PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */
351PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
352PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count). */
353PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
354PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
355PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
356
357#if( INCLUDE_vTaskDelete == 1 )
358
359 PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */
360 PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
361
362#endif
363
364#if ( INCLUDE_vTaskSuspend == 1 )
365
366 PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
367
368#endif
369
370/* Global POSIX errno. Its value is changed upon context switching to match
371the errno of the currently running task. */
372#if ( configUSE_POSIX_ERRNO == 1 )
373 int FreeRTOS_errno = 0;
374#endif
375
376/* Other file private variables. --------------------------------*/
377PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
378PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
379PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
380PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
381PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;
382PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;
383PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
384PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
385PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
386PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
387
388/* Context switches are held pending while the scheduler is suspended. Also,
389interrupts must not manipulate the xStateListItem of a TCB, or any of the
390lists the xStateListItem can be referenced from, if the scheduler is suspended.
391If an interrupt needs to unblock a task while the scheduler is suspended then it
392moves the task's event list item into the xPendingReadyList, ready for the
393kernel to move the task from the pending ready list into the real ready list
394when the scheduler is unsuspended. The pending ready list itself can only be
395accessed from a critical section. */
396PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE;
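/*
 * Illustrative usage sketch (not part of the kernel): application code that
 * must not be switched out, but that cannot tolerate interrupts being
 * disabled, brackets the region with the suspend/resume pair:
 *
 *    vTaskSuspendAll();
 *    {
 *        // Access data shared with other tasks here.  Interrupts remain
 *        // enabled, so keep this region short; any task an ISR readies in
 *        // the meantime is parked on xPendingReadyList until the resume.
 *    }
 *    ( void ) xTaskResumeAll();
 */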
397
398#if ( configGENERATE_RUN_TIME_STATS == 1 )
399
400 /* Do not move these variables to function scope as doing so prevents the
401 code working with debuggers that need to remove the static qualifier. */
402 PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */
403 PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */
404
405#endif
406
407/*lint -restore */
408
409/*-----------------------------------------------------------*/
410
411/* Callback function prototypes. --------------------------*/
412#if( configCHECK_FOR_STACK_OVERFLOW > 0 )
413
414 extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
415
416#endif
417
418#if( configUSE_TICK_HOOK > 0 )
419
420 extern void vApplicationTickHook( void ); /*lint !e526 Symbol not defined as it is an application callback. */
421
422#endif
423
424#if( configSUPPORT_STATIC_ALLOCATION == 1 )
425
426 extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */
427
428#endif
429
430/* File private functions. --------------------------------*/
431
432/**
433 * Utility function that simply returns pdTRUE if the task referenced by xTask is
434 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
435 * is in any other state.
436 */
437#if ( INCLUDE_vTaskSuspend == 1 )
438
439 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
440
441#endif /* INCLUDE_vTaskSuspend */
442
443/*
444 * Utility to ready all the lists used by the scheduler. This is called
445 * automatically upon the creation of the first task.
446 */
447static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
448
449/*
450 * The idle task, which, like all tasks, is implemented as a never ending loop.
451 * The idle task is automatically created and added to the ready lists upon
452 * creation of the first user task.
453 *
454 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
455 * language extensions. The equivalent prototype for this function is:
456 *
457 * void prvIdleTask( void *pvParameters );
458 *
459 */
460static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters );
461
462/*
463 * Utility to free all memory allocated by the scheduler to hold a TCB,
464 * including the stack pointed to by the TCB.
465 *
466 * This does not free memory allocated by the task itself (i.e. memory
467 * allocated by calls to pvPortMalloc from within the tasks application code).
468 */
469#if ( INCLUDE_vTaskDelete == 1 )
470
471 static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION;
472
473#endif
474
475/*
476 * Used only by the idle task. This checks to see if anything has been placed
477 * in the list of tasks waiting to be deleted. If so the task is cleaned up
478 * and its TCB deleted.
479 */
480static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
481
482/*
483 * The currently executing task is entering the Blocked state. Add the task to
484 * either the current or the overflow delayed task list.
485 */
486static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;
487
488/*
489 * Fills a TaskStatus_t structure with information on each task that is
490 * referenced from the pxList list (which may be a ready list, a delayed list,
491 * a suspended list, etc.).
492 *
493 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
494 * NORMAL APPLICATION CODE.
495 */
496#if ( configUSE_TRACE_FACILITY == 1 )
497
498 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION;
499
500#endif
501
502/*
503 * Searches pxList for a task with name pcNameToQuery - returning a handle to
504 * the task if it is found, or NULL if the task is not found.
505 */
506#if ( INCLUDE_xTaskGetHandle == 1 )
507
508 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
509
510#endif
511
512/*
513 * When a task is created, the stack of the task is filled with a known value.
514 * This function determines the 'high water mark' of the task stack by
515 * determining how much of the stack remains at the original preset value.
516 */
517#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
518
519 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
520
521#endif
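/*
 * Sketch of the counting approach (illustrative): starting from the unused
 * end of the stack, bytes are counted while they still hold
 * tskSTACK_FILL_BYTE, and the byte count is then converted to words:
 *
 *    while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
 *    {
 *        pucStackByte -= portSTACK_GROWTH;
 *        ulCount++;
 *    }
 *    ulCount /= ( uint32_t ) sizeof( StackType_t );
 */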
522
523/*
524 * Return the amount of time, in ticks, that will pass before the kernel will
525 * next move a task from the Blocked state to the Running state.
526 *
527 * This conditional compilation should use inequality to 0, not equality to 1.
528 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
529 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
530 * set to a value other than 1.
531 */
532#if ( configUSE_TICKLESS_IDLE != 0 )
533
534 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
535
536#endif
537
538/*
539 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
540 * will exit the Blocked state.
541 */
542static void prvResetNextTaskUnblockTime( void );
543
544#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
545
546 /*
547 * Helper function used to pad task names with spaces when printing out
548 * human readable tables of task information.
549 */
550 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) PRIVILEGED_FUNCTION;
551
552#endif
553
554/*
555 * Called after a Task_t structure has been allocated either statically or
556 * dynamically to fill in the structure's members.
557 */
558static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
559 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
560 const uint32_t ulStackDepth,
561 void * pvParameters,
562 UBaseType_t uxPriority,
563 TaskHandle_t * const pxCreatedTask,
564 TCB_t *pxNewTCB,
565 const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;
566
567/*
568 * Called after a new task has been created and initialised to place the task
569 * under the control of the scheduler.
570 */
571static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION;
572
573/*
574 * freertos_tasks_c_additions_init() should only be called if the user definable
575 * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
576 * called by the function.
577 */
578#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
579
580 static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
581
582#endif
583
584/*-----------------------------------------------------------*/
585
586#if( configSUPPORT_STATIC_ALLOCATION == 1 )
587
588 TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
589 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
590 const uint32_t ulStackDepth,
591 void * const pvParameters,
592 UBaseType_t uxPriority,
593 StackType_t * const puxStackBuffer,
594 StaticTask_t * const pxTaskBuffer )
595 {
596 TCB_t *pxNewTCB;
597 TaskHandle_t xReturn;
598
599 configASSERT( puxStackBuffer != NULL );
600 configASSERT( pxTaskBuffer != NULL );
601
602 #if( configASSERT_DEFINED == 1 )
603 {
604 /* Sanity check that the size of the structure used to declare a
605 variable of type StaticTask_t equals the size of the real task
606 structure. */
607 volatile size_t xSize = sizeof( StaticTask_t );
608 configASSERT( xSize == sizeof( TCB_t ) );
609 ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
610 }
611 #endif /* configASSERT_DEFINED */
612
613
614 if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
615 {
616 /* The memory used for the task's TCB and stack is passed into this
617 function - use them. */
618 pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
619 pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
620
621 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
622 {
623 /* Tasks can be created statically or dynamically, so note this
624 task was created statically in case the task is later deleted. */
625 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
626 }
627 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
628
629 prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL );
630 prvAddNewTaskToReadyList( pxNewTCB );
631 }
632 else
633 {
634 xReturn = NULL;
635 }
636
637 return xReturn;
638 }
639
640#endif /* SUPPORT_STATIC_ALLOCATION */
641/*-----------------------------------------------------------*/
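/*
 * Illustrative usage sketch (not part of the kernel): both buffers are
 * supplied by the application and must remain valid for the lifetime of the
 * task.
 *
 *    #define STACK_SIZE 200
 *    static StackType_t uxStack[ STACK_SIZE ];
 *    static StaticTask_t xTaskBuffer;
 *
 *    void vTaskCode( void *pvParameters )
 *    {
 *        for( ;; )
 *        {
 *            // Task code goes here.
 *        }
 *    }
 *
 *    void vCreateTheTask( void )
 *    {
 *        TaskHandle_t xHandle;
 *        xHandle = xTaskCreateStatic( vTaskCode, "STATIC", STACK_SIZE, NULL,
 *                                     tskIDLE_PRIORITY + 1, uxStack, &xTaskBuffer );
 *    }
 */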
642
643#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
644
645 BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
646 {
647 TCB_t *pxNewTCB;
648 BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
649
650 configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
651 configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );
652
653 if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
654 {
655 /* Allocate space for the TCB. Where the memory comes from depends
656 on the implementation of the port malloc function and whether or
657 not static allocation is being used. */
658 pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;
659
660 /* Store the stack location in the TCB. */
661 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
662
663 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
664 {
665 /* Tasks can be created statically or dynamically, so note this
666 task was created statically in case the task is later deleted. */
667 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
668 }
669 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
670
671 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
672 pxTaskDefinition->pcName,
673 ( uint32_t ) pxTaskDefinition->usStackDepth,
674 pxTaskDefinition->pvParameters,
675 pxTaskDefinition->uxPriority,
676 pxCreatedTask, pxNewTCB,
677 pxTaskDefinition->xRegions );
678
679 prvAddNewTaskToReadyList( pxNewTCB );
680 xReturn = pdPASS;
681 }
682
683 return xReturn;
684 }
685
686#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
687/*-----------------------------------------------------------*/
688
689#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
690
691 BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
692 {
693 TCB_t *pxNewTCB;
694 BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
695
696 configASSERT( pxTaskDefinition->puxStackBuffer );
697
698 if( pxTaskDefinition->puxStackBuffer != NULL )
699 {
700 /* Allocate space for the TCB. Where the memory comes from depends
701 on the implementation of the port malloc function and whether or
702 not static allocation is being used. */
703 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
704
705 if( pxNewTCB != NULL )
706 {
707 /* Store the stack location in the TCB. */
708 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
709
710 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
711 {
712 /* Tasks can be created statically or dynamically, so note
713 this task had a statically allocated stack in case it is
714 later deleted. The TCB was allocated dynamically. */
715 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
716 }
717 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
718
719 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
720 pxTaskDefinition->pcName,
721 ( uint32_t ) pxTaskDefinition->usStackDepth,
722 pxTaskDefinition->pvParameters,
723 pxTaskDefinition->uxPriority,
724 pxCreatedTask, pxNewTCB,
725 pxTaskDefinition->xRegions );
726
727 prvAddNewTaskToReadyList( pxNewTCB );
728 xReturn = pdPASS;
729 }
730 }
731
732 return xReturn;
733 }
734
735#endif /* portUSING_MPU_WRAPPERS */
736/*-----------------------------------------------------------*/
737
738#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
739
740 BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
741 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
742 const configSTACK_DEPTH_TYPE usStackDepth,
743 void * const pvParameters,
744 UBaseType_t uxPriority,
745 TaskHandle_t * const pxCreatedTask )
746 {
747 TCB_t *pxNewTCB;
748 BaseType_t xReturn;
749
750
751 /* If the stack grows down then allocate the stack then the TCB so the stack
752 does not grow into the TCB. Likewise if the stack grows up then allocate
753 the TCB then the stack. */
754 #if( portSTACK_GROWTH > 0 )
755 {
756 /* Allocate space for the TCB. Where the memory comes from depends on
757 the implementation of the port malloc function and whether or not static
758 allocation is being used. */
759 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
760
761 if( pxNewTCB != NULL )
762 {
763 /* Allocate space for the stack used by the task being created.
764 The base of the stack memory is stored in the TCB so the task can
765 be deleted later if required. */
766 pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
767
768 if( pxNewTCB->pxStack == NULL )
769 {
770 /* Could not allocate the stack. Delete the allocated TCB. */
771 vPortFree( pxNewTCB );
772 pxNewTCB = NULL;
773 }
774 }
775 }
776 #else /* portSTACK_GROWTH */
777 {
778 StackType_t *pxStack;
779
780 /* Allocate space for the stack used by the task being created. */
781 pxStack = pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */
782
783 if( pxStack != NULL )
784 {
785 /* Allocate space for the TCB. */
786 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */
787
788 if( pxNewTCB != NULL )
789 {
790 /* Store the stack location in the TCB. */
791 pxNewTCB->pxStack = pxStack;
792 }
793 else
794 {
795 /* The stack cannot be used as the TCB was not created. Free
796 it again. */
797 vPortFree( pxStack );
798 }
799 }
800 else
801 {
802 pxNewTCB = NULL;
803 }
804 }
805 #endif /* portSTACK_GROWTH */
806
807 if( pxNewTCB != NULL )
808 {
809 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */
810 {
811 /* Tasks can be created statically or dynamically, so note this
812 task was created dynamically in case it is later deleted. */
813 pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
814 }
815 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
816
817 prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
818 prvAddNewTaskToReadyList( pxNewTCB );
819 xReturn = pdPASS;
820 }
821 else
822 {
823 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
824 }
825
826 return xReturn;
827 }
828
829#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
830/*-----------------------------------------------------------*/
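/*
 * Illustrative usage sketch (not part of the kernel): both the TCB and the
 * stack come from the FreeRTOS heap, so the return value must be checked.
 *
 *    void vOtherTask( void *pvParameters )
 *    {
 *        for( ;; )
 *        {
 *            // Task code goes here.
 *        }
 *    }
 *
 *    void vCreateTheTask( void )
 *    {
 *    TaskHandle_t xHandle = NULL;
 *
 *        if( xTaskCreate( vOtherTask, "OTHER", configMINIMAL_STACK_SIZE, NULL,
 *                         tskIDLE_PRIORITY + 1, &xHandle ) != pdPASS )
 *        {
 *            // Insufficient heap for the TCB and/or stack.
 *        }
 *    }
 */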
831#if ( configUSE_TASK_START_HOOK == 1 )
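/* Wrapper used as the entry point of every task when the start hook is
enabled: it calls the application supplied vApplicationTaskStartHook() once,
then jumps to the task's real function.  The real function and its parameter
are stashed in the TCB (pxTaskFun/pxTaskPara) by prvInitialiseNewTask(). */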
832static void prvTaskFunWrp( void *para)
833{
834 TCB_t *pxNewTCB = (TCB_t *)para;
835 {
836
837 extern void vApplicationTaskStartHook( void );
838 vApplicationTaskStartHook();
839 }
840 ((TaskFunction_t)pxNewTCB->pxTaskFun)(pxNewTCB->pxTaskPara);
841}
842#endif
843static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
844 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
845 const uint32_t ulStackDepth,
846 void * pvParameters,
847 UBaseType_t uxPriority,
848 TaskHandle_t * const pxCreatedTask,
849 TCB_t *pxNewTCB,
850 const MemoryRegion_t * const xRegions )
851{
852StackType_t *pxTopOfStack;
853UBaseType_t x;
854
855 #if( portUSING_MPU_WRAPPERS == 1 )
856 /* Should the task be created in privileged mode? */
857 BaseType_t xRunPrivileged;
858 if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
859 {
860 xRunPrivileged = pdTRUE;
861 }
862 else
863 {
864 xRunPrivileged = pdFALSE;
865 }
866 uxPriority &= ~portPRIVILEGE_BIT;
867 #endif /* portUSING_MPU_WRAPPERS == 1 */
868
869 #if ENABLE_KASAN
870 pxNewTCB->kasan_depth = 0;
871 #endif
872
873 #if ( configUSE_TASK_START_HOOK == 1 )
874 pxNewTCB->pxTaskFun = pxTaskCode;
875 pxNewTCB->pxTaskPara = pvParameters;
876 pxTaskCode = prvTaskFunWrp;
877 pvParameters = pxNewTCB;
878 #endif
879
880 pxNewTCB->uStackDepth = ulStackDepth;
881 /* Avoid dependency on memset() if it is not required. */
882 #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
883 {
884 /* Fill the stack with a known value to assist debugging. */
885 ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
886 }
887 #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */
888
889 /* Calculate the top of stack address. This depends on whether the stack
890 grows from high memory to low (as per the 80x86) or vice versa.
891 portSTACK_GROWTH is used to make the result positive or negative as required
892 by the port. */
893 #if( portSTACK_GROWTH < 0 )
894 {
895 pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
896 pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */
897
898 /* Check the alignment of the calculated top of stack is correct. */
899 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
900
901 #if( configRECORD_STACK_HIGH_ADDRESS == 1 )
902 {
903 /* Also record the stack's high address, which may assist
904 debugging. */
905 pxNewTCB->pxEndOfStack = pxTopOfStack;
906 }
907 #endif /* configRECORD_STACK_HIGH_ADDRESS */
908 }
909 #else /* portSTACK_GROWTH */
910 {
911 pxTopOfStack = pxNewTCB->pxStack;
912
913 /* Check the alignment of the stack buffer is correct. */
914 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
915
916 /* The other extreme of the stack space is required if stack checking is
917 performed. */
918 pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
919 }
920 #endif /* portSTACK_GROWTH */
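/*
 * Worked example (illustrative only): with portBYTE_ALIGNMENT set to 8 the
 * mask is 0x0007, so a calculated top of stack of, say, 0x200011F6 is
 * rounded down to 0x200011F0 by the AND with ~portBYTE_ALIGNMENT_MASK above,
 * giving the 8 byte alignment the port requires.
 */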
921
922 /* Store the task name in the TCB. */
923 if( pcName != NULL )
924 {
925 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
926 {
927 pxNewTCB->pcTaskName[ x ] = pcName[ x ];
928
929 /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
930 configMAX_TASK_NAME_LEN characters just in case the memory after the
931 string is not accessible (extremely unlikely). */
932 if( pcName[ x ] == ( char ) 0x00 )
933 {
934 break;
935 }
936 else
937 {
938 mtCOVERAGE_TEST_MARKER();
939 }
940 }
941
942 /* Ensure the name string is terminated in the case that the string length
943 was greater or equal to configMAX_TASK_NAME_LEN. */
944 pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
945 }
946 else
947 {
948 /* The task has not been given a name, so just ensure there is a NULL
949 terminator when it is read out. */
950 pxNewTCB->pcTaskName[ 0 ] = 0x00;
951 }
952
953 /* This is used as an array index so must ensure it's not too large. First
954 remove the privilege bit if one is present. */
955 if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
956 {
957 uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
958 }
959 else
960 {
961 mtCOVERAGE_TEST_MARKER();
962 }
963
964 pxNewTCB->uxPriority = uxPriority;
965 #if ( configUSE_MUTEXES == 1 )
966 {
967 pxNewTCB->uxBasePriority = uxPriority;
968 pxNewTCB->uxMutexesHeld = 0;
969 }
970 #endif /* configUSE_MUTEXES */
971
972 vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
973 vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
974
975 /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
976 back to the containing TCB from a generic item in a list. */
977 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );
978
979 /* Event lists are always in priority order. */
980 listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
981 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
982
983 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
984 {
985 pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
986 }
987 #endif /* portCRITICAL_NESTING_IN_TCB */
988
989 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
990 {
991 pxNewTCB->pxTaskTag = NULL;
992 }
993 #endif /* configUSE_APPLICATION_TASK_TAG */
994
995 #if ( configGENERATE_RUN_TIME_STATS == 1 )
996 {
997 pxNewTCB->ulRunTimeCounter = 0UL;
998 }
999 #endif /* configGENERATE_RUN_TIME_STATS */
1000
1001 #if ( portUSING_MPU_WRAPPERS == 1 )
1002 {
1003 vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
1004 }
1005 #else
1006 {
1007 /* Avoid compiler warning about unreferenced parameter. */
1008 ( void ) xRegions;
1009 }
1010 #endif
1011
1012 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
1013 {
1014 for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
1015 {
1016 pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL;
1017 }
1018 }
1019 #endif
1020
1021 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
1022 {
1023 pxNewTCB->ulNotifiedValue = 0;
1024 pxNewTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
1025 }
1026 #endif
1027
1028 #if ( configUSE_NEWLIB_REENTRANT == 1 )
1029 {
1030 /* Initialise this task's Newlib reent structure. */
1031 _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );
1032 }
1033 #endif
1034
1035 #if( INCLUDE_xTaskAbortDelay == 1 )
1036 {
1037 pxNewTCB->ucDelayAborted = pdFALSE;
1038 }
1039 #endif
1040
1041 /* Initialize the TCB stack to look as if the task was already running,
1042 but had been interrupted by the scheduler. The return address is set
1043 to the start of the task function. Once the stack has been initialised
1044 the top of stack variable is updated. */
1045 #if( portUSING_MPU_WRAPPERS == 1 )
1046 {
1047 /* If the port has capability to detect stack overflow,
1048 pass the stack end address to the stack initialization
1049 function as well. */
1050 #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1051 {
1052 #if( portSTACK_GROWTH < 0 )
1053 {
1054 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );
1055 }
1056 #else /* portSTACK_GROWTH */
1057 {
1058 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );
1059 }
1060 #endif /* portSTACK_GROWTH */
1061 }
1062 #else /* portHAS_STACK_OVERFLOW_CHECKING */
1063 {
1064 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
1065 }
1066 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1067 }
1068 #else /* portUSING_MPU_WRAPPERS */
1069 {
1070 /* If the port has capability to detect stack overflow,
1071 pass the stack end address to the stack initialization
1072 function as well. */
1073 #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1074 {
1075 #if( portSTACK_GROWTH < 0 )
1076 {
1077 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
1078 }
1079 #else /* portSTACK_GROWTH */
1080 {
1081 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
1082 }
1083 #endif /* portSTACK_GROWTH */
1084 }
1085 #else /* portHAS_STACK_OVERFLOW_CHECKING */
1086 {
1087 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
1088 }
1089 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1090 }
1091 #endif /* portUSING_MPU_WRAPPERS */
1092
1093 if( pxCreatedTask != NULL )
1094 {
1095 /* Pass the handle out in an anonymous way. The handle can be used to
1096 change the created task's priority, delete the created task, etc.*/
1097 *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
1098 }
1099 else
1100 {
1101 mtCOVERAGE_TEST_MARKER();
1102 }
1103}
1104/*-----------------------------------------------------------*/
1105#ifdef CONFIG_DMALLOC
1106extern struct MemLeak MemLeak_t[CONFIG_DMALLOC_SIZE];
1107#endif
1108
1109static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
1110{
1111 /* Ensure interrupts don't access the task lists while the lists are being
1112 updated. */
1113 taskENTER_CRITICAL();
1114 {
1115 uxCurrentNumberOfTasks++;
1116 if( pxCurrentTCB == NULL )
1117 {
1118 /* There are no other tasks, or all the other tasks are in
1119 the suspended state - make this the current task. */
1120 pxCurrentTCB = pxNewTCB;
1121
1122 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
1123 {
1124 /* This is the first task to be created so do the preliminary
1125 initialisation required. We will not recover if this call
1126 fails, but we will report the failure. */
1127 prvInitialiseTaskLists();
1128 }
1129 else
1130 {
1131 mtCOVERAGE_TEST_MARKER();
1132 }
1133 }
1134 else
1135 {
1136 /* If the scheduler is not already running, make this task the
1137 current task if it is the highest priority task to be created
1138 so far. */
1139 if( xSchedulerRunning == pdFALSE )
1140 {
1141 if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority )
1142 {
1143 pxCurrentTCB = pxNewTCB;
1144 }
1145 else
1146 {
1147 mtCOVERAGE_TEST_MARKER();
1148 }
1149 }
1150 else
1151 {
1152 mtCOVERAGE_TEST_MARKER();
1153 }
1154 }
1155
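/* Vendor addition: when CONFIG_DMALLOC is enabled, uxTaskNumber doubles as an
index into the MemLeak_t[] accounting table.  Once the scheduler is running
the first free slot is reused rather than incrementing without bound. */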
1156#ifdef CONFIG_DMALLOC
1157 int i = 0;
1158
1159 if ( xSchedulerRunning != pdFALSE ) {
1160 for (i = 1; i < CONFIG_DMALLOC_SIZE; i ++) {
1161 if (MemLeak_t[i].Flag == 0) {
1162 uxTaskNumber = i;
1163 break;
1164 }
1165 }
1166
1167 configASSERT( i < CONFIG_DMALLOC_SIZE );
1168 } else {
1169 uxTaskNumber ++;
1170 }
1171 MemLeak_t[uxTaskNumber].Flag = 1;
1172#else
1173 uxTaskNumber++;
1174#endif
1175
1176 #if ( configUSE_TRACE_FACILITY == 1 )
1177 {
1178 /* Add a counter into the TCB for tracing only. */
1179 pxNewTCB->uxTCBNumber = uxTaskNumber;
1180 }
1181 #endif /* configUSE_TRACE_FACILITY */
1182 traceTASK_CREATE( pxNewTCB );
1183
1184 prvAddTaskToReadyList( pxNewTCB );
1185
1186 portSETUP_TCB( pxNewTCB );
1187 }
1188 taskEXIT_CRITICAL();
1189
1190 if( xSchedulerRunning != pdFALSE )
1191 {
1192 /* If the created task is of a higher priority than the current task
1193 then it should run now. */
1194 if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority )
1195 {
1196 taskYIELD_IF_USING_PREEMPTION();
1197 }
1198 else
1199 {
1200 mtCOVERAGE_TEST_MARKER();
1201 }
1202 }
1203 else
1204 {
1205 mtCOVERAGE_TEST_MARKER();
1206 }
1207}
1208/*-----------------------------------------------------------*/
1209
1210#if ( INCLUDE_vTaskDelete == 1 )
1211
1212 void vTaskDelete( TaskHandle_t xTaskToDelete )
1213 {
1214 TCB_t *pxTCB;
1215
1216 taskENTER_CRITICAL();
1217 {
1218 /* If null is passed in here then it is the calling task that is
1219 being deleted. */
1220 pxTCB = prvGetTCBFromHandle( xTaskToDelete );
1221
1222 /* Remove task from the ready list. */
1223 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1224 {
1225 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1226 }
1227 else
1228 {
1229 mtCOVERAGE_TEST_MARKER();
1230 }
1231
1232 /* Is the task waiting on an event also? */
1233 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1234 {
1235 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1236 }
1237 else
1238 {
1239 mtCOVERAGE_TEST_MARKER();
1240 }
1241
1242 /* Increment the uxTaskNumber also so kernel aware debuggers can
1243 detect that the task lists need re-generating. This is done before
1244 portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
1245 not return. */
1246 uxTaskNumber++;
1247
1248 if( pxTCB == pxCurrentTCB )
1249 {
1250 /* A task is deleting itself. This cannot complete within the
1251 task itself, as a context switch to another task is required.
1252 Place the task in the termination list. The idle task will
1253 check the termination list and free up any memory allocated by
1254 the scheduler for the TCB and stack of the deleted task. */
1255 vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );
1256
1257 /* Increment the uxDeletedTasksWaitingCleanUp variable so the idle task knows
1258 there is a task that has been deleted and that it should therefore
1259 check the xTasksWaitingTermination list. */
1260 ++uxDeletedTasksWaitingCleanUp;
1261
1262 /* The pre-delete hook is primarily for the Windows simulator,
1263 in which Windows specific clean up operations are performed,
1264 after which it is not possible to yield away from this task -
1265 hence xYieldPending is used to latch that a context switch is
1266 required. */
1267 portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
1268 }
1269 else
1270 {
1271 --uxCurrentNumberOfTasks;
1272 prvDeleteTCB( pxTCB );
1273
1274 /* Reset the next expected unblock time in case it referred to
1275 the task that has just been deleted. */
1276 prvResetNextTaskUnblockTime();
1277 }
1278
1279 traceTASK_DELETE( pxTCB );
1280#ifdef CONFIG_DMALLOC
1281 MemLeak_t[pxTCB->uxTCBNumber].Flag = 0;
1282 MemLeak_t[pxTCB->uxTCBNumber].TaskNum = 0;
1283 MemLeak_t[pxTCB->uxTCBNumber].WantSize = 0;
1284 MemLeak_t[pxTCB->uxTCBNumber].WantTotalSize = 0;
1285 MemLeak_t[pxTCB->uxTCBNumber].MallocCount = 0;
1286 if (MemLeak_t[pxTCB->uxTCBNumber].TaskName)
1287 memset(MemLeak_t[pxTCB->uxTCBNumber].TaskName, 0, 20);
1288 MemLeak_t[pxTCB->uxTCBNumber].FreeSize = 0;
1289 MemLeak_t[pxTCB->uxTCBNumber].FreeTotalSize = 0;
1290 MemLeak_t[pxTCB->uxTCBNumber].FreeCount = 0;
1291#endif
1292 }
1293 taskEXIT_CRITICAL();
1294
1295 /* Force a reschedule if it is the currently running task that has just
1296 been deleted. */
1297 if( xSchedulerRunning != pdFALSE )
1298 {
1299 if( pxTCB == pxCurrentTCB )
1300 {
1301 configASSERT( uxSchedulerSuspended == 0 );
1302 portYIELD_WITHIN_API();
1303 }
1304 else
1305 {
1306 mtCOVERAGE_TEST_MARKER();
1307 }
1308 }
1309 }
1310
1311#endif /* INCLUDE_vTaskDelete */
1312/*-----------------------------------------------------------*/
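/*
 * Illustrative usage sketch (not part of the kernel): a task that deletes
 * itself passes NULL, and the idle task later frees its TCB and stack, so
 * the idle task must be allowed some processing time afterwards.
 *
 *    void vRunOnceTask( void *pvParameters )
 *    {
 *        // Perform the one-off work here.
 *
 *        vTaskDelete( NULL );
 *    }
 */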
1313
1314#if ( INCLUDE_vTaskDelayUntil == 1 )
1315
1316 void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
1317 {
1318 TickType_t xTimeToWake;
1319 BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
1320
1321 configASSERT( pxPreviousWakeTime );
1322 configASSERT( ( xTimeIncrement > 0U ) );
1323 configASSERT( uxSchedulerSuspended == 0 );
1324
1325 vTaskSuspendAll();
1326 {
1327 /* Minor optimisation. The tick count cannot change in this
1328 block. */
1329 const TickType_t xConstTickCount = xTickCount;
1330
1331 /* Generate the tick time at which the task wants to wake. */
1332 xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
1333
1334 if( xConstTickCount < *pxPreviousWakeTime )
1335 {
1336 /* The tick count has overflowed since this function was
1337 last called. In this case the only time we should ever
1338 actually delay is if the wake time has also overflowed,
1339 and the wake time is greater than the tick time. When this
1340 is the case it is as if neither time had overflowed. */
1341 if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
1342 {
1343 xShouldDelay = pdTRUE;
1344 }
1345 else
1346 {
1347 mtCOVERAGE_TEST_MARKER();
1348 }
1349 }
1350 else
1351 {
1352 /* The tick time has not overflowed. In this case we will
1353 delay if either the wake time has overflowed, and/or the
1354 tick time is less than the wake time. */
1355 if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
1356 {
1357 xShouldDelay = pdTRUE;
1358 }
1359 else
1360 {
1361 mtCOVERAGE_TEST_MARKER();
1362 }
1363 }
1364
1365 /* Update the wake time ready for the next call. */
1366 *pxPreviousWakeTime = xTimeToWake;
1367
1368 if( xShouldDelay != pdFALSE )
1369 {
1370 traceTASK_DELAY_UNTIL( xTimeToWake );
1371
1372 /* prvAddCurrentTaskToDelayedList() needs the block time, not
1373 the time to wake, so subtract the current tick count. */
1374 prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
1375 }
1376 else
1377 {
1378 mtCOVERAGE_TEST_MARKER();
1379 }
1380 }
1381 xAlreadyYielded = xTaskResumeAll();
1382
1383 /* Force a reschedule if xTaskResumeAll has not already done so, as we may
1384 have put ourselves to sleep. */
1385 if( xAlreadyYielded == pdFALSE )
1386 {
1387 portYIELD_WITHIN_API();
1388 }
1389 else
1390 {
1391 mtCOVERAGE_TEST_MARKER();
1392 }
1393 }
1394
1395#endif /* INCLUDE_vTaskDelayUntil */
1396/*-----------------------------------------------------------*/
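/*
 * Illustrative usage sketch (not part of the kernel): vTaskDelayUntil() is
 * intended for fixed frequency periodic tasks, because the wake time is
 * measured from the previous wake time rather than from the moment the call
 * is made.
 *
 *    void vPeriodicTask( void *pvParameters )
 *    {
 *    TickType_t xLastWakeTime = xTaskGetTickCount();
 *    const TickType_t xPeriod = pdMS_TO_TICKS( 10 );
 *
 *        for( ;; )
 *        {
 *            vTaskDelayUntil( &xLastWakeTime, xPeriod );
 *
 *            // This point is reached every 10ms, provided the work done
 *            // here completes within one period.
 *        }
 *    }
 */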
1397
1398#if ( INCLUDE_vTaskDelay == 1 )
1399
1400 void vTaskDelay( const TickType_t xTicksToDelay )
1401 {
1402 BaseType_t xAlreadyYielded = pdFALSE;
1403
1404 /* A delay time of zero just forces a reschedule. */
1405 if( xTicksToDelay > ( TickType_t ) 0U )
1406 {
1407 configASSERT( uxSchedulerSuspended == 0 );
1408 vTaskSuspendAll();
1409 {
1410 traceTASK_DELAY();
1411
1412 /* A task that is removed from the event list while the
1413 scheduler is suspended will not get placed in the ready
1414 list or removed from the blocked list until the scheduler
1415 is resumed.
1416
1417 This task cannot be in an event list as it is the currently
1418 executing task. */
1419 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
1420 }
1421 xAlreadyYielded = xTaskResumeAll();
1422 }
1423 else
1424 {
1425 mtCOVERAGE_TEST_MARKER();
1426 }
1427
1428 /* Force a reschedule if xTaskResumeAll has not already done so, as we may
1429 have put ourselves to sleep. */
1430 if( xAlreadyYielded == pdFALSE )
1431 {
1432 portYIELD_WITHIN_API();
1433 }
1434 else
1435 {
1436 mtCOVERAGE_TEST_MARKER();
1437 }
1438 }
1439
1440#endif /* INCLUDE_vTaskDelay */
1441/*-----------------------------------------------------------*/
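/*
 * Illustrative usage sketch (not part of the kernel): unlike
 * vTaskDelayUntil(), the delay here is relative to the moment of the call,
 * so it does not give a fixed execution frequency.
 *
 *    // Block for at least 500ms from now.
 *    vTaskDelay( pdMS_TO_TICKS( 500 ) );
 */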
1442
1443#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
1444
1445 eTaskState eTaskGetState( TaskHandle_t xTask )
1446 {
1447 eTaskState eReturn;
1448 List_t const * pxStateList, *pxDelayedList, *pxOverflowedDelayedList;
1449 const TCB_t * const pxTCB = xTask;
1450
1451 configASSERT( pxTCB );
1452
1453 if( pxTCB == pxCurrentTCB )
1454 {
1455 /* The task calling this function is querying its own state. */
1456 eReturn = eRunning;
1457 }
1458 else
1459 {
1460 taskENTER_CRITICAL();
1461 {
1462 pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
1463 pxDelayedList = pxDelayedTaskList;
1464 pxOverflowedDelayedList = pxOverflowDelayedTaskList;
1465 }
1466 taskEXIT_CRITICAL();
1467
1468 if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
1469 {
1470 /* The task being queried is referenced from one of the Blocked
1471 lists. */
1472 eReturn = eBlocked;
1473 }
1474
1475 #if ( INCLUDE_vTaskSuspend == 1 )
1476 else if( pxStateList == &xSuspendedTaskList )
1477 {
1478 /* The task being queried is referenced from the suspended
1479 list. Is it genuinely suspended or is it blocked
1480 indefinitely? */
1481 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
1482 {
1483 #if( configUSE_TASK_NOTIFICATIONS == 1 )
1484 {
1485 /* The task does not appear on the event list item of
1486 						any of the RTOS objects, but could still be in the
1487 blocked state if it is waiting on its notification
1488 rather than waiting on an object. */
1489 if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
1490 {
1491 eReturn = eBlocked;
1492 }
1493 else
1494 {
1495 eReturn = eSuspended;
1496 }
1497 }
1498 #else
1499 {
1500 eReturn = eSuspended;
1501 }
1502 #endif
1503 }
1504 else
1505 {
1506 eReturn = eBlocked;
1507 }
1508 }
1509 #endif
1510
1511 #if ( INCLUDE_vTaskDelete == 1 )
1512 else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
1513 {
1514 /* The task being queried is referenced from the deleted
1515 tasks list, or it is not referenced from any lists at
1516 all. */
1517 eReturn = eDeleted;
1518 }
1519 #endif
1520
1521 else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
1522 {
1523 /* If the task is not in any other state, it must be in the
1524 Ready (including pending ready) state. */
1525 eReturn = eReady;
1526 }
1527 }
1528
1529 return eReturn;
1530 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1531
1532#endif /* INCLUDE_eTaskGetState */
1533/*-----------------------------------------------------------*/
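/* Usage sketch (illustrative only, compiled out with #if 0): querying the
state of another task.  xWorkerHandle is a hypothetical handle obtained when
the task was created; INCLUDE_eTaskGetState must be set to 1. */
#if 0
    void vCheckWorker( TaskHandle_t xWorkerHandle )
    {
    eTaskState eState = eTaskGetState( xWorkerHandle );

        if( eState == eBlocked )
        {
            /* The worker is waiting on an event, a delay, or a notification. */
        }
        else if( eState == eSuspended )
        {
            /* The worker was suspended with vTaskSuspend(), or is blocked
            with an infinite timeout. */
        }
    }
#endif
/*-----------------------------------------------------------*/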
1534
1535#if ( INCLUDE_uxTaskPriorityGet == 1 )
1536
1537 UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
1538 {
1539 TCB_t const *pxTCB;
1540 UBaseType_t uxReturn;
1541
1542 taskENTER_CRITICAL();
1543 {
1544 /* If null is passed in here then it is the priority of the task
1545 that called uxTaskPriorityGet() that is being queried. */
1546 pxTCB = prvGetTCBFromHandle( xTask );
1547 uxReturn = pxTCB->uxPriority;
1548 }
1549 taskEXIT_CRITICAL();
1550
1551 return uxReturn;
1552 }
1553
1554#endif /* INCLUDE_uxTaskPriorityGet */
1555/*-----------------------------------------------------------*/
1556
1557#if ( INCLUDE_uxTaskPriorityGet == 1 )
1558
1559 UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
1560 {
1561 TCB_t const *pxTCB;
1562 UBaseType_t uxReturn, uxSavedInterruptState;
1563
1564 /* RTOS ports that support interrupt nesting have the concept of a
1565 maximum system call (or maximum API call) interrupt priority.
1566 		Interrupts that are above the maximum system call priority are kept
1567 permanently enabled, even when the RTOS kernel is in a critical section,
1568 but cannot make any calls to FreeRTOS API functions. If configASSERT()
1569 is defined in FreeRTOSConfig.h then
1570 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1571 failure if a FreeRTOS API function is called from an interrupt that has
1572 been assigned a priority above the configured maximum system call
1573 priority. Only FreeRTOS functions that end in FromISR can be called
1574 from interrupts that have been assigned a priority at or (logically)
1575 below the maximum system call interrupt priority. FreeRTOS maintains a
1576 separate interrupt safe API to ensure interrupt entry is as fast and as
1577 simple as possible. More information (albeit Cortex-M specific) is
1578 provided on the following link:
1579 https://www.freertos.org/RTOS-Cortex-M3-M4.html */
1580 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1581
1582 uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR();
1583 {
1584 /* If null is passed in here then it is the priority of the calling
1585 task that is being queried. */
1586 pxTCB = prvGetTCBFromHandle( xTask );
1587 uxReturn = pxTCB->uxPriority;
1588 }
1589 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState );
1590
1591 return uxReturn;
1592 }
1593
1594#endif /* INCLUDE_uxTaskPriorityGet */
1595/*-----------------------------------------------------------*/
1596
1597#if ( INCLUDE_vTaskPrioritySet == 1 )
1598
1599 void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
1600 {
1601 TCB_t *pxTCB;
1602 UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
1603 BaseType_t xYieldRequired = pdFALSE;
1604
1605 configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );
1606
1607 /* Ensure the new priority is valid. */
1608 if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
1609 {
1610 uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
1611 }
1612 else
1613 {
1614 mtCOVERAGE_TEST_MARKER();
1615 }
1616
1617 taskENTER_CRITICAL();
1618 {
1619 /* If null is passed in here then it is the priority of the calling
1620 task that is being changed. */
1621 pxTCB = prvGetTCBFromHandle( xTask );
1622
1623 traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
1624
1625 #if ( configUSE_MUTEXES == 1 )
1626 {
1627 uxCurrentBasePriority = pxTCB->uxBasePriority;
1628 }
1629 #else
1630 {
1631 uxCurrentBasePriority = pxTCB->uxPriority;
1632 }
1633 #endif
1634
1635 if( uxCurrentBasePriority != uxNewPriority )
1636 {
1637 /* The priority change may have readied a task of higher
1638 priority than the calling task. */
1639 if( uxNewPriority > uxCurrentBasePriority )
1640 {
1641 if( pxTCB != pxCurrentTCB )
1642 {
1643 /* The priority of a task other than the currently
1644 running task is being raised. Is the priority being
1645 raised above that of the running task? */
1646 if( uxNewPriority >= pxCurrentTCB->uxPriority )
1647 {
1648 xYieldRequired = pdTRUE;
1649 }
1650 else
1651 {
1652 mtCOVERAGE_TEST_MARKER();
1653 }
1654 }
1655 else
1656 {
1657 /* The priority of the running task is being raised,
1658 but the running task must already be the highest
1659 priority task able to run so no yield is required. */
1660 }
1661 }
1662 else if( pxTCB == pxCurrentTCB )
1663 {
1664 /* Setting the priority of the running task down means
1665 there may now be another task of higher priority that
1666 is ready to execute. */
1667 xYieldRequired = pdTRUE;
1668 }
1669 else
1670 {
1671 /* Setting the priority of any other task down does not
1672 require a yield as the running task must be above the
1673 new priority of the task being modified. */
1674 }
1675
1676 /* Remember the ready list the task might be referenced from
1677 before its uxPriority member is changed so the
1678 taskRESET_READY_PRIORITY() macro can function correctly. */
1679 uxPriorityUsedOnEntry = pxTCB->uxPriority;
1680
1681 #if ( configUSE_MUTEXES == 1 )
1682 {
1683 /* Only change the priority being used if the task is not
1684 currently using an inherited priority. */
1685 if( pxTCB->uxBasePriority == pxTCB->uxPriority )
1686 {
1687 pxTCB->uxPriority = uxNewPriority;
1688 }
1689 else
1690 {
1691 mtCOVERAGE_TEST_MARKER();
1692 }
1693
1694 					/* The base priority gets set regardless. */
1695 pxTCB->uxBasePriority = uxNewPriority;
1696 }
1697 #else
1698 {
1699 pxTCB->uxPriority = uxNewPriority;
1700 }
1701 #endif
1702
1703 /* Only reset the event list item value if the value is not
1704 being used for anything else. */
1705 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
1706 {
1707 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1708 }
1709 else
1710 {
1711 mtCOVERAGE_TEST_MARKER();
1712 }
1713
1714 /* If the task is in the blocked or suspended list we need do
1715 nothing more than change its priority variable. However, if
1716 the task is in a ready list it needs to be removed and placed
1717 in the list appropriate to its new priority. */
1718 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
1719 {
1720 /* The task is currently in its ready list - remove before
1721 					adding it to its new ready list. As we are in a critical
1722 section we can do this even if the scheduler is suspended. */
1723 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1724 {
1725 /* It is known that the task is in its ready list so
1726 there is no need to check again and the port level
1727 reset macro can be called directly. */
1728 portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
1729 }
1730 else
1731 {
1732 mtCOVERAGE_TEST_MARKER();
1733 }
1734 prvAddTaskToReadyList( pxTCB );
1735 }
1736 else
1737 {
1738 mtCOVERAGE_TEST_MARKER();
1739 }
1740
1741 if( xYieldRequired != pdFALSE )
1742 {
1743 taskYIELD_IF_USING_PREEMPTION();
1744 }
1745 else
1746 {
1747 mtCOVERAGE_TEST_MARKER();
1748 }
1749
1750 /* Remove compiler warning about unused variables when the port
1751 optimised task selection is not being used. */
1752 ( void ) uxPriorityUsedOnEntry;
1753 }
1754 }
1755 taskEXIT_CRITICAL();
1756 }
1757
1758#endif /* INCLUDE_vTaskPrioritySet */
1759/*-----------------------------------------------------------*/
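/* Usage sketch (illustrative only, compiled out with #if 0): temporarily
raising a task's priority and then restoring it.  Requires both
INCLUDE_uxTaskPriorityGet and INCLUDE_vTaskPrioritySet; xWorkerHandle is a
hypothetical handle. */
#if 0
    void vBoostWorker( TaskHandle_t xWorkerHandle )
    {
    UBaseType_t uxOriginalPriority = uxTaskPriorityGet( xWorkerHandle );

        /* Out of range priorities are clipped to configMAX_PRIORITIES - 1 by
        vTaskPrioritySet() itself. */
        vTaskPrioritySet( xWorkerHandle, uxOriginalPriority + 1 );

        /* ... time critical work happens in the worker ... */

        vTaskPrioritySet( xWorkerHandle, uxOriginalPriority );
    }
#endif
/*-----------------------------------------------------------*/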
1760
1761#if ( INCLUDE_vTaskSuspend == 1 )
1762
1763 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
1764 {
1765 TCB_t *pxTCB;
1766
1767 taskENTER_CRITICAL();
1768 {
1769 /* If null is passed in here then it is the running task that is
1770 being suspended. */
1771 pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
1772
1773 traceTASK_SUSPEND( pxTCB );
1774
1775 /* Remove task from the ready/delayed list and place in the
1776 suspended list. */
1777 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1778 {
1779 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1780 }
1781 else
1782 {
1783 mtCOVERAGE_TEST_MARKER();
1784 }
1785
1786 /* Is the task waiting on an event also? */
1787 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1788 {
1789 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1790 }
1791 else
1792 {
1793 mtCOVERAGE_TEST_MARKER();
1794 }
1795
1796 vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
1797
1798 #if( configUSE_TASK_NOTIFICATIONS == 1 )
1799 {
1800 if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
1801 {
1802 /* The task was blocked to wait for a notification, but is
1803 now suspended, so no notification was received. */
1804 pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
1805 }
1806 }
1807 #endif
1808 }
1809 taskEXIT_CRITICAL();
1810
1811 if( xSchedulerRunning != pdFALSE )
1812 {
1813 /* Reset the next expected unblock time in case it referred to the
1814 task that is now in the Suspended state. */
1815 taskENTER_CRITICAL();
1816 {
1817 prvResetNextTaskUnblockTime();
1818 }
1819 taskEXIT_CRITICAL();
1820 }
1821 else
1822 {
1823 mtCOVERAGE_TEST_MARKER();
1824 }
1825
1826 if( pxTCB == pxCurrentTCB )
1827 {
1828 if( xSchedulerRunning != pdFALSE )
1829 {
1830 /* The current task has just been suspended. */
1831 configASSERT( uxSchedulerSuspended == 0 );
1832 portYIELD_WITHIN_API();
1833 }
1834 else
1835 {
1836 /* The scheduler is not running, but the task that was pointed
1837 to by pxCurrentTCB has just been suspended and pxCurrentTCB
1838 must be adjusted to point to a different task. */
1839 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
1840 {
1841 /* No other tasks are ready, so set pxCurrentTCB back to
1842 NULL so when the next task is created pxCurrentTCB will
1843 be set to point to it no matter what its relative priority
1844 is. */
1845 pxCurrentTCB = NULL;
1846 }
1847 else
1848 {
1849 vTaskSwitchContext();
1850 }
1851 }
1852 }
1853 else
1854 {
1855 mtCOVERAGE_TEST_MARKER();
1856 }
1857 }
1858
1859#endif /* INCLUDE_vTaskSuspend */
1860/*-----------------------------------------------------------*/
1861
1862#if ( INCLUDE_vTaskSuspend == 1 )
1863
1864 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
1865 {
1866 BaseType_t xReturn = pdFALSE;
1867 const TCB_t * const pxTCB = xTask;
1868
1869 /* Accesses xPendingReadyList so must be called from a critical
1870 section. */
1871
1872 /* It does not make sense to check if the calling task is suspended. */
1873 configASSERT( xTask );
1874
1875 /* Is the task being resumed actually in the suspended list? */
1876 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
1877 {
1878 /* Has the task already been resumed from within an ISR? */
1879 if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
1880 {
1881 /* Is it in the suspended list because it is in the Suspended
1882 				state, or because it is blocked with no timeout? */
1883 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */
1884 {
1885 xReturn = pdTRUE;
1886 }
1887 else
1888 {
1889 mtCOVERAGE_TEST_MARKER();
1890 }
1891 }
1892 else
1893 {
1894 mtCOVERAGE_TEST_MARKER();
1895 }
1896 }
1897 else
1898 {
1899 mtCOVERAGE_TEST_MARKER();
1900 }
1901
1902 return xReturn;
1903 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1904
1905#endif /* INCLUDE_vTaskSuspend */
1906/*-----------------------------------------------------------*/
1907
1908#if ( INCLUDE_vTaskSuspend == 1 )
1909
1910 void vTaskResume( TaskHandle_t xTaskToResume )
1911 {
1912 TCB_t * const pxTCB = xTaskToResume;
1913
1914 /* It does not make sense to resume the calling task. */
1915 configASSERT( xTaskToResume );
1916
1917 /* The parameter cannot be NULL as it is impossible to resume the
1918 currently executing task. */
1919 if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) )
1920 {
1921 taskENTER_CRITICAL();
1922 {
1923 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
1924 {
1925 traceTASK_RESUME( pxTCB );
1926
1927 /* The ready list can be accessed even if the scheduler is
1928 suspended because this is inside a critical section. */
1929 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
1930 prvAddTaskToReadyList( pxTCB );
1931
1932 /* A higher priority task may have just been resumed. */
1933 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
1934 {
1935 /* This yield may not cause the task just resumed to run,
1936 but will leave the lists in the correct state for the
1937 next yield. */
1938 taskYIELD_IF_USING_PREEMPTION();
1939 }
1940 else
1941 {
1942 mtCOVERAGE_TEST_MARKER();
1943 }
1944 }
1945 else
1946 {
1947 mtCOVERAGE_TEST_MARKER();
1948 }
1949 }
1950 taskEXIT_CRITICAL();
1951 }
1952 else
1953 {
1954 mtCOVERAGE_TEST_MARKER();
1955 }
1956 }
1957
1958#endif /* INCLUDE_vTaskSuspend */
1959
1960/*-----------------------------------------------------------*/
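/* Usage sketch (illustrative only, compiled out with #if 0): pausing and later
resuming a worker task from another task.  Suspend/resume is not a
synchronisation mechanism: the worker can be suspended at any point, so if it
holds a mutex or is mid-update it stays that way until resumed.  The handle
is hypothetical. */
#if 0
    void vPauseWorker( TaskHandle_t xWorkerHandle )
    {
        vTaskSuspend( xWorkerHandle );

        /* ... the worker consumes no CPU time while suspended ... */

        vTaskResume( xWorkerHandle );
    }
#endif
/*-----------------------------------------------------------*/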
1961
1962#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
1963
1964 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
1965 {
1966 BaseType_t xYieldRequired = pdFALSE;
1967 TCB_t * const pxTCB = xTaskToResume;
1968 UBaseType_t uxSavedInterruptStatus;
1969
1970 configASSERT( xTaskToResume );
1971
1972 /* RTOS ports that support interrupt nesting have the concept of a
1973 maximum system call (or maximum API call) interrupt priority.
1974 		Interrupts that are above the maximum system call priority are kept
1975 permanently enabled, even when the RTOS kernel is in a critical section,
1976 but cannot make any calls to FreeRTOS API functions. If configASSERT()
1977 is defined in FreeRTOSConfig.h then
1978 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1979 failure if a FreeRTOS API function is called from an interrupt that has
1980 been assigned a priority above the configured maximum system call
1981 priority. Only FreeRTOS functions that end in FromISR can be called
1982 from interrupts that have been assigned a priority at or (logically)
1983 below the maximum system call interrupt priority. FreeRTOS maintains a
1984 separate interrupt safe API to ensure interrupt entry is as fast and as
1985 simple as possible. More information (albeit Cortex-M specific) is
1986 provided on the following link:
1987 https://www.freertos.org/RTOS-Cortex-M3-M4.html */
1988 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1989
1990 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1991 {
1992 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
1993 {
1994 traceTASK_RESUME_FROM_ISR( pxTCB );
1995
1996 /* Check the ready lists can be accessed. */
1997 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
1998 {
1999 /* Ready lists can be accessed so move the task from the
2000 suspended list to the ready list directly. */
2001 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
2002 {
2003 xYieldRequired = pdTRUE;
2004 }
2005 else
2006 {
2007 mtCOVERAGE_TEST_MARKER();
2008 }
2009
2010 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2011 prvAddTaskToReadyList( pxTCB );
2012 }
2013 else
2014 {
2015 /* The delayed or ready lists cannot be accessed so the task
2016 is held in the pending ready list until the scheduler is
2017 unsuspended. */
2018 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
2019 }
2020 }
2021 else
2022 {
2023 mtCOVERAGE_TEST_MARKER();
2024 }
2025 }
2026 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
2027
2028 return xYieldRequired;
2029 }
2030
2031#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
2032/*-----------------------------------------------------------*/
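/* Usage sketch (illustrative only, compiled out with #if 0): resuming a task
from an interrupt handler.  The handler and handle names are hypothetical,
and the exact end-of-ISR yield macro (portYIELD_FROM_ISR() is shown) is port
specific.  Note the FreeRTOS documentation cautions against using
xTaskResumeFromISR() as a general synchronisation mechanism, because a resume
request issued before the task has suspended itself is lost; notifications or
semaphores are usually a better fit. */
#if 0
    extern TaskHandle_t xWorkerHandle; /* Hypothetical handle created elsewhere. */

    void vExampleInterruptHandler( void )
    {
    BaseType_t xYieldRequired;

        /* Clear the interrupt source in hardware here, then unblock the worker. */
        xYieldRequired = xTaskResumeFromISR( xWorkerHandle );

        /* If the resumed task has a priority at or above the running task,
        request a context switch on exit from the ISR. */
        portYIELD_FROM_ISR( xYieldRequired );
    }
#endif
/*-----------------------------------------------------------*/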
2033
2034void vTaskStartScheduler( void )
2035{
2036BaseType_t xReturn;
2037
2038 /* Add the idle task at the lowest priority. */
2039 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
2040 {
2041 StaticTask_t *pxIdleTaskTCBBuffer = NULL;
2042 StackType_t *pxIdleTaskStackBuffer = NULL;
2043 uint32_t ulIdleTaskStackSize;
2044
2045 /* The Idle task is created using user provided RAM - obtain the
2046 address of the RAM then create the idle task. */
2047 vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
2048 xIdleTaskHandle = xTaskCreateStatic( prvIdleTask,
2049 configIDLE_TASK_NAME,
2050 ulIdleTaskStackSize,
2051 ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */
2052 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
2053 pxIdleTaskStackBuffer,
2054 pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2055
2056 if( xIdleTaskHandle != NULL )
2057 {
2058 xReturn = pdPASS;
2059 }
2060 else
2061 {
2062 xReturn = pdFAIL;
2063 }
2064 }
2065 #else
2066 {
2067 /* The Idle task is being created using dynamically allocated RAM. */
2068 xReturn = xTaskCreate( prvIdleTask,
2069 configIDLE_TASK_NAME,
2070 configMINIMAL_STACK_SIZE,
2071 ( void * ) NULL,
2072 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
2073 &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2074 }
2075 #endif /* configSUPPORT_STATIC_ALLOCATION */
2076
2077 #if ( configUSE_TIMERS == 1 )
2078 {
2079 if( xReturn == pdPASS )
2080 {
2081 xReturn = xTimerCreateTimerTask();
2082 }
2083 else
2084 {
2085 mtCOVERAGE_TEST_MARKER();
2086 }
2087 }
2088 #endif /* configUSE_TIMERS */
2089
2090 if( xReturn == pdPASS )
2091 {
2092 /* freertos_tasks_c_additions_init() should only be called if the user
2093 definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
2094 the only macro called by the function. */
2095 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
2096 {
2097 freertos_tasks_c_additions_init();
2098 }
2099 #endif
2100
2101 /* Interrupts are turned off here, to ensure a tick does not occur
2102 before or during the call to xPortStartScheduler(). The stacks of
2103 the created tasks contain a status word with interrupts switched on
2104 so interrupts will automatically get re-enabled when the first task
2105 starts to run. */
2106 portDISABLE_INTERRUPTS();
2107
2108 #if ( configUSE_NEWLIB_REENTRANT == 1 )
2109 {
2110 /* Switch Newlib's _impure_ptr variable to point to the _reent
2111 structure specific to the task that will run first. */
2112 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
2113 }
2114 #endif /* configUSE_NEWLIB_REENTRANT */
2115
2116 xNextTaskUnblockTime = portMAX_DELAY;
2117 xSchedulerRunning = pdTRUE;
2118 xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
2119
2120 /* If configGENERATE_RUN_TIME_STATS is defined then the following
2121 macro must be defined to configure the timer/counter used to generate
2122 the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS
2123 is set to 0 and the following line fails to build then ensure you do not
2124 have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
2125 FreeRTOSConfig.h file. */
2126 portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
2127
2128 traceTASK_SWITCHED_IN();
2129
2130 /* Setting up the timer tick is hardware specific and thus in the
2131 portable interface. */
2132 if( xPortStartScheduler() != pdFALSE )
2133 {
2134 /* Should not reach here as if the scheduler is running the
2135 function will not return. */
2136 }
2137 else
2138 {
2139 /* Should only reach here if a task calls xTaskEndScheduler(). */
2140 }
2141 }
2142 else
2143 {
2144 /* This line will only be reached if the kernel could not be started,
2145 because there was not enough FreeRTOS heap to create the idle task
2146 or the timer task. */
2147 configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
2148 }
2149
2150 /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
2151 meaning xIdleTaskHandle is not used anywhere else. */
2152 ( void ) xIdleTaskHandle;
2153}
2154/*-----------------------------------------------------------*/
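/* Usage sketch (illustrative only, compiled out with #if 0): the typical
start-up sequence - create at least one application task, then hand control
to the kernel.  vTaskStartScheduler() only returns if the idle or timer task
could not be created (for example, insufficient heap when dynamic allocation
is used).  The task function name is hypothetical. */
#if 0
    int main( void )
    {
        xTaskCreate( prvApplicationTask,        /* Function implementing the task. */
                     "App",                     /* Human readable name. */
                     configMINIMAL_STACK_SIZE,  /* Stack depth in words. */
                     NULL,                      /* Task parameter. */
                     tskIDLE_PRIORITY + 1,      /* Priority. */
                     NULL );                    /* Handle not required. */

        vTaskStartScheduler();

        /* Only reached if there was insufficient heap to start the kernel. */
        for( ;; );
    }
#endif
/*-----------------------------------------------------------*/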
2155
2156void vTaskEndScheduler( void )
2157{
2158 /* Stop the scheduler interrupts and call the portable scheduler end
2159 routine so the original ISRs can be restored if necessary. The port
2160 	layer must ensure the interrupt enable bit is left in the correct state. */
2161 portDISABLE_INTERRUPTS();
2162 xSchedulerRunning = pdFALSE;
2163 vPortEndScheduler();
2164}
2165/*----------------------------------------------------------*/
2166
2167void vTaskSuspendAll( void )
2168{
2169 /* A critical section is not required as the variable is of type
2170 BaseType_t. Please read Richard Barry's reply in the following link to a
2171 post in the FreeRTOS support forum before reporting this as a bug! -
2172 http://goo.gl/wu4acr */
2173 ++uxSchedulerSuspended;
2174 portMEMORY_BARRIER();
2175}
2176/*----------------------------------------------------------*/
2177
2178#if ( configUSE_TICKLESS_IDLE != 0 )
2179
2180 static TickType_t prvGetExpectedIdleTime( void )
2181 {
2182 TickType_t xReturn;
2183 UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
2184
2185 /* uxHigherPriorityReadyTasks takes care of the case where
2186 	configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
2187 	that are in the Ready state, even though the idle task is
2188 running. */
2189 #if( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
2190 {
2191 if( uxTopReadyPriority > tskIDLE_PRIORITY )
2192 {
2193 uxHigherPriorityReadyTasks = pdTRUE;
2194 }
2195 }
2196 #else
2197 {
2198 const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;
2199
2200 /* When port optimised task selection is used the uxTopReadyPriority
2201 variable is used as a bit map. If bits other than the least
2202 significant bit are set then there are tasks that have a priority
2203 above the idle priority that are in the Ready state. This takes
2204 care of the case where the co-operative scheduler is in use. */
2205 if( uxTopReadyPriority > uxLeastSignificantBit )
2206 {
2207 uxHigherPriorityReadyTasks = pdTRUE;
2208 }
2209 }
2210 #endif
2211
2212 if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
2213 {
2214 xReturn = 0;
2215 }
2216 else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
2217 {
2218 /* There are other idle priority tasks in the ready state. If
2219 time slicing is used then the very next tick interrupt must be
2220 processed. */
2221 xReturn = 0;
2222 }
2223 else if( uxHigherPriorityReadyTasks != pdFALSE )
2224 {
2225 /* There are tasks in the Ready state that have a priority above the
2226 idle priority. This path can only be reached if
2227 configUSE_PREEMPTION is 0. */
2228 xReturn = 0;
2229 }
2230 else
2231 {
2232 xReturn = xNextTaskUnblockTime - xTickCount;
2233 }
2234
2235 return xReturn;
2236 }
2237
2238#endif /* configUSE_TICKLESS_IDLE */
2239/*----------------------------------------------------------*/
2240
2241BaseType_t xTaskResumeAll( void )
2242{
2243TCB_t *pxTCB = NULL;
2244BaseType_t xAlreadyYielded = pdFALSE;
2245
2246 /* If uxSchedulerSuspended is zero then this function does not match a
2247 previous call to vTaskSuspendAll(). */
2248 configASSERT( uxSchedulerSuspended );
2249
2250 /* It is possible that an ISR caused a task to be removed from an event
2251 list while the scheduler was suspended. If this was the case then the
2252 removed task will have been added to the xPendingReadyList. Once the
2253 scheduler has been resumed it is safe to move all the pending ready
2254 tasks from this list into their appropriate ready list. */
2255 taskENTER_CRITICAL();
2256 {
2257 --uxSchedulerSuspended;
2258
2259 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
2260 {
2261 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
2262 {
2263 /* Move any readied tasks from the pending list into the
2264 appropriate ready list. */
2265 while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
2266 {
2267 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2268 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2269 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2270 prvAddTaskToReadyList( pxTCB );
2271
2272 /* If the moved task has a priority higher than the current
2273 task then a yield must be performed. */
2274 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
2275 {
2276 xYieldPending = pdTRUE;
2277 }
2278 else
2279 {
2280 mtCOVERAGE_TEST_MARKER();
2281 }
2282 }
2283
2284 if( pxTCB != NULL )
2285 {
2286 /* A task was unblocked while the scheduler was suspended,
2287 which may have prevented the next unblock time from being
2288 re-calculated, in which case re-calculate it now. Mainly
2289 important for low power tickless implementations, where
2290 this can prevent an unnecessary exit from low power
2291 state. */
2292 prvResetNextTaskUnblockTime();
2293 }
2294
2295 /* If any ticks occurred while the scheduler was suspended then
2296 they should be processed now. This ensures the tick count does
2297 not slip, and that any delayed tasks are resumed at the correct
2298 time. */
2299 {
2300 UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. */
2301
2302 if( uxPendedCounts > ( UBaseType_t ) 0U )
2303 {
2304 do
2305 {
2306 if( xTaskIncrementTick() != pdFALSE )
2307 {
2308 xYieldPending = pdTRUE;
2309 }
2310 else
2311 {
2312 mtCOVERAGE_TEST_MARKER();
2313 }
2314 --uxPendedCounts;
2315 } while( uxPendedCounts > ( UBaseType_t ) 0U );
2316
2317 uxPendedTicks = 0;
2318 }
2319 else
2320 {
2321 mtCOVERAGE_TEST_MARKER();
2322 }
2323 }
2324
2325 if( xYieldPending != pdFALSE )
2326 {
2327 #if( configUSE_PREEMPTION != 0 )
2328 {
2329 xAlreadyYielded = pdTRUE;
2330 }
2331 #endif
2332 taskYIELD_IF_USING_PREEMPTION();
2333 }
2334 else
2335 {
2336 mtCOVERAGE_TEST_MARKER();
2337 }
2338 }
2339 }
2340 else
2341 {
2342 mtCOVERAGE_TEST_MARKER();
2343 }
2344 }
2345 taskEXIT_CRITICAL();
2346
2347 return xAlreadyYielded;
2348}
2349/*-----------------------------------------------------------*/
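/* Usage sketch (illustrative only, compiled out with #if 0): pairing
vTaskSuspendAll() with xTaskResumeAll() so a structure shared between tasks
can be walked without being switched out.  Interrupts remain enabled, so this
does not protect data that is also touched from ISRs - use a critical section
for that. */
#if 0
    void vWalkSharedStructure( void )
    {
        vTaskSuspendAll();
        {
            /* No other task can run here, so the shared structure cannot
            change underneath us.  Keep this region short: ticks that occur
            while the scheduler is suspended are pended and processed when
            xTaskResumeAll() is called. */
        }
        ( void ) xTaskResumeAll();
    }
#endif
/*-----------------------------------------------------------*/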
2350
2351TickType_t xTaskGetTickCount( void )
2352{
2353TickType_t xTicks;
2354
2355 /* Critical section required if running on a 16 bit processor. */
2356 portTICK_TYPE_ENTER_CRITICAL();
2357 {
2358 xTicks = xTickCount;
2359 }
2360 portTICK_TYPE_EXIT_CRITICAL();
2361
2362 return xTicks;
2363}
2364/*-----------------------------------------------------------*/
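/* Usage sketch (illustrative only, compiled out with #if 0): measuring elapsed
time with the tick count.  Because TickType_t is unsigned, the subtraction
remains correct even if the tick count overflows between the two reads,
provided the measured interval is shorter than one full wrap of the
counter. */
#if 0
    void vMeasureSomething( void )
    {
    TickType_t xStart, xElapsedTicks;

        xStart = xTaskGetTickCount();

        /* ... the operation being timed ... */

        xElapsedTicks = xTaskGetTickCount() - xStart;
        ( void ) xElapsedTicks;
    }
#endif
/*-----------------------------------------------------------*/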
2365
2366TickType_t xTaskGetTickCountFromISR( void )
2367{
2368TickType_t xReturn;
2369UBaseType_t uxSavedInterruptStatus;
2370
2371 /* RTOS ports that support interrupt nesting have the concept of a maximum
2372 system call (or maximum API call) interrupt priority. Interrupts that are
2373 above the maximum system call priority are kept permanently enabled, even
2374 when the RTOS kernel is in a critical section, but cannot make any calls to
2375 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2376 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2377 failure if a FreeRTOS API function is called from an interrupt that has been
2378 assigned a priority above the configured maximum system call priority.
2379 Only FreeRTOS functions that end in FromISR can be called from interrupts
2380 that have been assigned a priority at or (logically) below the maximum
2381 system call interrupt priority. FreeRTOS maintains a separate interrupt
2382 safe API to ensure interrupt entry is as fast and as simple as possible.
2383 More information (albeit Cortex-M specific) is provided on the following
2384 link: https://www.freertos.org/RTOS-Cortex-M3-M4.html */
2385 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2386
2387 uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
2388 {
2389 xReturn = xTickCount;
2390 }
2391 portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
2392
2393 return xReturn;
2394}
2395/*-----------------------------------------------------------*/
2396
2397UBaseType_t uxTaskGetNumberOfTasks( void )
2398{
2399 /* A critical section is not required because the variables are of type
2400 BaseType_t. */
2401 return uxCurrentNumberOfTasks;
2402}
2403/*-----------------------------------------------------------*/
2404
2405char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2406{
2407TCB_t *pxTCB;
2408
2409 /* If null is passed in here then the name of the calling task is being
2410 queried. */
2411 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
shijie.xiong392d3962022-03-17 14:07:27 +08002412#ifdef CONFIG_DMALLOC
Xiaohu.Huang2c96ef42021-10-15 16:12:27 +08002413 if (pxTCB == NULL)
2414 return NULL;
2415#else
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002416 configASSERT( pxTCB );
Xiaohu.Huang2c96ef42021-10-15 16:12:27 +08002417#endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002418 return &( pxTCB->pcTaskName[ 0 ] );
2419}
Kelvin Zhang7f929772021-12-31 17:58:17 +08002420
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002421#if ( INCLUDE_xTaskGetHandle == 1 )
2422
2423 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] )
2424 {
2425 TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL;
2426 UBaseType_t x;
2427 char cNextChar;
2428 BaseType_t xBreakLoop;
2429
2430 /* This function is called with the scheduler suspended. */
2431
2432 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
2433 {
2434 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2435
2436 do
2437 {
2438 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2439
2440 /* Check each character in the name looking for a match or
2441 mismatch. */
2442 xBreakLoop = pdFALSE;
2443 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
2444 {
2445 cNextChar = pxNextTCB->pcTaskName[ x ];
2446
2447 if( cNextChar != pcNameToQuery[ x ] )
2448 {
2449 /* Characters didn't match. */
2450 xBreakLoop = pdTRUE;
2451 }
2452 else if( cNextChar == ( char ) 0x00 )
2453 {
2454 /* Both strings terminated, a match must have been
2455 found. */
2456 pxReturn = pxNextTCB;
2457 xBreakLoop = pdTRUE;
2458 }
2459 else
2460 {
2461 mtCOVERAGE_TEST_MARKER();
2462 }
2463
2464 if( xBreakLoop != pdFALSE )
2465 {
2466 break;
2467 }
2468 }
2469
2470 if( pxReturn != NULL )
2471 {
2472 /* The handle has been found. */
2473 break;
2474 }
2475
2476 } while( pxNextTCB != pxFirstTCB );
2477 }
2478 else
2479 {
2480 mtCOVERAGE_TEST_MARKER();
2481 }
2482
2483 return pxReturn;
2484 }
2485
2486#endif /* INCLUDE_xTaskGetHandle */
2487/*-----------------------------------------------------------*/
2488
2489#if ( INCLUDE_xTaskGetHandle == 1 )
2490
2491 TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2492 {
2493 UBaseType_t uxQueue = configMAX_PRIORITIES;
2494 TCB_t* pxTCB;
2495
2496 /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
2497 configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
2498
2499 vTaskSuspendAll();
2500 {
2501 /* Search the ready lists. */
2502 do
2503 {
2504 uxQueue--;
2505 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );
2506
2507 if( pxTCB != NULL )
2508 {
2509 /* Found the handle. */
2510 break;
2511 }
2512
2513 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2514
2515 /* Search the delayed lists. */
2516 if( pxTCB == NULL )
2517 {
2518 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
2519 }
2520
2521 if( pxTCB == NULL )
2522 {
2523 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
2524 }
2525
2526 #if ( INCLUDE_vTaskSuspend == 1 )
2527 {
2528 if( pxTCB == NULL )
2529 {
2530 /* Search the suspended list. */
2531 pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
2532 }
2533 }
2534 #endif
2535
2536 #if( INCLUDE_vTaskDelete == 1 )
2537 {
2538 if( pxTCB == NULL )
2539 {
2540 /* Search the deleted list. */
2541 pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
2542 }
2543 }
2544 #endif
2545 }
2546 ( void ) xTaskResumeAll();
2547
2548 return pxTCB;
2549 }
2550
2551#endif /* INCLUDE_xTaskGetHandle */
2552/*-----------------------------------------------------------*/
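/* Usage sketch (illustrative only, compiled out with #if 0): looking a task up
by the name it was created with, then operating on the returned handle.  The
name "Worker" is hypothetical, and the look-up is relatively slow (it walks
every task list with the scheduler suspended), so handles are normally stored
at creation time instead.  Requires INCLUDE_xTaskGetHandle. */
#if 0
    void vSuspendByName( void )
    {
    TaskHandle_t xHandle = xTaskGetHandle( "Worker" );

        if( xHandle != NULL )
        {
            vTaskSuspend( xHandle );
        }
    }
#endif
/*-----------------------------------------------------------*/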
2553
2554#if ( configUSE_TRACE_FACILITY == 1 )
2555
2556 UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
2557 {
2558 UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
2559
2560 vTaskSuspendAll();
2561 {
2562 /* Is there a space in the array for each task in the system? */
2563 if( uxArraySize >= uxCurrentNumberOfTasks )
2564 {
2565 				/* Fill in a TaskStatus_t structure with information on each
2566 task in the Ready state. */
2567 do
2568 {
2569 uxQueue--;
2570 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
2571
2572 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2573
2574 				/* Fill in a TaskStatus_t structure with information on each
2575 task in the Blocked state. */
2576 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
2577 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
2578
2579 #if( INCLUDE_vTaskDelete == 1 )
2580 {
2581 					/* Fill in a TaskStatus_t structure with information on
2582 each task that has been deleted but not yet cleaned up. */
2583 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
2584 }
2585 #endif
2586
2587 #if ( INCLUDE_vTaskSuspend == 1 )
2588 {
2589 					/* Fill in a TaskStatus_t structure with information on
2590 each task in the Suspended state. */
2591 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
2592 }
2593 #endif
2594
2595 #if ( configGENERATE_RUN_TIME_STATS == 1)
2596 {
2597 if( pulTotalRunTime != NULL )
2598 {
2599 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2600 portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
2601 #else
Xiaohu.Huang2c96ef42021-10-15 16:12:27 +08002602 *pulTotalRunTime = (uint32_t)portGET_RUN_TIME_COUNTER_VALUE();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08002603 #endif
2604 }
2605 }
2606 #else
2607 {
2608 if( pulTotalRunTime != NULL )
2609 {
2610 *pulTotalRunTime = 0;
2611 }
2612 }
2613 #endif
2614 }
2615 else
2616 {
2617 mtCOVERAGE_TEST_MARKER();
2618 }
2619 }
2620 ( void ) xTaskResumeAll();
2621
2622 return uxTask;
2623 }
2624
2625#endif /* configUSE_TRACE_FACILITY */
2626/*----------------------------------------------------------*/
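/* Usage sketch (illustrative only, compiled out with #if 0): taking a snapshot
of every task in the system.  The array is sized from
uxTaskGetNumberOfTasks() immediately before the call; if a task is created in
between, uxTaskGetSystemState() returns 0 because the array is too small.
Assumes configUSE_TRACE_FACILITY is 1 and, for the heap calls shown, that
dynamic allocation is available. */
#if 0
    void vTakeTaskSnapshot( void )
    {
    TaskStatus_t *pxStatusArray;
    UBaseType_t uxArraySize, x;
    uint32_t ulTotalRunTime;

        uxArraySize = uxTaskGetNumberOfTasks();
        pxStatusArray = pvPortMalloc( uxArraySize * sizeof( TaskStatus_t ) );

        if( pxStatusArray != NULL )
        {
            uxArraySize = uxTaskGetSystemState( pxStatusArray, uxArraySize, &ulTotalRunTime );

            for( x = 0; x < uxArraySize; x++ )
            {
                /* Inspect pxStatusArray[ x ].pcTaskName,
                pxStatusArray[ x ].eCurrentState, etc. here. */
            }

            vPortFree( pxStatusArray );
        }
    }
#endif
/*-----------------------------------------------------------*/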
2627
2628#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
2629
2630 TaskHandle_t xTaskGetIdleTaskHandle( void )
2631 {
2632 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
2633 started, then xIdleTaskHandle will be NULL. */
2634 configASSERT( ( xIdleTaskHandle != NULL ) );
2635 return xIdleTaskHandle;
2636 }
2637
2638#endif /* INCLUDE_xTaskGetIdleTaskHandle */
2639/*----------------------------------------------------------*/
2640
2641/* This conditional compilation should use inequality to 0, not equality to 1.
2642This is to ensure vTaskStepTick() is available when user defined low power mode
2643implementations require configUSE_TICKLESS_IDLE to be set to a value other than
26441. */
2645#if ( configUSE_TICKLESS_IDLE != 0 )
2646
2647 void vTaskStepTick( const TickType_t xTicksToJump )
2648 {
2649 /* Correct the tick count value after a period during which the tick
2650 was suppressed. Note this does *not* call the tick hook function for
2651 each stepped tick. */
2652 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
2653 xTickCount += xTicksToJump;
2654 traceINCREASE_TICK_COUNT( xTicksToJump );
2655 }
2656
2657#endif /* configUSE_TICKLESS_IDLE */
2658/*----------------------------------------------------------*/
2659
2660#if ( INCLUDE_xTaskAbortDelay == 1 )
2661
2662 BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
2663 {
2664 TCB_t *pxTCB = xTask;
2665 BaseType_t xReturn;
2666
2667 configASSERT( pxTCB );
2668
2669 vTaskSuspendAll();
2670 {
2671 /* A task can only be prematurely removed from the Blocked state if
2672 it is actually in the Blocked state. */
2673 if( eTaskGetState( xTask ) == eBlocked )
2674 {
2675 xReturn = pdPASS;
2676
2677 /* Remove the reference to the task from the blocked list. An
2678 interrupt won't touch the xStateListItem because the
2679 scheduler is suspended. */
2680 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2681
2682 /* Is the task waiting on an event also? If so remove it from
2683 the event list too. Interrupts can touch the event list item,
2684 even though the scheduler is suspended, so a critical section
2685 is used. */
2686 taskENTER_CRITICAL();
2687 {
2688 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2689 {
2690 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2691 pxTCB->ucDelayAborted = pdTRUE;
2692 }
2693 else
2694 {
2695 mtCOVERAGE_TEST_MARKER();
2696 }
2697 }
2698 taskEXIT_CRITICAL();
2699
2700 /* Place the unblocked task into the appropriate ready list. */
2701 prvAddTaskToReadyList( pxTCB );
2702
2703 /* A task being unblocked cannot cause an immediate context
2704 switch if preemption is turned off. */
2705 #if ( configUSE_PREEMPTION == 1 )
2706 {
2707 /* Preemption is on, but a context switch should only be
2708 performed if the unblocked task has a priority that is
2709 equal to or higher than the currently executing task. */
2710 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
2711 {
2712 /* Pend the yield to be performed when the scheduler
2713 is unsuspended. */
2714 xYieldPending = pdTRUE;
2715 }
2716 else
2717 {
2718 mtCOVERAGE_TEST_MARKER();
2719 }
2720 }
2721 #endif /* configUSE_PREEMPTION */
2722 }
2723 else
2724 {
2725 xReturn = pdFAIL;
2726 }
2727 }
2728 ( void ) xTaskResumeAll();
2729
2730 return xReturn;
2731 }
2732
2733#endif /* INCLUDE_xTaskAbortDelay */
2734/*----------------------------------------------------------*/
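/* Usage sketch (illustrative only, compiled out with #if 0): forcing a task
out of the Blocked state before its timeout expires.  The call only succeeds
if the task really is Blocked, and the unblocked task typically sees its
blocking call return as if it had timed out.  Requires INCLUDE_xTaskAbortDelay;
the handle is hypothetical. */
#if 0
    void vKickWorker( TaskHandle_t xWorkerHandle )
    {
        if( xTaskAbortDelay( xWorkerHandle ) == pdPASS )
        {
            /* The worker was Blocked and has been moved to the Ready state. */
        }
        else
        {
            /* The worker was not Blocked, so nothing changed. */
        }
    }
#endif
/*-----------------------------------------------------------*/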
2735
2736BaseType_t xTaskIncrementTick( void )
2737{
2738TCB_t * pxTCB;
2739TickType_t xItemValue;
2740BaseType_t xSwitchRequired = pdFALSE;
2741
2742 /* Called by the portable layer each time a tick interrupt occurs.
2743 Increments the tick then checks to see if the new tick value will cause any
2744 tasks to be unblocked. */
2745 traceTASK_INCREMENT_TICK( xTickCount );
2746 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
2747 {
2748 /* Minor optimisation. The tick count cannot change in this
2749 block. */
2750 const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
2751
2752 /* Increment the RTOS tick, switching the delayed and overflowed
2753 delayed lists if it wraps to 0. */
2754 xTickCount = xConstTickCount;
2755
2756 if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
2757 {
2758 taskSWITCH_DELAYED_LISTS();
2759 }
2760 else
2761 {
2762 mtCOVERAGE_TEST_MARKER();
2763 }
2764
2765 /* See if this tick has made a timeout expire. Tasks are stored in
2766 the queue in the order of their wake time - meaning once one task
2767 has been found whose block time has not expired there is no need to
2768 look any further down the list. */
2769 if( xConstTickCount >= xNextTaskUnblockTime )
2770 {
2771 for( ;; )
2772 {
2773 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
2774 {
2775 /* The delayed list is empty. Set xNextTaskUnblockTime
2776 to the maximum possible value so it is extremely
2777 unlikely that the
2778 if( xTickCount >= xNextTaskUnblockTime ) test will pass
2779 next time through. */
2780 xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2781 break;
2782 }
2783 else
2784 {
2785 /* The delayed list is not empty, get the value of the
2786 item at the head of the delayed list. This is the time
2787 at which the task at the head of the delayed list must
2788 be removed from the Blocked state. */
2789 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2790 xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
2791
2792 if( xConstTickCount < xItemValue )
2793 {
2794 /* It is not time to unblock this item yet, but the
2795 item value is the time at which the task at the head
2796 of the blocked list must be removed from the Blocked
2797 state - so record the item value in
2798 xNextTaskUnblockTime. */
2799 xNextTaskUnblockTime = xItemValue;
2800 						break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
2801 }
2802 else
2803 {
2804 mtCOVERAGE_TEST_MARKER();
2805 }
2806
2807 /* It is time to remove the item from the Blocked state. */
2808 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2809
2810 /* Is the task waiting on an event also? If so remove
2811 it from the event list. */
2812 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2813 {
2814 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2815 }
2816 else
2817 {
2818 mtCOVERAGE_TEST_MARKER();
2819 }
2820
2821 /* Place the unblocked task into the appropriate ready
2822 list. */
2823 prvAddTaskToReadyList( pxTCB );
2824
2825 /* A task being unblocked cannot cause an immediate
2826 context switch if preemption is turned off. */
2827 #if ( configUSE_PREEMPTION == 1 )
2828 {
2829 /* Preemption is on, but a context switch should
2830 only be performed if the unblocked task has a
2831 priority that is equal to or higher than the
2832 currently executing task. */
2833 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
2834 {
2835 xSwitchRequired = pdTRUE;
2836 }
2837 else
2838 {
2839 mtCOVERAGE_TEST_MARKER();
2840 }
2841 }
2842 #endif /* configUSE_PREEMPTION */
2843 }
2844 }
2845 }
2846
2847 /* Tasks of equal priority to the currently running task will share
2848 processing time (time slice) if preemption is on, and the application
2849 writer has not explicitly turned time slicing off. */
2850 #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
2851 {
2852 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 )
2853 {
2854 xSwitchRequired = pdTRUE;
2855 }
2856 else
2857 {
2858 mtCOVERAGE_TEST_MARKER();
2859 }
2860 }
2861 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
2862
2863 #if ( configUSE_TICK_HOOK == 1 )
2864 {
2865 /* Guard against the tick hook being called when the pended tick
2866 count is being unwound (when the scheduler is being unlocked). */
2867 if( uxPendedTicks == ( UBaseType_t ) 0U )
2868 {
2869 vApplicationTickHook();
2870 }
2871 else
2872 {
2873 mtCOVERAGE_TEST_MARKER();
2874 }
2875 }
2876 #endif /* configUSE_TICK_HOOK */
2877 }
2878 else
2879 {
2880 ++uxPendedTicks;
2881
2882 /* The tick hook gets called at regular intervals, even if the
2883 scheduler is locked. */
2884 #if ( configUSE_TICK_HOOK == 1 )
2885 {
2886 vApplicationTickHook();
2887 }
2888 #endif
2889 }
2890
2891 #if ( configUSE_PREEMPTION == 1 )
2892 {
2893 if( xYieldPending != pdFALSE )
2894 {
2895 xSwitchRequired = pdTRUE;
2896 }
2897 else
2898 {
2899 mtCOVERAGE_TEST_MARKER();
2900 }
2901 }
2902 #endif /* configUSE_PREEMPTION */
2903
2904 return xSwitchRequired;
2905}
2906/*-----------------------------------------------------------*/
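/* Usage sketch (illustrative only, compiled out with #if 0):
xTaskIncrementTick() is called from the port layer's tick interrupt, not from
application code.  The exact shape of that handler is port specific; many
ports provide an end-of-ISR yield macro such as portEND_SWITCHING_ISR() or
portYIELD_FROM_ISR() that takes the value returned here.  The handler name
below is hypothetical. */
#if 0
    void vExampleTickHandler( void )
    {
    BaseType_t xSwitchRequired;

        /* Clear the timer interrupt in hardware here (port specific). */

        /* Increment the tick; if pdTRUE is returned a context switch should
        be requested before the interrupt exits. */
        xSwitchRequired = xTaskIncrementTick();
        portEND_SWITCHING_ISR( xSwitchRequired );
    }
#endif
/*-----------------------------------------------------------*/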
2907
2908#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2909
2910 void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
2911 {
2912 TCB_t *xTCB;
2913
2914 /* If xTask is NULL then it is the task hook of the calling task that is
2915 getting set. */
2916 if( xTask == NULL )
2917 {
2918 xTCB = ( TCB_t * ) pxCurrentTCB;
2919 }
2920 else
2921 {
2922 xTCB = xTask;
2923 }
2924
2925 /* Save the hook function in the TCB. A critical section is required as
2926 the value can be accessed from an interrupt. */
2927 taskENTER_CRITICAL();
2928 {
2929 xTCB->pxTaskTag = pxHookFunction;
2930 }
2931 taskEXIT_CRITICAL();
2932 }
2933
2934#endif /* configUSE_APPLICATION_TASK_TAG */
2935/*-----------------------------------------------------------*/
2936
2937#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2938
2939 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
2940 {
2941 TCB_t *pxTCB;
2942 TaskHookFunction_t xReturn;
2943
2944 		/* If xTask is NULL then the hook of the calling task is being queried. */
2945 pxTCB = prvGetTCBFromHandle( xTask );
2946
2947 		/* Read the hook function from the TCB. A critical section is required as
2948 the value can be accessed from an interrupt. */
2949 taskENTER_CRITICAL();
2950 {
2951 xReturn = pxTCB->pxTaskTag;
2952 }
2953 taskEXIT_CRITICAL();
2954
2955 return xReturn;
2956 }
2957
2958#endif /* configUSE_APPLICATION_TASK_TAG */
2959/*-----------------------------------------------------------*/
2960
2961#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2962
2963 TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
2964 {
2965 TCB_t *pxTCB;
2966 TaskHookFunction_t xReturn;
2967 UBaseType_t uxSavedInterruptStatus;
2968
2969 		/* If xTask is NULL then the hook of the calling task is being queried. */
2970 pxTCB = prvGetTCBFromHandle( xTask );
2971
2972 		/* Read the hook function from the TCB. A critical section is required as
2973 the value can be accessed from an interrupt. */
2974 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
2975 {
2976 xReturn = pxTCB->pxTaskTag;
2977 }
2978 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
2979
2980 return xReturn;
2981 }
2982
2983#endif /* configUSE_APPLICATION_TASK_TAG */
2984/*-----------------------------------------------------------*/
2985
2986#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2987
2988 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
2989 {
2990 TCB_t *xTCB;
2991 BaseType_t xReturn;
2992
2993 /* If xTask is NULL then we are calling our own task hook. */
2994 if( xTask == NULL )
2995 {
2996 xTCB = pxCurrentTCB;
2997 }
2998 else
2999 {
3000 xTCB = xTask;
3001 }
3002
3003 if( xTCB->pxTaskTag != NULL )
3004 {
3005 xReturn = xTCB->pxTaskTag( pvParameter );
3006 }
3007 else
3008 {
3009 xReturn = pdFAIL;
3010 }
3011
3012 return xReturn;
3013 }
3014
3015#endif /* configUSE_APPLICATION_TASK_TAG */
3016/*-----------------------------------------------------------*/
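/* Usage sketch (illustrative only, compiled out with #if 0): a task tag used
as a per-task callback.  The tag type, TaskHookFunction_t, is a function
taking a void pointer and returning BaseType_t.  Assumes
configUSE_APPLICATION_TASK_TAG is 1; the hook and function names are
hypothetical. */
#if 0
    static BaseType_t prvExampleTaskHook( void *pvParameter )
    {
        ( void ) pvParameter;

        /* Per-task instrumentation or trace output could go here. */

        return pdPASS;
    }

    void vInstallAndCallHook( void )
    {
        /* Passing NULL installs the hook on the calling task. */
        vTaskSetApplicationTaskTag( NULL, prvExampleTaskHook );

        /* Later, invoke whichever hook the calling task has installed. */
        ( void ) xTaskCallApplicationTaskHook( NULL, NULL );
    }
#endif
/*-----------------------------------------------------------*/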
3017
3018void vTaskSwitchContext( void )
3019{
3020 if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE )
3021 {
3022 /* The scheduler is currently suspended - do not allow a context
3023 switch. */
3024 xYieldPending = pdTRUE;
3025 }
3026 else
3027 {
3028 xYieldPending = pdFALSE;
3029 traceTASK_SWITCHED_OUT();
3030
3031 #if ( configGENERATE_RUN_TIME_STATS == 1 )
3032 {
3033 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
3034 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
3035 #else
Xiaohu.Huang2c96ef42021-10-15 16:12:27 +08003036 ulTotalRunTime = (uint32_t)portGET_RUN_TIME_COUNTER_VALUE();
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003037 #endif
3038
3039 /* Add the amount of time the task has been running to the
3040 accumulated time so far. The time the task started running was
3041 stored in ulTaskSwitchedInTime. Note that there is no overflow
3042 protection here so count values are only valid until the timer
3043 overflows. The guard against negative values is to protect
3044 against suspect run time stat counter implementations - which
3045 are provided by the application, not the kernel. */
3046 if( ulTotalRunTime > ulTaskSwitchedInTime )
3047 {
3048 pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );
Xiaohu.Huang2c96ef42021-10-15 16:12:27 +08003049#if ENABLE_FTRACE
3050 int ret = vGetFtraceIndex();
3051 struct ftrace_node *p = NULL;
3052 if (ret >= 0) {
3053 p = &ptracer->pnode[ret];
3054 p->runtime = (uint32_t)(ulTotalRunTime - ulTaskSwitchedInTime);
3055 p->starttime = ulTaskSwitchedInTime;
3056 p->pid = (uint32_t)pxCurrentTCB->uxTCBNumber;
3057 p->irqnum = 500;
3058 p->type = running;
3059 }
3060#endif
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08003061 }
3062 else
3063 {
3064 mtCOVERAGE_TEST_MARKER();
3065 }
3066 ulTaskSwitchedInTime = ulTotalRunTime;
3067 }
3068 #endif /* configGENERATE_RUN_TIME_STATS */
3069
3070 /* Check for stack overflow, if configured. */
3071 taskCHECK_FOR_STACK_OVERFLOW();
3072
3073 /* Before the currently running task is switched out, save its errno. */
3074 #if( configUSE_POSIX_ERRNO == 1 )
3075 {
3076 pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
3077 }
3078 #endif
3079
3080 /* Select a new task to run using either the generic C or port
3081 optimised asm code. */
3082 taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3083 traceTASK_SWITCHED_IN();
3084
3085 /* After the new task is switched in, update the global errno. */
3086 #if( configUSE_POSIX_ERRNO == 1 )
3087 {
3088 FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
3089 }
3090 #endif
3091
3092 #if ( configUSE_NEWLIB_REENTRANT == 1 )
3093 {
3094 /* Switch Newlib's _impure_ptr variable to point to the _reent
3095 structure specific to this task. */
3096 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
3097 }
3098 #endif /* configUSE_NEWLIB_REENTRANT */
3099 }
3100}
3101/*-----------------------------------------------------------*/
3102
3103void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
3104{
3105 configASSERT( pxEventList );
3106
3107 /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
3108 SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
3109
3110 /* Place the event list item of the TCB in the appropriate event list.
3111 This is placed in the list in priority order so the highest priority task
3112 is the first to be woken by the event. The queue that contains the event
3113 list is locked, preventing simultaneous access from interrupts. */
3114 vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
3115
3116 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
3117}
3118/*-----------------------------------------------------------*/
3119
3120void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
3121{
3122 configASSERT( pxEventList );
3123
3124 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3125 the event groups implementation. */
3126 configASSERT( uxSchedulerSuspended != 0 );
3127
3128 /* Store the item value in the event list item. It is safe to access the
3129 event list item here as interrupts won't access the event list item of a
3130 task that is not in the Blocked state. */
3131 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
3132
3133 /* Place the event list item of the TCB at the end of the appropriate event
3134 list. It is safe to access the event list here because it is part of an
3135 event group implementation - and interrupts don't access event groups
3136 directly (instead they access them indirectly by pending function calls to
3137 the task level). */
3138 vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
3139
3140 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
3141}
3142/*-----------------------------------------------------------*/
3143
3144#if( configUSE_TIMERS == 1 )
3145
3146 void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
3147 {
3148 configASSERT( pxEventList );
3149
3150 /* This function should not be called by application code hence the
3151 'Restricted' in its name. It is not part of the public API. It is
3152 designed for use by kernel code, and has special calling requirements -
3153 it should be called with the scheduler suspended. */
3154
3155
3156 /* Place the event list item of the TCB in the appropriate event list.
3157 		In this case it is assumed that this is the only task that is going to
3158 be waiting on this event list, so the faster vListInsertEnd() function
3159 can be used in place of vListInsert. */
3160 vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
3161
3162 /* If the task should block indefinitely then set the block time to a
3163 value that will be recognised as an indefinite delay inside the
3164 prvAddCurrentTaskToDelayedList() function. */
3165 if( xWaitIndefinitely != pdFALSE )
3166 {
3167 xTicksToWait = portMAX_DELAY;
3168 }
3169
3170 traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
3171 prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
3172 }
3173
3174#endif /* configUSE_TIMERS */
3175/*-----------------------------------------------------------*/
3176
3177BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
3178{
3179TCB_t *pxUnblockedTCB;
3180BaseType_t xReturn;
3181
3182 /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
3183 called from a critical section within an ISR. */
3184
3185 /* The event list is sorted in priority order, so the first in the list can
3186 be removed as it is known to be the highest priority. Remove the TCB from
3187 the delayed list, and add it to the ready list.
3188
3189 If an event is for a queue that is locked then this function will never
3190 get called - the lock count on the queue will get modified instead. This
3191 means exclusive access to the event list is guaranteed here.
3192
3193 This function assumes that a check has already been made to ensure that
3194 pxEventList is not empty. */
3195 pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3196 configASSERT( pxUnblockedTCB );
3197 ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
3198
3199 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
3200 {
3201 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
3202 prvAddTaskToReadyList( pxUnblockedTCB );
3203
3204 #if( configUSE_TICKLESS_IDLE != 0 )
3205 {
3206 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
3207 might be set to the blocked task's time out time. If the task is
3208 unblocked for a reason other than a timeout xNextTaskUnblockTime is
3209 normally left unchanged, because it is automatically reset to a new
3210 value when the tick count equals xNextTaskUnblockTime. However if
3211 tickless idling is used it might be more important to enter sleep mode
3212 at the earliest possible time - so reset xNextTaskUnblockTime here to
3213 ensure it is updated at the earliest possible time. */
3214 prvResetNextTaskUnblockTime();
3215 }
3216 #endif
3217 }
3218 else
3219 {
3220 /* The delayed and ready lists cannot be accessed, so hold this task
3221 pending until the scheduler is resumed. */
3222 vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
3223 }
3224
3225 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
3226 {
3227 /* Return true if the task removed from the event list has a higher
3228 priority than the calling task. This allows the calling task to know if
3229 it should force a context switch now. */
3230 xReturn = pdTRUE;
3231
3232 /* Mark that a yield is pending in case the user is not using the
3233 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
3234 xYieldPending = pdTRUE;
3235 }
3236 else
3237 {
3238 xReturn = pdFALSE;
3239 }
3240
3241 return xReturn;
3242}
3243/*-----------------------------------------------------------*/
3244
3245void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
3246{
3247TCB_t *pxUnblockedTCB;
3248
3249 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3250 the event flags implementation. */
3251 configASSERT( uxSchedulerSuspended != pdFALSE );
3252
3253 /* Store the new item value in the event list. */
3254 listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
3255
3256 /* Remove the event list item from the event flag. Interrupts do not access
3257 event flags. */
3258 pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3259 configASSERT( pxUnblockedTCB );
3260 ( void ) uxListRemove( pxEventListItem );
3261
3262 /* Remove the task from the delayed list and add it to the ready list. The
3263 scheduler is suspended so interrupts will not be accessing the ready
3264 lists. */
3265 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
3266 prvAddTaskToReadyList( pxUnblockedTCB );
3267
3268 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
3269 {
3270 /* The unblocked task has a priority above that of the calling task, so
3271 a context switch is required. This function is called with the
3272 scheduler suspended so xYieldPending is set so the context switch
3273 occurs immediately that the scheduler is resumed (unsuspended). */
3274 xYieldPending = pdTRUE;
3275 }
3276}
3277/*-----------------------------------------------------------*/
3278
3279void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
3280{
3281 configASSERT( pxTimeOut );
3282 taskENTER_CRITICAL();
3283 {
3284 pxTimeOut->xOverflowCount = xNumOfOverflows;
3285 pxTimeOut->xTimeOnEntering = xTickCount;
3286 }
3287 taskEXIT_CRITICAL();
3288}
3289/*-----------------------------------------------------------*/
3290
3291void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
3292{
3293 /* For internal use only as it does not use a critical section. */
3294 pxTimeOut->xOverflowCount = xNumOfOverflows;
3295 pxTimeOut->xTimeOnEntering = xTickCount;
3296}
3297/*-----------------------------------------------------------*/
3298
3299BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
3300{
3301BaseType_t xReturn;
3302
3303 configASSERT( pxTimeOut );
3304 configASSERT( pxTicksToWait );
3305
3306 taskENTER_CRITICAL();
3307 {
3308 /* Minor optimisation. The tick count cannot change in this block. */
3309 const TickType_t xConstTickCount = xTickCount;
3310 const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;
3311
3312 #if( INCLUDE_xTaskAbortDelay == 1 )
3313 if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
3314 {
3315 /* The delay was aborted, which is not the same as a time out,
3316 but has the same result. */
3317 pxCurrentTCB->ucDelayAborted = pdFALSE;
3318 xReturn = pdTRUE;
3319 }
3320 else
3321 #endif
3322
3323 #if ( INCLUDE_vTaskSuspend == 1 )
3324 if( *pxTicksToWait == portMAX_DELAY )
3325 {
3326 /* If INCLUDE_vTaskSuspend is set to 1 and the block time
3327 specified is the maximum block time then the task should block
3328 indefinitely, and therefore never time out. */
3329 xReturn = pdFALSE;
3330 }
3331 else
3332 #endif
3333
3334 if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
3335 {
3336 /* The tick count has overflowed since vTaskSetTimeOutState() was
3337 called, and is now greater than or equal to the time at which
3338 vTaskSetTimeOutState() was called, so the tick count must have
3339 wrapped all the way around and gone past that time again. Any
3340 block time must therefore have expired. */
3341 xReturn = pdTRUE;
3342 }
3343 else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
3344 {
3345 /* Not a genuine timeout. Adjust parameters for time remaining. */
3346 *pxTicksToWait -= xElapsedTime;
3347 vTaskInternalSetTimeOutState( pxTimeOut );
3348 xReturn = pdFALSE;
3349 }
3350 else
3351 {
3352 *pxTicksToWait = 0;
3353 xReturn = pdTRUE;
3354 }
3355 }
3356 taskEXIT_CRITICAL();
3357
3358 return xReturn;
3359}
3360/*-----------------------------------------------------------*/
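/* Illustrative usage sketch - not part of the kernel. It shows the intended
vTaskSetTimeOutState() / xTaskCheckForTimeOut() pattern for enforcing an
overall timeout across repeated blocking attempts. prvReceiveBytes() and
xRX_BLOCK_TIME are hypothetical application-level names.

	size_t xReceiveWithTimeout( uint8_t *pucBuffer, size_t uxWanted )
	{
	TimeOut_t xTimeOut;
	TickType_t xTicksToWait = xRX_BLOCK_TIME;
	size_t uxReceived = 0;

		// Record the time at which the blocking operation started.
		vTaskSetTimeOutState( &xTimeOut );

		while( uxReceived < uxWanted )
		{
			// The hypothetical prvReceiveBytes() blocks for at most
			// xTicksToWait ticks while waiting for more data.
			uxReceived += prvReceiveBytes( &( pucBuffer[ uxReceived ] ), uxWanted - uxReceived, xTicksToWait );

			// xTaskCheckForTimeOut() adjusts xTicksToWait to account for the
			// time already spent, handles tick count overflow, and returns
			// pdTRUE once the whole period has expired.
			if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) != pdFALSE )
			{
				break;
			}
		}

		return uxReceived;
	}
*/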
3361
3362void vTaskMissedYield( void )
3363{
3364 xYieldPending = pdTRUE;
3365}
3366/*-----------------------------------------------------------*/
3367
3368#if ( configUSE_TRACE_FACILITY == 1 )
3369
3370 UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
3371 {
3372 UBaseType_t uxReturn;
3373 TCB_t const *pxTCB;
3374
3375 if( xTask != NULL )
3376 {
3377 pxTCB = xTask;
3378 uxReturn = pxTCB->uxTCBNumber;
3379 }
3380 else
3381 {
3382 uxReturn = 0U;
3383 }
3384
3385 return uxReturn;
3386 }
3387
3388#endif /* configUSE_TRACE_FACILITY */
3389/*-----------------------------------------------------------*/
3390
3391#if ( configUSE_TRACE_FACILITY == 1 )
3392
3393 void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
3394 {
3395 TCB_t * pxTCB;
3396
3397 if( xTask != NULL )
3398 {
3399 pxTCB = xTask;
3400 pxTCB->uxTaskNumber = uxHandle;
3401 }
3402 }
3403
3404#endif /* configUSE_TRACE_FACILITY */
3405
3406/*
3407 * -----------------------------------------------------------
3408 * The Idle task.
3409 * ----------------------------------------------------------
3410 *
3411 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
3412 * language extensions. The equivalent prototype for this function is:
3413 *
3414 * void prvIdleTask( void *pvParameters );
3415 *
3416 */
3417static portTASK_FUNCTION( prvIdleTask, pvParameters )
3418{
3419 /* Stop warnings. */
3420 ( void ) pvParameters;
3421
3422 /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
3423 SCHEDULER IS STARTED. **/
3424
3425 /* A secure context is allocated for the idle task here because, if a
3426 task that has a secure context deletes itself, the idle task is
3427 responsible for deleting the deleted task's secure context, if any. */
3428 portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );
3429
3430 for( ;; )
3431 {
3432 /* See if any tasks have deleted themselves - if so then the idle task
3433 is responsible for freeing the deleted task's TCB and stack. */
3434 prvCheckTasksWaitingTermination();
3435
3436 #if ( configUSE_PREEMPTION == 0 )
3437 {
3438 /* If we are not using preemption we keep forcing a task switch to
3439 see if any other task has become available. If we are using
3440 preemption we don't need to do this as any task becoming available
3441 will automatically get the processor anyway. */
3442 taskYIELD();
3443 }
3444 #endif /* configUSE_PREEMPTION */
3445
3446 #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
3447 {
3448 /* When using preemption tasks of equal priority will be
3449 timesliced. If a task that is sharing the idle priority is ready
3450 to run then the idle task should yield before the end of the
3451 timeslice.
3452
3453 A critical region is not required here as we are just reading from
3454 the list, and an occasional incorrect value will not matter. If
3455 the ready list at the idle priority contains more than one task
3456 then a task other than the idle task is ready to execute. */
3457 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
3458 {
3459 taskYIELD();
3460 }
3461 else
3462 {
3463 mtCOVERAGE_TEST_MARKER();
3464 }
3465 }
3466 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
3467
3468 #if ( configUSE_IDLE_HOOK == 1 )
3469 {
3470 extern void vApplicationIdleHook( void );
3471
3472 /* Call the user defined function from within the idle task. This
3473 allows the application designer to add background functionality
3474 without the overhead of a separate task.
3475 NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
3476 CALL A FUNCTION THAT MIGHT BLOCK. */
3477 vApplicationIdleHook();
3478 }
3479 #endif /* configUSE_IDLE_HOOK */
3480
3481 /* This conditional compilation should use inequality to 0, not equality
3482 to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
3483 user defined low power mode implementations require
3484 configUSE_TICKLESS_IDLE to be set to a value other than 1. */
3485 #if ( configUSE_TICKLESS_IDLE != 0 )
3486 {
3487 TickType_t xExpectedIdleTime;
3488
3489 /* It is not desirable to suspend then resume the scheduler on
3490 each iteration of the idle task. Therefore, a preliminary
3491 test of the expected idle time is performed without the
3492 scheduler suspended. The result here is not necessarily
3493 valid. */
3494 xExpectedIdleTime = prvGetExpectedIdleTime();
3495
3496 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3497 {
3498 vTaskSuspendAll();
3499 {
3500 /* Now the scheduler is suspended, the expected idle
3501 time can be sampled again, and this time its value can
3502 be used. */
3503 configASSERT( xNextTaskUnblockTime >= xTickCount );
3504 xExpectedIdleTime = prvGetExpectedIdleTime();
3505
3506 /* Define the following macro to set xExpectedIdleTime to 0
3507 if the application does not want
3508 portSUPPRESS_TICKS_AND_SLEEP() to be called. */
3509 configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );
3510
3511 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3512 {
3513 traceLOW_POWER_IDLE_BEGIN();
3514 portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
3515 traceLOW_POWER_IDLE_END();
3516 }
3517 else
3518 {
3519 mtCOVERAGE_TEST_MARKER();
3520 }
3521 }
3522 ( void ) xTaskResumeAll();
3523 }
3524 else
3525 {
3526 mtCOVERAGE_TEST_MARKER();
3527 }
3528 }
3529 #endif /* configUSE_TICKLESS_IDLE */
3530 }
3531}
3532/*-----------------------------------------------------------*/
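/* Illustrative sketch - not part of the kernel. When configUSE_IDLE_HOOK is
set to 1 the application must provide vApplicationIdleHook() with this exact
prototype; it is called from every iteration of the idle task loop above and
must never call a blocking API function. vUpdateDiagnostics() is a
hypothetical non-blocking helper.

	void vApplicationIdleHook( void )
	{
		// Perform background housekeeping that can never block, for example
		// refreshing diagnostic counters or kicking a watchdog.
		vUpdateDiagnostics();
	}
*/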
3533
3534#if( configUSE_TICKLESS_IDLE != 0 )
3535
3536 eSleepModeStatus eTaskConfirmSleepModeStatus( void )
3537 {
3538 /* The idle task exists in addition to the application tasks. */
3539 const UBaseType_t uxNonApplicationTasks = 1;
3540 eSleepModeStatus eReturn = eStandardSleep;
3541
3542 if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
3543 {
3544 /* A task was made ready while the scheduler was suspended. */
3545 eReturn = eAbortSleep;
3546 }
3547 else if( xYieldPending != pdFALSE )
3548 {
3549 /* A yield was pended while the scheduler was suspended. */
3550 eReturn = eAbortSleep;
3551 }
3552 else
3553 {
3554 /* If all the tasks are in the suspended list (which might mean they
3555 have an infinite block time rather than actually being suspended)
3556 then it is safe to turn all clocks off and just wait for external
3557 interrupts. */
3558 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
3559 {
3560 eReturn = eNoTasksWaitingTimeout;
3561 }
3562 else
3563 {
3564 mtCOVERAGE_TEST_MARKER();
3565 }
3566 }
3567
3568 return eReturn;
3569 }
3570
3571#endif /* configUSE_TICKLESS_IDLE */
3572/*-----------------------------------------------------------*/
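/* Illustrative sketch - not part of the kernel. It outlines how a port's
portSUPPRESS_TICKS_AND_SLEEP() implementation is expected to consult
eTaskConfirmSleepModeStatus() with interrupts masked before actually
sleeping. prvStopTickTimer(), prvStartTickTimer(), prvSleepFor() and
prvSleepIndefinitely() are hypothetical port-specific helpers; a real
implementation must also restart the tick timer after sleeping and call
vTaskStepTick() with the number of ticks that passed while asleep, which is
omitted here for brevity.

	void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
	{
	eSleepModeStatus eSleepStatus;

		prvStopTickTimer();
		portDISABLE_INTERRUPTS();

		// Re-check that it is still safe to sleep now that interrupts are
		// masked - a task may have been readied in the meantime.
		eSleepStatus = eTaskConfirmSleepModeStatus();

		if( eSleepStatus == eAbortSleep )
		{
			prvStartTickTimer();
		}
		else if( eSleepStatus == eNoTasksWaitingTimeout )
		{
			// No task has a timeout pending, so only an external interrupt
			// can end the sleep.
			prvSleepIndefinitely();
		}
		else
		{
			// Sleep until the next expected unblock time at the latest.
			prvSleepFor( xExpectedIdleTime );
		}

		portENABLE_INTERRUPTS();
	}
*/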
3573
3574#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3575
3576 void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
3577 {
3578 TCB_t *pxTCB;
3579
3580 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3581 {
3582 pxTCB = prvGetTCBFromHandle( xTaskToSet );
3583 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
3584 }
3585 }
3586
3587#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3588/*-----------------------------------------------------------*/
3589
3590#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3591
3592 void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex )
3593 {
3594 void *pvReturn = NULL;
3595 TCB_t *pxTCB;
3596
3597 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3598 {
3599 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
3600 pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
3601 }
3602 else
3603 {
3604 pvReturn = NULL;
3605 }
3606
3607 return pvReturn;
3608 }
3609
3610#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3611/*-----------------------------------------------------------*/
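/* Illustrative usage sketch - not part of the kernel. A task can use the
thread local storage pointers to attach private per-task data to its own TCB,
provided configNUM_THREAD_LOCAL_STORAGE_POINTERS is at least 1. Index 0 and
the xAppPerTaskData_t type are hypothetical application choices.

	typedef struct { uint32_t ulErrorCode; } xAppPerTaskData_t;

	void vStorePerTaskData( xAppPerTaskData_t *pxData )
	{
		// NULL means "the calling task"; index 0 is chosen by the application.
		vTaskSetThreadLocalStoragePointer( NULL, 0, ( void * ) pxData );
	}

	uint32_t ulGetPerTaskErrorCode( void )
	{
	xAppPerTaskData_t *pxData;

		pxData = ( xAppPerTaskData_t * ) pvTaskGetThreadLocalStoragePointer( NULL, 0 );
		return ( pxData != NULL ) ? pxData->ulErrorCode : 0UL;
	}
*/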
3612
3613#if ( portUSING_MPU_WRAPPERS == 1 )
3614
3615 void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions )
3616 {
3617 TCB_t *pxTCB;
3618
3619 /* If null is passed in here then we are modifying the MPU settings of
3620 the calling task. */
3621 pxTCB = prvGetTCBFromHandle( xTaskToModify );
3622
3623 vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
3624 }
3625
3626#endif /* portUSING_MPU_WRAPPERS */
3627/*-----------------------------------------------------------*/
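/* Illustrative usage sketch - not part of the kernel, and only meaningful
when portUSING_MPU_WRAPPERS is 1. It shows the shape of the MemoryRegion_t
array passed to vTaskAllocateMPURegions(); the array is expected to contain
portNUM_CONFIGURABLE_REGIONS entries. The example assumes three configurable
regions and uses the GCC aligned attribute, and the buffer name and attribute
value are hypothetical - valid ulParameters values are port specific.

	static uint8_t ucSharedBuffer[ 512 ] __attribute__( ( aligned( 512 ) ) );

	void vGrantAccessToSharedBuffer( TaskHandle_t xTask )
	{
	MemoryRegion_t xRegions[] =
	{
		// pvBaseAddress, ulLengthInBytes, ulParameters (port-specific attributes).
		{ ucSharedBuffer, sizeof( ucSharedBuffer ), 0 },
		{ 0, 0, 0 },	// Unused regions are marked with a zero length.
		{ 0, 0, 0 }
	};

		// Passing NULL instead of xTask would modify the calling task's regions.
		vTaskAllocateMPURegions( xTask, xRegions );
	}
*/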
3628
3629static void prvInitialiseTaskLists( void )
3630{
3631UBaseType_t uxPriority;
3632
3633 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
3634 {
3635 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
3636 }
3637
3638 vListInitialise( &xDelayedTaskList1 );
3639 vListInitialise( &xDelayedTaskList2 );
3640 vListInitialise( &xPendingReadyList );
3641
3642 #if ( INCLUDE_vTaskDelete == 1 )
3643 {
3644 vListInitialise( &xTasksWaitingTermination );
3645 }
3646 #endif /* INCLUDE_vTaskDelete */
3647
3648 #if ( INCLUDE_vTaskSuspend == 1 )
3649 {
3650 vListInitialise( &xSuspendedTaskList );
3651 }
3652 #endif /* INCLUDE_vTaskSuspend */
3653
3654 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
3655 using list2. */
3656 pxDelayedTaskList = &xDelayedTaskList1;
3657 pxOverflowDelayedTaskList = &xDelayedTaskList2;
3658}
3659/*-----------------------------------------------------------*/
3660
3661static void prvCheckTasksWaitingTermination( void )
3662{
3663
3664 /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/
3665
3666 #if ( INCLUDE_vTaskDelete == 1 )
3667 {
3668 TCB_t *pxTCB;
3669
3670 /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
3671 being called too often in the idle task. */
3672 while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
3673 {
3674 taskENTER_CRITICAL();
3675 {
3676 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3677 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
3678 --uxCurrentNumberOfTasks;
3679 --uxDeletedTasksWaitingCleanUp;
3680 }
3681 taskEXIT_CRITICAL();
3682
3683 prvDeleteTCB( pxTCB );
3684 }
3685 }
3686 #endif /* INCLUDE_vTaskDelete */
3687}
3688/*-----------------------------------------------------------*/
3689
3690#if( configUSE_TRACE_FACILITY == 1 )
3691
3692 void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState )
3693 {
3694 TCB_t *pxTCB;
3695
3696 /* If xTask is NULL then get the state of the calling task. */
3697 pxTCB = prvGetTCBFromHandle( xTask );
3698
3699 pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
3700 pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName [ 0 ] );
3701 pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
3702 pxTaskStatus->pxStackBase = pxTCB->pxStack;
3703 pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;
3704 pxTaskStatus->uStackTotal = pxTCB->uStackDepth;
3705
3706 #if ( configUSE_MUTEXES == 1 )
3707 {
3708 pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
3709 }
3710 #else
3711 {
3712 pxTaskStatus->uxBasePriority = 0;
3713 }
3714 #endif
3715
3716 #if ( configGENERATE_RUN_TIME_STATS == 1 )
3717 {
3718 pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
3719 }
3720 #else
3721 {
3722 pxTaskStatus->ulRunTimeCounter = 0;
3723 }
3724 #endif
3725
3726 /* Obtaining the task state is a little fiddly, so is only done if the
3727 value of eState passed into this function is eInvalid - otherwise the
3728 state is just set to whatever is passed in. */
3729 if( eState != eInvalid )
3730 {
3731 if( pxTCB == pxCurrentTCB )
3732 {
3733 pxTaskStatus->eCurrentState = eRunning;
3734 }
3735 else
3736 {
3737 pxTaskStatus->eCurrentState = eState;
3738
3739 #if ( INCLUDE_vTaskSuspend == 1 )
3740 {
3741 /* If the task is in the suspended list then there is a
3742 chance it is actually just blocked indefinitely - so really
3743 it should be reported as being in the Blocked state. */
3744 if( eState == eSuspended )
3745 {
3746 vTaskSuspendAll();
3747 {
3748 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
3749 {
3750 pxTaskStatus->eCurrentState = eBlocked;
3751 }
3752 }
3753 ( void ) xTaskResumeAll();
3754 }
3755 }
3756 #endif /* INCLUDE_vTaskSuspend */
3757 }
3758 }
3759 else
3760 {
3761 pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
3762 }
3763
3764 /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
3765 parameter is provided to allow it to be skipped. */
3766 if( xGetFreeStackSpace != pdFALSE )
3767 {
3768 #if ( portSTACK_GROWTH > 0 )
3769 {
3770 pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
3771 }
3772 #else
3773 {
3774 pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
3775 }
3776 #endif
3777 }
3778 else
3779 {
3780 pxTaskStatus->usStackHighWaterMark = 0;
3781 }
3782 }
3783
3784#endif /* configUSE_TRACE_FACILITY */
3785/*-----------------------------------------------------------*/
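/* Illustrative usage sketch - not part of the kernel. It queries the calling
task, asks for the stack high water mark to be calculated, and passes eInvalid
so that vTaskGetInfo() works out the task state itself.

	void vInspectOwnTask( void )
	{
	TaskStatus_t xTaskDetails;

		// NULL means "the calling task"; pdTRUE requests the (slower) stack
		// high water mark calculation; eInvalid asks for the state lookup.
		vTaskGetInfo( NULL, &xTaskDetails, pdTRUE, eInvalid );

		// xTaskDetails.pcTaskName, xTaskDetails.eCurrentState and
		// xTaskDetails.usStackHighWaterMark are now valid and can be logged
		// by whatever means the application provides.
	}
*/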
3786
3787#if ( configUSE_TRACE_FACILITY == 1 )
3788
3789 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
3790 {
3791 configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB;
3792 UBaseType_t uxTask = 0;
3793
3794 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
3795 {
3796 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3797
3798 /* Populate a TaskStatus_t structure within the
3799 pxTaskStatusArray array for each task that is referenced from
3800 pxList. See the definition of TaskStatus_t in task.h for the
3801 meaning of each TaskStatus_t structure member. */
3802 do
3803 {
3804 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3805 vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
3806 uxTask++;
3807 } while( pxNextTCB != pxFirstTCB );
3808 }
3809 else
3810 {
3811 mtCOVERAGE_TEST_MARKER();
3812 }
3813
3814 return uxTask;
3815 }
3816
3817#endif /* configUSE_TRACE_FACILITY */
3818/*-----------------------------------------------------------*/
3819
3820#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
3821
3822 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
3823 {
3824 uint32_t ulCount = 0U;
3825
3826 while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
3827 {
3828 pucStackByte -= portSTACK_GROWTH;
3829 ulCount++;
3830 }
3831
3832 ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */
3833
3834 return ( configSTACK_DEPTH_TYPE ) ulCount;
3835 }
3836
3837#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
3838/*-----------------------------------------------------------*/
3839
3840#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
3841
3842 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
3843 same except for their return type. Using configSTACK_DEPTH_TYPE allows the
3844 user to determine the return type. It gets around the problem of the value
3845 overflowing on 8-bit types without breaking backward compatibility for
3846 applications that expect an 8-bit return type. */
3847 configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
3848 {
3849 TCB_t *pxTCB;
3850 uint8_t *pucEndOfStack;
3851 configSTACK_DEPTH_TYPE uxReturn;
3852
3853 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
3854 the same except for their return type. Using configSTACK_DEPTH_TYPE
3855 allows the user to determine the return type. It gets around the
3856 problem of the value overflowing on 8-bit types without breaking
3857 backward compatibility for applications that expect an 8-bit return
3858 type. */
3859
3860 pxTCB = prvGetTCBFromHandle( xTask );
3861
3862 #if portSTACK_GROWTH < 0
3863 {
3864 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
3865 }
3866 #else
3867 {
3868 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
3869 }
3870 #endif
3871
3872 uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );
3873
3874 return uxReturn;
3875 }
3876
3877#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
3878/*-----------------------------------------------------------*/
3879
3880#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
3881
3882 UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
3883 {
3884 TCB_t *pxTCB;
3885 uint8_t *pucEndOfStack;
3886 UBaseType_t uxReturn;
3887
3888 pxTCB = prvGetTCBFromHandle( xTask );
3889
3890 #if portSTACK_GROWTH < 0
3891 {
3892 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
3893 }
3894 #else
3895 {
3896 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
3897 }
3898 #endif
3899
3900 uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
3901
3902 return uxReturn;
3903 }
3904
3905#endif /* INCLUDE_uxTaskGetStackHighWaterMark */
3906/*-----------------------------------------------------------*/
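/* Illustrative usage sketch - not part of the kernel. The high water mark is
the minimum amount of stack that has remained unused, in StackType_t words,
since the task started. vLogStackWarning() and the 32 word threshold are
hypothetical application choices.

	void vCheckOwnStackHeadroom( void )
	{
	UBaseType_t uxHighWaterMark;

		// NULL means "the calling task".
		uxHighWaterMark = uxTaskGetStackHighWaterMark( NULL );

		// At some point the task has been within 32 words of overflowing
		// its stack - report it.
		if( uxHighWaterMark < 32 )
		{
			vLogStackWarning( uxHighWaterMark );
		}
	}
*/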
3907
3908#if ( INCLUDE_vTaskDelete == 1 )
3909
3910 static void prvDeleteTCB( TCB_t *pxTCB )
3911 {
3912 /* This call is required specifically for the TriCore port. It must be
3913 above the vPortFree() calls. The call is also used by ports/demos that
3914 want to allocate and clean RAM statically. */
3915 portCLEAN_UP_TCB( pxTCB );
3916
3917 /* Free up the memory allocated by the scheduler for the task. It is up
3918 to the task to free any memory allocated at the application level. */
3919 #if ( configUSE_NEWLIB_REENTRANT == 1 )
3920 {
3921 _reclaim_reent( &( pxTCB->xNewLib_reent ) );
3922 }
3923 #endif /* configUSE_NEWLIB_REENTRANT */
3924
3925 #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
3926 {
3927 /* The task can only have been allocated dynamically - free both
3928 the stack and TCB. */
3929 vPortFree( pxTCB->pxStack );
3930 vPortFree( pxTCB );
3931 }
3932 #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
3933 {
3934 /* The task could have been allocated statically or dynamically, so
3935 check what was statically allocated before trying to free the
3936 memory. */
3937 if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
3938 {
3939 /* Both the stack and TCB were allocated dynamically, so both
3940 must be freed. */
3941 vPortFree( pxTCB->pxStack );
3942 vPortFree( pxTCB );
3943 }
3944 else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
3945 {
3946 /* Only the stack was statically allocated, so the TCB is the
3947 only memory that must be freed. */
3948 vPortFree( pxTCB );
3949 }
3950 else
3951 {
3952 /* Neither the stack nor the TCB were allocated dynamically, so
3953 nothing needs to be freed. */
3954 configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
3955 mtCOVERAGE_TEST_MARKER();
3956 }
3957 }
3958 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
3959 }
3960
3961#endif /* INCLUDE_vTaskDelete */
3962/*-----------------------------------------------------------*/
3963
3964static void prvResetNextTaskUnblockTime( void )
3965{
3966TCB_t *pxTCB;
3967
3968 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
3969 {
3970 /* The new current delayed list is empty. Set xNextTaskUnblockTime to
3971 the maximum possible value so it is extremely unlikely that the
3972 if( xTickCount >= xNextTaskUnblockTime ) test will pass until
3973 there is an item in the delayed list. */
3974 xNextTaskUnblockTime = portMAX_DELAY;
3975 }
3976 else
3977 {
3978 /* The new current delayed list is not empty, get the value of
3979 the item at the head of the delayed list. This is the time at
3980 which the task at the head of the delayed list should be removed
3981 from the Blocked state. */
3982 ( pxTCB ) = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3983 xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xStateListItem ) );
3984 }
3985}
3986/*-----------------------------------------------------------*/
3987
3988#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
3989
3990 TaskHandle_t xTaskGetCurrentTaskHandle( void )
3991 {
3992 TaskHandle_t xReturn;
3993
3994 /* A critical section is not required as this is not called from
3995 an interrupt and the current TCB will always be the same for any
3996 individual execution thread. */
3997 xReturn = pxCurrentTCB;
3998
3999 return xReturn;
4000 }
4001
4002#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
4003/*-----------------------------------------------------------*/
4004
4005#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
4006
4007 BaseType_t xTaskGetSchedulerState( void )
4008 {
4009 BaseType_t xReturn;
4010
4011 if( xSchedulerRunning == pdFALSE )
4012 {
4013 xReturn = taskSCHEDULER_NOT_STARTED;
4014 }
4015 else
4016 {
4017 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
4018 {
4019 xReturn = taskSCHEDULER_RUNNING;
4020 }
4021 else
4022 {
4023 xReturn = taskSCHEDULER_SUSPENDED;
4024 }
4025 }
4026
4027 return xReturn;
4028 }
4029
4030#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
4031/*-----------------------------------------------------------*/
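/* Illustrative usage sketch - not part of the kernel. Library code that can
be called both before and after vTaskStartScheduler() can use
xTaskGetSchedulerState() to decide whether a blocking delay is available.
prvBusyWaitMs() is a hypothetical polling fallback.

	void vPortableDelayMs( uint32_t ulMilliseconds )
	{
		if( xTaskGetSchedulerState() == taskSCHEDULER_RUNNING )
		{
			vTaskDelay( pdMS_TO_TICKS( ulMilliseconds ) );
		}
		else
		{
			// The scheduler is not started, or is suspended, so blocking is
			// not possible - fall back to a busy wait.
			prvBusyWaitMs( ulMilliseconds );
		}
	}
*/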
4032
4033#if ( configUSE_MUTEXES == 1 )
4034
4035 BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
4036 {
4037 TCB_t * const pxMutexHolderTCB = pxMutexHolder;
4038 BaseType_t xReturn = pdFALSE;
4039
4040 /* If the mutex was given back by an interrupt while the queue was
4041 locked then the mutex holder might now be NULL. _RB_ Is this still
4042 needed as interrupts can no longer use mutexes? */
4043 if( pxMutexHolder != NULL )
4044 {
4045 /* If the holder of the mutex has a priority below the priority of
4046 the task attempting to obtain the mutex then it will temporarily
4047 inherit the priority of the task attempting to obtain the mutex. */
4048 if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority )
4049 {
4050 /* Adjust the mutex holder state to account for its new
4051 priority. Only reset the event list item value if the value is
4052 not being used for anything else. */
4053 if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
4054 {
4055 listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4056 }
4057 else
4058 {
4059 mtCOVERAGE_TEST_MARKER();
4060 }
4061
4062 /* If the task being modified is in the ready state it will need
4063 to be moved into a new list. */
4064 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
4065 {
4066 if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
4067 {
4068 taskRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority );
4069 }
4070 else
4071 {
4072 mtCOVERAGE_TEST_MARKER();
4073 }
4074
4075 /* Inherit the priority before being moved into the new list. */
4076 pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
4077 prvAddTaskToReadyList( pxMutexHolderTCB );
4078 }
4079 else
4080 {
4081 /* Just inherit the priority. */
4082 pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
4083 }
4084
4085 traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority );
4086
4087 /* Inheritance occurred. */
4088 xReturn = pdTRUE;
4089 }
4090 else
4091 {
4092 if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority )
4093 {
4094 /* The base priority of the mutex holder is lower than the
4095 priority of the task attempting to take the mutex, but the
4096 current priority of the mutex holder is not lower than the
4097 priority of the task attempting to take the mutex.
4098 Therefore the mutex holder must already have inherited a
4099 priority - had that not been the case, inheritance would have
4100 occurred here - so pdTRUE is still returned. */
4101 xReturn = pdTRUE;
4102 }
4103 else
4104 {
4105 mtCOVERAGE_TEST_MARKER();
4106 }
4107 }
4108 }
4109 else
4110 {
4111 mtCOVERAGE_TEST_MARKER();
4112 }
4113
4114 return xReturn;
4115 }
4116
4117#endif /* configUSE_MUTEXES */
4118/*-----------------------------------------------------------*/
4119
4120#if ( configUSE_MUTEXES == 1 )
4121
4122 BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
4123 {
4124 TCB_t * const pxTCB = pxMutexHolder;
4125 BaseType_t xReturn = pdFALSE;
4126
4127 if( pxMutexHolder != NULL )
4128 {
4129 /* A task can only have an inherited priority if it holds the mutex.
4130 If the mutex is held by a task then it cannot be given from an
4131 interrupt, and if a mutex is given by the holding task then it must
4132 be the running state task. */
4133 configASSERT( pxTCB == pxCurrentTCB );
4134 configASSERT( pxTCB->uxMutexesHeld );
4135 ( pxTCB->uxMutexesHeld )--;
4136
4137 /* Has the holder of the mutex inherited the priority of another
4138 task? */
4139 if( pxTCB->uxPriority != pxTCB->uxBasePriority )
4140 {
4141 /* Only disinherit if no other mutexes are held. */
4142 if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
4143 {
4144 /* A task can only have an inherited priority if it holds
4145 the mutex. If the mutex is held by a task then it cannot be
4146 given from an interrupt, and if a mutex is given by the
4147 holding task then it must be the running state task. Remove
4148 the holding task from the ready list. */
4149 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
4150 {
4151 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4152 }
4153 else
4154 {
4155 mtCOVERAGE_TEST_MARKER();
4156 }
4157
4158 /* Disinherit the priority before adding the task into the
4159 new ready list. */
4160 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
4161 pxTCB->uxPriority = pxTCB->uxBasePriority;
4162
4163 /* Reset the event list item value. It cannot be in use for
4164 any other purpose if this task is running, and it must be
4165 running to give back the mutex. */
4166 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4167 prvAddTaskToReadyList( pxTCB );
4168
4169 /* Return true to indicate that a context switch is required.
4170 This is only actually required in the corner case whereby
4171 multiple mutexes were held and the mutexes were given back
4172 in an order different to that in which they were taken.
4173 If a context switch did not occur when the first mutex was
4174 returned, even if a task was waiting on it, then a context
4175 switch should occur when the last mutex is returned whether
4176 a task is waiting on it or not. */
4177 xReturn = pdTRUE;
4178 }
4179 else
4180 {
4181 mtCOVERAGE_TEST_MARKER();
4182 }
4183 }
4184 else
4185 {
4186 mtCOVERAGE_TEST_MARKER();
4187 }
4188 }
4189 else
4190 {
4191 mtCOVERAGE_TEST_MARKER();
4192 }
4193
4194 return xReturn;
4195 }
4196
4197#endif /* configUSE_MUTEXES */
4198/*-----------------------------------------------------------*/
4199
4200#if ( configUSE_MUTEXES == 1 )
4201
4202 void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask )
4203 {
4204 TCB_t * const pxTCB = pxMutexHolder;
4205 UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
4206 const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
4207
4208 if( pxMutexHolder != NULL )
4209 {
4210 /* If pxMutexHolder is not NULL then the holder must hold at least
4211 one mutex. */
4212 configASSERT( pxTCB->uxMutexesHeld );
4213
4214 /* Determine the priority to which the priority of the task that
4215 holds the mutex should be set. This will be the greater of the
4216 holding task's base priority and the priority of the highest
4217 priority task that is waiting to obtain the mutex. */
4218 if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
4219 {
4220 uxPriorityToUse = uxHighestPriorityWaitingTask;
4221 }
4222 else
4223 {
4224 uxPriorityToUse = pxTCB->uxBasePriority;
4225 }
4226
4227 /* Does the priority need to change? */
4228 if( pxTCB->uxPriority != uxPriorityToUse )
4229 {
4230 /* Only disinherit if no other mutexes are held. This is a
4231 simplification in the priority inheritance implementation. If
4232 the task that holds the mutex is also holding other mutexes then
4233 the other mutexes may have caused the priority inheritance. */
4234 if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
4235 {
4236 /* If a task has timed out because it already holds the
4237 mutex it was trying to obtain then it cannot have inherited
4238 its own priority. */
4239 configASSERT( pxTCB != pxCurrentTCB );
4240
4241 /* Disinherit the priority, remembering the previous
4242 priority to facilitate determining the subject task's
4243 state. */
4244 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
4245 uxPriorityUsedOnEntry = pxTCB->uxPriority;
4246 pxTCB->uxPriority = uxPriorityToUse;
4247
4248 /* Only reset the event list item value if the value is not
4249 being used for anything else. */
4250 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
4251 {
4252 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4253 }
4254 else
4255 {
4256 mtCOVERAGE_TEST_MARKER();
4257 }
4258
4259 /* If the running task is not the task that holds the mutex
4260 then the task that holds the mutex could be in either the
4261 Ready, Blocked or Suspended states. Only remove the task
4262 from its current state list if it is in the Ready state as
4263 the task's priority is going to change and there is one
4264 Ready list per priority. */
4265 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
4266 {
4267 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
4268 {
4269 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4270 }
4271 else
4272 {
4273 mtCOVERAGE_TEST_MARKER();
4274 }
4275
4276 prvAddTaskToReadyList( pxTCB );
4277 }
4278 else
4279 {
4280 mtCOVERAGE_TEST_MARKER();
4281 }
4282 }
4283 else
4284 {
4285 mtCOVERAGE_TEST_MARKER();
4286 }
4287 }
4288 else
4289 {
4290 mtCOVERAGE_TEST_MARKER();
4291 }
4292 }
4293 else
4294 {
4295 mtCOVERAGE_TEST_MARKER();
4296 }
4297 }
4298
4299#endif /* configUSE_MUTEXES */
4300/*-----------------------------------------------------------*/
4301
4302#if ( portCRITICAL_NESTING_IN_TCB == 1 )
4303
4304 void vTaskEnterCritical( void )
4305 {
4306 portDISABLE_INTERRUPTS();
4307
4308 if( xSchedulerRunning != pdFALSE )
4309 {
4310 ( pxCurrentTCB->uxCriticalNesting )++;
4311
4312 /* This is not the interrupt safe version of the enter critical
4313 function so assert() if it is being called from an interrupt
4314 context. Only API functions that end in "FromISR" can be used in an
4315 interrupt. Only assert if the critical nesting count is 1 to
4316 protect against recursive calls if the assert function also uses a
4317 critical section. */
4318 if( pxCurrentTCB->uxCriticalNesting == 1 )
4319 {
4320 portASSERT_IF_IN_ISR();
4321 }
4322 }
4323 else
4324 {
4325 mtCOVERAGE_TEST_MARKER();
4326 }
4327 }
4328
4329#endif /* portCRITICAL_NESTING_IN_TCB */
4330/*-----------------------------------------------------------*/
4331
4332#if ( portCRITICAL_NESTING_IN_TCB == 1 )
4333
4334 void vTaskExitCritical( void )
4335 {
4336 if( xSchedulerRunning != pdFALSE )
4337 {
4338 if( pxCurrentTCB->uxCriticalNesting > 0U )
4339 {
4340 ( pxCurrentTCB->uxCriticalNesting )--;
4341
4342 if( pxCurrentTCB->uxCriticalNesting == 0U )
4343 {
4344 portENABLE_INTERRUPTS();
4345 }
4346 else
4347 {
4348 mtCOVERAGE_TEST_MARKER();
4349 }
4350 }
4351 else
4352 {
4353 mtCOVERAGE_TEST_MARKER();
4354 }
4355 }
4356 else
4357 {
4358 mtCOVERAGE_TEST_MARKER();
4359 }
4360 }
4361
4362#endif /* portCRITICAL_NESTING_IN_TCB */
4363/*-----------------------------------------------------------*/
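/* Illustrative usage sketch - not part of the kernel. Application code uses
the taskENTER_CRITICAL()/taskEXIT_CRITICAL() macros (which map onto the
functions above on ports that keep the nesting count in the TCB) to guard
short accesses to data shared with other tasks or interrupts. ulSharedCounter
is a hypothetical shared variable.

	static volatile uint32_t ulSharedCounter = 0;

	void vIncrementSharedCounter( void )
	{
		// Keep the critical section as short as possible - interrupts at or
		// below the kernel interrupt priority are masked while it is held.
		taskENTER_CRITICAL();
		{
			ulSharedCounter++;
		}
		taskEXIT_CRITICAL();
	}
*/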
4364
4365#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
4366
4367 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName )
4368 {
4369 size_t x;
4370
4371 /* Start by copying the entire string. */
4372 strcpy( pcBuffer, pcTaskName );
4373
4374 /* Pad the end of the string with spaces to ensure columns line up when
4375 printed out. */
4376 for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ )
4377 {
4378 pcBuffer[ x ] = ' ';
4379 }
4380
4381 /* Terminate. */
4382 pcBuffer[ x ] = ( char ) 0x00;
4383
4384 /* Return the new end of string. */
4385 return &( pcBuffer[ x ] );
4386 }
4387
4388#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
4389/*-----------------------------------------------------------*/
4390
4391#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
4392
4393 void vTaskList( char * pcWriteBuffer )
4394 {
4395 TaskStatus_t *pxTaskStatusArray;
4396 UBaseType_t uxArraySize, x;
4397 char cStatus;
4398 uint32_t ulTotalTime, ulStatsAsPercentage;
4399
4400
4401 /*
4402 * PLEASE NOTE:
4403 *
4404 * This function is provided for convenience only, and is used by many
4405 * of the demo applications. Do not consider it to be part of the
4406 * scheduler.
4407 *
4408 * vTaskList() calls uxTaskGetSystemState(), then formats part of the
4409 * uxTaskGetSystemState() output into a human readable table that
4410 * displays task names, states and stack usage.
4411 *
4412 * vTaskList() has a dependency on the sprintf() C library function that
4413 * might bloat the code size, use a lot of stack, and provide different
4414 * results on different platforms. An alternative, tiny, third party,
4415 * and limited functionality implementation of sprintf() is provided in
4416 * many of the FreeRTOS/Demo sub-directories in a file called
4417 * printf-stdarg.c (note printf-stdarg.c does not provide a full
4418 * snprintf() implementation!).
4419 *
4420 * It is recommended that production systems call uxTaskGetSystemState()
4421 * directly to get access to raw stats data, rather than indirectly
4422 * through a call to vTaskList().
4423 */
4424
4425
4426 /* Make sure the write buffer does not contain a string. */
4427 *pcWriteBuffer = ( char ) 0x00;
4428
4429 /* Take a snapshot of the number of tasks in case it changes while this
4430 function is executing. */
4431 uxArraySize = uxCurrentNumberOfTasks;
4432
4433 /* Allocate an array index for each task. NOTE! if
4434 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4435 equate to NULL. */
4436 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
4437
4438 if( pxTaskStatusArray != NULL )
4439 {
4440 /* Generate the (binary) data. */
4441 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
4442 ulTotalTime /= 100UL;
4443
4444 /* Create a human readable table from the binary data. */
4445 for( x = 0; x < uxArraySize; x++ )
4446 {
4447 switch( pxTaskStatusArray[ x ].eCurrentState )
4448 {
4449 case eRunning: cStatus = tskRUNNING_CHAR;
4450 break;
4451
4452 case eReady: cStatus = tskREADY_CHAR;
4453 break;
4454
4455 case eBlocked: cStatus = tskBLOCKED_CHAR;
4456 break;
4457
4458 case eSuspended: cStatus = tskSUSPENDED_CHAR;
4459 break;
4460
4461 case eDeleted: cStatus = tskDELETED_CHAR;
4462 break;
4463
4464 case eInvalid: /* Fall through. */
4465 default: /* Should not get here, but it is included
4466 to prevent static checking errors. */
4467 cStatus = ( char ) 0x00;
4468 break;
4469 }
4470
4471 /* Write the task name to the string, padding with spaces so it
4472 can be printed in tabular form more easily. */
4473 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4474
4475
4476 ulStatsAsPercentage = ulTotalTime == 0 ? 0 : pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;
4477 /* Write the rest of the string. */
4478 sprintf( pcWriteBuffer, "\t%u\t%c\t%u\t\t%u\t\t%u\t\t%u\t%u\t\r\n",
4479 ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber,
4480 cStatus,
4481 ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority,
4482 ( unsigned int ) pxTaskStatusArray[ x ].uStackTotal,
4483 ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark,
4484 ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter,
4485 ( unsigned int ) ulStatsAsPercentage);
4486 pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
4487 }
4488
4489 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4490 is 0 then vPortFree() will be #defined to nothing. */
4491 vPortFree( pxTaskStatusArray );
4492 }
4493 else
4494 {
4495 mtCOVERAGE_TEST_MARKER();
4496 }
4497 }
4498
4499#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
4500/*----------------------------------------------------------*/
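/* Illustrative usage sketch - not part of the kernel. The caller must supply
a buffer large enough for one line per task (roughly 40 bytes per task is the
usual guideline, though the exact requirement depends on
configMAX_TASK_NAME_LEN and the columns printed above). vConsoleWrite() and
the 512 byte buffer size are hypothetical application choices.

	void vDumpTaskTable( void )
	{
	static char cTaskListBuffer[ 512 ];

		// Fills the buffer with one row per task: name, number, state,
		// priority, stack figures and run time statistics.
		vTaskList( cTaskListBuffer );
		vConsoleWrite( cTaskListBuffer );
	}
*/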
4501
4502#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
4503
4504 void vTaskGetRunTimeStats( char *pcWriteBuffer )
4505 {
4506 TaskStatus_t *pxTaskStatusArray;
4507 UBaseType_t uxArraySize, x;
4508 uint32_t ulTotalTime, ulStatsAsPercentage;
4509
4510 #if( configUSE_TRACE_FACILITY != 1 )
4511 {
4512 #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
4513 }
4514 #endif
4515
4516 /*
4517 * PLEASE NOTE:
4518 *
4519 * This function is provided for convenience only, and is used by many
4520 * of the demo applications. Do not consider it to be part of the
4521 * scheduler.
4522 *
4523 * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
4524 * of the uxTaskGetSystemState() output into a human readable table that
4525 * displays the amount of time each task has spent in the Running state
4526 * in both absolute and percentage terms.
4527 *
4528 * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
4529 * function that might bloat the code size, use a lot of stack, and
4530 * provide different results on different platforms. An alternative,
4531 * tiny, third party, and limited functionality implementation of
4532 * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
4533 * a file called printf-stdarg.c (note printf-stdarg.c does not provide
4534 * a full snprintf() implementation!).
4535 *
4536 * It is recommended that production systems call uxTaskGetSystemState()
4537 * directly to get access to raw stats data, rather than indirectly
4538 * through a call to vTaskGetRunTimeStats().
4539 */
4540
4541 /* Make sure the write buffer does not contain a string. */
4542 *pcWriteBuffer = ( char ) 0x00;
4543
4544 /* Take a snapshot of the number of tasks in case it changes while this
4545 function is executing. */
4546 uxArraySize = uxCurrentNumberOfTasks;
4547
4548 /* Allocate an array index for each task. NOTE! If
4549 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4550 equate to NULL. */
4551 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
4552
4553 if( pxTaskStatusArray != NULL )
4554 {
4555 /* Generate the (binary) data. */
4556 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
4557
4558 /* For percentage calculations. */
4559 ulTotalTime /= 100UL;
4560
4561 /* Avoid divide by zero errors. */
4562 if( ulTotalTime > 0UL )
4563 {
4564 /* Create a human readable table from the binary data. */
4565 for( x = 0; x < uxArraySize; x++ )
4566 {
4567 /* What percentage of the total run time has the task used?
4568 This will always be rounded down to the nearest integer.
4569 ulTotalTime has already been divided by 100. */
4570 ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;
4571
4572 /* Write the task name to the string, padding with
4573 spaces so it can be printed in tabular form more
4574 easily. */
4575 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4576
4577 if( ulStatsAsPercentage > 0UL )
4578 {
4579 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4580 {
4581 sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
4582 }
4583 #else
4584 {
4585 /* sizeof( int ) == sizeof( long ) so a smaller
4586 printf() library can be used. */
4587 sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
4588 }
4589 #endif
4590 }
4591 else
4592 {
4593 /* If the percentage is zero here then the task has
4594 consumed less than 1% of the total run time. */
4595 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4596 {
4597 sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
4598 }
4599 #else
4600 {
4601 /* sizeof( int ) == sizeof( long ) so a smaller
4602 printf() library can be used. */
4603 sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
4604 }
4605 #endif
4606 }
4607
4608 pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
4609 }
4610 }
4611 else
4612 {
4613 mtCOVERAGE_TEST_MARKER();
4614 }
4615
4616 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4617 is 0 then vPortFree() will be #defined to nothing. */
4618 vPortFree( pxTaskStatusArray );
4619 }
4620 else
4621 {
4622 mtCOVERAGE_TEST_MARKER();
4623 }
4624 }
4625
4626#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
4627/*-----------------------------------------------------------*/
4628
4629TickType_t uxTaskResetEventItemValue( void )
4630{
4631TickType_t uxReturn;
4632
4633 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
4634
4635 /* Reset the event list item to its normal value - so it can be used with
4636 queues and semaphores. */
4637 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4638
4639 return uxReturn;
4640}
4641/*-----------------------------------------------------------*/
4642
4643#if ( configUSE_MUTEXES == 1 )
4644
4645 TaskHandle_t pvTaskIncrementMutexHeldCount( void )
4646 {
4647 /* If xSemaphoreCreateMutex() is called before any tasks have been created
4648 then pxCurrentTCB will be NULL. */
4649 if( pxCurrentTCB != NULL )
4650 {
4651 ( pxCurrentTCB->uxMutexesHeld )++;
4652 }
4653
4654 return pxCurrentTCB;
4655 }
4656
4657#endif /* configUSE_MUTEXES */
4658/*-----------------------------------------------------------*/
4659
4660#if( configUSE_TASK_NOTIFICATIONS == 1 )
4661
4662 uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
4663 {
4664 uint32_t ulReturn;
4665
4666 taskENTER_CRITICAL();
4667 {
4668 /* Only block if the notification count is not already non-zero. */
4669 if( pxCurrentTCB->ulNotifiedValue == 0UL )
4670 {
4671 /* Mark this task as waiting for a notification. */
4672 pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
4673
4674 if( xTicksToWait > ( TickType_t ) 0 )
4675 {
4676 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
4677 traceTASK_NOTIFY_TAKE_BLOCK();
4678
4679 /* All ports are written to allow a yield in a critical
4680 section (some will yield immediately, others wait until the
4681 critical section exits) - but it is not something that
4682 application code should ever do. */
4683 portYIELD_WITHIN_API();
4684 }
4685 else
4686 {
4687 mtCOVERAGE_TEST_MARKER();
4688 }
4689 }
4690 else
4691 {
4692 mtCOVERAGE_TEST_MARKER();
4693 }
4694 }
4695 taskEXIT_CRITICAL();
4696
4697 taskENTER_CRITICAL();
4698 {
4699 traceTASK_NOTIFY_TAKE();
4700 ulReturn = pxCurrentTCB->ulNotifiedValue;
4701
4702 if( ulReturn != 0UL )
4703 {
4704 if( xClearCountOnExit != pdFALSE )
4705 {
4706 pxCurrentTCB->ulNotifiedValue = 0UL;
4707 }
4708 else
4709 {
4710 pxCurrentTCB->ulNotifiedValue = ulReturn - ( uint32_t ) 1;
4711 }
4712 }
4713 else
4714 {
4715 mtCOVERAGE_TEST_MARKER();
4716 }
4717
4718 pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
4719 }
4720 taskEXIT_CRITICAL();
4721
4722 return ulReturn;
4723 }
4724
4725#endif /* configUSE_TASK_NOTIFICATIONS */
4726/*-----------------------------------------------------------*/
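/* Illustrative usage sketch - not part of the kernel. It shows the common
"deferred interrupt handling" pattern in which a task blocks on
ulTaskNotifyTake() and an interrupt wakes it with vTaskNotifyGiveFromISR(),
using the notification value as a counting semaphore. xHandlerTask,
prvProcessPeripheral() and the interrupt handler name are hypothetical, and
the final yield macro is port specific.

	// Set when the handler task is created (creation code omitted).
	static TaskHandle_t xHandlerTask = NULL;

	void vHandlerTask( void *pvParameters )
	{
		for( ;; )
		{
			// Block until the ISR signals that there is work to do. Passing
			// pdTRUE clears the count on exit, so each wake processes all
			// outstanding events.
			if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) != 0 )
			{
				prvProcessPeripheral();
			}
		}
	}

	void vPeripheralISR( void )
	{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;

		vTaskNotifyGiveFromISR( xHandlerTask, &xHigherPriorityTaskWoken );

		// Request a context switch on exit if the handler task was woken
		// and has a higher priority than the interrupted task.
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}
*/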
4727
4728#if( configUSE_TASK_NOTIFICATIONS == 1 )
4729
4730 BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait )
4731 {
4732 BaseType_t xReturn;
4733
4734 taskENTER_CRITICAL();
4735 {
4736 /* Only block if a notification is not already pending. */
4737 if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
4738 {
4739 /* Clear bits in the task's notification value as bits may get
4740 set by the notifying task or interrupt. This can be used to
4741 clear the value to zero. */
4742 pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnEntry;
4743
4744 /* Mark this task as waiting for a notification. */
4745 pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
4746
4747 if( xTicksToWait > ( TickType_t ) 0 )
4748 {
4749 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
4750 traceTASK_NOTIFY_WAIT_BLOCK();
4751
4752 /* All ports are written to allow a yield in a critical
4753 section (some will yield immediately, others wait until the
4754 critical section exits) - but it is not something that
4755 application code should ever do. */
4756 portYIELD_WITHIN_API();
4757 }
4758 else
4759 {
4760 mtCOVERAGE_TEST_MARKER();
4761 }
4762 }
4763 else
4764 {
4765 mtCOVERAGE_TEST_MARKER();
4766 }
4767 }
4768 taskEXIT_CRITICAL();
4769
4770 taskENTER_CRITICAL();
4771 {
4772 traceTASK_NOTIFY_WAIT();
4773
4774 if( pulNotificationValue != NULL )
4775 {
4776 /* Output the current notification value, which may or may not
4777 have changed. */
4778 *pulNotificationValue = pxCurrentTCB->ulNotifiedValue;
4779 }
4780
4781 /* If ucNotifyState shows that a notification was received then
4782 either the task never entered the Blocked state (because a
4783 notification was already pending) or the task unblocked because of
4784 a notification. Otherwise the task unblocked because of a timeout. */
4785 if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
4786 {
4787 /* A notification was not received. */
4788 xReturn = pdFALSE;
4789 }
4790 else
4791 {
4792 /* A notification was already pending or a notification was
4793 received while the task was waiting. */
4794 pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnExit;
4795 xReturn = pdTRUE;
4796 }
4797
4798 pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
4799 }
4800 taskEXIT_CRITICAL();
4801
4802 return xReturn;
4803 }
4804
4805#endif /* configUSE_TASK_NOTIFICATIONS */
4806/*-----------------------------------------------------------*/
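/* Illustrative usage sketch - not part of the kernel. It uses the
notification value as a lightweight event group: one task sets bits with
xTaskNotify() and eSetBits, another waits for them with xTaskNotifyWait().
The bit definitions and the task handle are hypothetical.

	#define appRX_EVENT_BIT		( 1UL << 0 )
	#define appTX_EVENT_BIT		( 1UL << 1 )

	// Set when the event task is created (creation code omitted).
	static TaskHandle_t xEventTask = NULL;

	void vNotifyRxEvent( void )
	{
		( void ) xTaskNotify( xEventTask, appRX_EVENT_BIT, eSetBits );
	}

	void vEventTask( void *pvParameters )
	{
	uint32_t ulNotifiedValue;

		for( ;; )
		{
			// Clear no bits on entry, clear all bits on exit, wait forever.
			if( xTaskNotifyWait( 0UL, 0xFFFFFFFFUL, &ulNotifiedValue, portMAX_DELAY ) == pdTRUE )
			{
				if( ( ulNotifiedValue & appRX_EVENT_BIT ) != 0 )
				{
					// Handle the receive event here.
				}
			}
		}
	}
*/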
4807
4808#if( configUSE_TASK_NOTIFICATIONS == 1 )
4809
4810 BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue )
4811 {
4812 TCB_t * pxTCB;
4813 BaseType_t xReturn = pdPASS;
4814 uint8_t ucOriginalNotifyState;
4815
4816 configASSERT( xTaskToNotify );
4817 pxTCB = xTaskToNotify;
4818
4819 taskENTER_CRITICAL();
4820 {
4821 if( pulPreviousNotificationValue != NULL )
4822 {
4823 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
4824 }
4825
4826 ucOriginalNotifyState = pxTCB->ucNotifyState;
4827
4828 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
4829
4830 switch( eAction )
4831 {
4832 case eSetBits :
4833 pxTCB->ulNotifiedValue |= ulValue;
4834 break;
4835
4836 case eIncrement :
4837 ( pxTCB->ulNotifiedValue )++;
4838 break;
4839
4840 case eSetValueWithOverwrite :
4841 pxTCB->ulNotifiedValue = ulValue;
4842 break;
4843
4844 case eSetValueWithoutOverwrite :
4845 if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
4846 {
4847 pxTCB->ulNotifiedValue = ulValue;
4848 }
4849 else
4850 {
4851 /* The value could not be written to the task. */
4852 xReturn = pdFAIL;
4853 }
4854 break;
4855
4856 case eNoAction:
4857 /* The task is being notified without its notify value being
4858 updated. */
4859 break;
4860
4861 default:
4862 /* Should not get here if all enums are handled.
4863 Artificially force an assert by testing a value the
4864 compiler can't assume is const. */
 4865					configASSERT( pxTCB->ulNotifiedValue == ~0U );
 4866
4867 break;
4868 }
4869
4870 traceTASK_NOTIFY();
4871
4872 /* If the task is in the blocked state specifically to wait for a
4873 notification then unblock it now. */
4874 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
4875 {
4876 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
4877 prvAddTaskToReadyList( pxTCB );
4878
4879 /* The task should not have been on an event list. */
4880 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
4881
4882 #if( configUSE_TICKLESS_IDLE != 0 )
4883 {
4884 /* If a task is blocked waiting for a notification then
4885 xNextTaskUnblockTime might be set to the blocked task's time
4886 out time. If the task is unblocked for a reason other than
4887 a timeout xNextTaskUnblockTime is normally left unchanged,
4888 because it will automatically get reset to a new value when
4889 the tick count equals xNextTaskUnblockTime. However if
4890 tickless idling is used it might be more important to enter
4891 sleep mode at the earliest possible time - so reset
4892 xNextTaskUnblockTime here to ensure it is updated at the
4893 earliest possible time. */
4894 prvResetNextTaskUnblockTime();
4895 }
4896 #endif
4897
4898 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
4899 {
4900 /* The notified task has a priority above the currently
4901 executing task so a yield is required. */
4902 taskYIELD_IF_USING_PREEMPTION();
4903 }
4904 else
4905 {
4906 mtCOVERAGE_TEST_MARKER();
4907 }
4908 }
4909 else
4910 {
4911 mtCOVERAGE_TEST_MARKER();
4912 }
4913 }
4914 taskEXIT_CRITICAL();
4915
4916 return xReturn;
4917 }
4918
4919#endif /* configUSE_TASK_NOTIFICATIONS */
4920/*-----------------------------------------------------------*/
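/* Illustrative usage sketch only (not part of the kernel): notifying another
task through the xTaskNotify() API macro, which resolves to
xTaskGenericNotify() above.  xExampleTaskHandle and the bit mask are
assumptions made for this example.

	// Set bit 0 of the receiving task's notification value.  With eSetBits
	// the call cannot fail, so the return value can be ignored.
	( void ) xTaskNotify( xExampleTaskHandle, ( 1UL << 0 ), eSetBits );

	// eSetValueWithoutOverwrite returns pdFAIL if a notification is already
	// pending, so the return value should be checked in that case.
	if( xTaskNotify( xExampleTaskHandle, 0x1234UL, eSetValueWithoutOverwrite ) == pdFAIL )
	{
		// The previous notification has not been taken yet.
	}
*/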
4921
4922#if( configUSE_TASK_NOTIFICATIONS == 1 )
4923
4924 BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken )
4925 {
4926 TCB_t * pxTCB;
4927 uint8_t ucOriginalNotifyState;
4928 BaseType_t xReturn = pdPASS;
4929 UBaseType_t uxSavedInterruptStatus;
4930
4931 configASSERT( xTaskToNotify );
4932
4933 /* RTOS ports that support interrupt nesting have the concept of a
4934 maximum system call (or maximum API call) interrupt priority.
 4935		Interrupts that are above the maximum system call priority are kept
4936 permanently enabled, even when the RTOS kernel is in a critical section,
4937 but cannot make any calls to FreeRTOS API functions. If configASSERT()
4938 is defined in FreeRTOSConfig.h then
4939 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
4940 failure if a FreeRTOS API function is called from an interrupt that has
4941 been assigned a priority above the configured maximum system call
4942 priority. Only FreeRTOS functions that end in FromISR can be called
4943 from interrupts that have been assigned a priority at or (logically)
4944 below the maximum system call interrupt priority. FreeRTOS maintains a
4945 separate interrupt safe API to ensure interrupt entry is as fast and as
4946 simple as possible. More information (albeit Cortex-M specific) is
4947 provided on the following link:
4948 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
4949 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
4950
4951 pxTCB = xTaskToNotify;
4952
4953 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
4954 {
4955 if( pulPreviousNotificationValue != NULL )
4956 {
4957 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
4958 }
4959
4960 ucOriginalNotifyState = pxTCB->ucNotifyState;
4961 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
4962
4963 switch( eAction )
4964 {
4965 case eSetBits :
4966 pxTCB->ulNotifiedValue |= ulValue;
4967 break;
4968
4969 case eIncrement :
4970 ( pxTCB->ulNotifiedValue )++;
4971 break;
4972
4973 case eSetValueWithOverwrite :
4974 pxTCB->ulNotifiedValue = ulValue;
4975 break;
4976
4977 case eSetValueWithoutOverwrite :
4978 if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
4979 {
4980 pxTCB->ulNotifiedValue = ulValue;
4981 }
4982 else
4983 {
4984 /* The value could not be written to the task. */
4985 xReturn = pdFAIL;
4986 }
4987 break;
4988
4989 case eNoAction :
4990 /* The task is being notified without its notify value being
4991 updated. */
4992 break;
4993
4994 default:
4995 /* Should not get here if all enums are handled.
4996 Artificially force an assert by testing a value the
4997 compiler can't assume is const. */
 4998					configASSERT( pxTCB->ulNotifiedValue == ~0U );
 4999					break;
5000 }
5001
5002 traceTASK_NOTIFY_FROM_ISR();
5003
5004 /* If the task is in the blocked state specifically to wait for a
5005 notification then unblock it now. */
5006 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
5007 {
5008 /* The task should not have been on an event list. */
5009 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
5010
5011 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
5012 {
5013 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
5014 prvAddTaskToReadyList( pxTCB );
5015 }
5016 else
5017 {
5018 /* The delayed and ready lists cannot be accessed, so hold
5019 this task pending until the scheduler is resumed. */
5020 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
5021 }
5022
5023 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
5024 {
5025 /* The notified task has a priority above the currently
5026 executing task so a yield is required. */
5027 if( pxHigherPriorityTaskWoken != NULL )
5028 {
5029 *pxHigherPriorityTaskWoken = pdTRUE;
5030 }
5031
5032 /* Mark that a yield is pending in case the user is not
5033 using the "xHigherPriorityTaskWoken" parameter to an ISR
5034 safe FreeRTOS function. */
5035 xYieldPending = pdTRUE;
5036 }
5037 else
5038 {
5039 mtCOVERAGE_TEST_MARKER();
5040 }
5041 }
5042 }
5043 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
5044
5045 return xReturn;
5046 }
5047
5048#endif /* configUSE_TASK_NOTIFICATIONS */
5049/*-----------------------------------------------------------*/
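/* Illustrative usage sketch only (not part of the kernel): an interrupt
handler that sets an event bit with xTaskNotifyFromISR(), which resolves to
xTaskGenericNotifyFromISR() above, then requests a context switch if a higher
priority task was woken.  The handler name, the task handle and the use of
portYIELD_FROM_ISR() are assumptions made for this example - the yield macro
differs between ports.

	void vExampleInterruptHandler( void )
	{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;

		// Clear the peripheral interrupt source here first.

		( void ) xTaskNotifyFromISR( xExampleTaskHandle, ( 1UL << 0 ), eSetBits, &xHigherPriorityTaskWoken );

		// If the notified task has a higher priority than the interrupted
		// task, request a context switch before the ISR returns.
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}
*/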
5050
5051#if( configUSE_TASK_NOTIFICATIONS == 1 )
5052
5053 void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken )
5054 {
5055 TCB_t * pxTCB;
5056 uint8_t ucOriginalNotifyState;
5057 UBaseType_t uxSavedInterruptStatus;
5058
5059 configASSERT( xTaskToNotify );
5060
5061 /* RTOS ports that support interrupt nesting have the concept of a
5062 maximum system call (or maximum API call) interrupt priority.
 5063		Interrupts that are above the maximum system call priority are kept
5064 permanently enabled, even when the RTOS kernel is in a critical section,
5065 but cannot make any calls to FreeRTOS API functions. If configASSERT()
5066 is defined in FreeRTOSConfig.h then
5067 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
5068 failure if a FreeRTOS API function is called from an interrupt that has
5069 been assigned a priority above the configured maximum system call
5070 priority. Only FreeRTOS functions that end in FromISR can be called
5071 from interrupts that have been assigned a priority at or (logically)
5072 below the maximum system call interrupt priority. FreeRTOS maintains a
5073 separate interrupt safe API to ensure interrupt entry is as fast and as
5074 simple as possible. More information (albeit Cortex-M specific) is
5075 provided on the following link:
5076 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
5077 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
5078
5079 pxTCB = xTaskToNotify;
5080
5081 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
5082 {
5083 ucOriginalNotifyState = pxTCB->ucNotifyState;
5084 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
5085
5086 /* 'Giving' is equivalent to incrementing a count in a counting
5087 semaphore. */
5088 ( pxTCB->ulNotifiedValue )++;
5089
5090 traceTASK_NOTIFY_GIVE_FROM_ISR();
5091
5092 /* If the task is in the blocked state specifically to wait for a
5093 notification then unblock it now. */
5094 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
5095 {
5096 /* The task should not have been on an event list. */
5097 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
5098
5099 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
5100 {
5101 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
5102 prvAddTaskToReadyList( pxTCB );
5103 }
5104 else
5105 {
5106 /* The delayed and ready lists cannot be accessed, so hold
5107 this task pending until the scheduler is resumed. */
5108 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
5109 }
5110
5111 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
5112 {
5113 /* The notified task has a priority above the currently
5114 executing task so a yield is required. */
5115 if( pxHigherPriorityTaskWoken != NULL )
5116 {
5117 *pxHigherPriorityTaskWoken = pdTRUE;
5118 }
5119
5120 /* Mark that a yield is pending in case the user is not
5121 using the "xHigherPriorityTaskWoken" parameter in an ISR
5122 safe FreeRTOS function. */
5123 xYieldPending = pdTRUE;
5124 }
5125 else
5126 {
5127 mtCOVERAGE_TEST_MARKER();
5128 }
5129 }
5130 }
5131 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
5132 }
5133
5134#endif /* configUSE_TASK_NOTIFICATIONS */
5135
5136/*-----------------------------------------------------------*/
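/* Illustrative usage sketch only (not part of the kernel): using
vTaskNotifyGiveFromISR() together with ulTaskNotifyTake() as a lighter weight
alternative to a binary semaphore for deferred interrupt processing.  The
handler task name and its handle are assumptions made for this example.

	// Handler task: blocks until the ISR "gives" the notification.
	static void prvExampleHandlerTask( void *pvParameters )
	{
		( void ) pvParameters;

		for( ;; )
		{
			// Clearing the count on exit makes each take consume all pending
			// gives, so the notification behaves like a binary semaphore.
			if( ulTaskNotifyTake( pdTRUE, portMAX_DELAY ) != 0UL )
			{
				// Perform the deferred interrupt processing here.
			}
		}
	}

	// Interrupt handler: "gives" the notification.
	void vExampleInterruptHandler( void )
	{
	BaseType_t xHigherPriorityTaskWoken = pdFALSE;

		vTaskNotifyGiveFromISR( xExampleHandlerTaskHandle, &xHigherPriorityTaskWoken );
		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
	}
*/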
5137
5138#if( configUSE_TASK_NOTIFICATIONS == 1 )
5139
5140 BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask )
5141 {
5142 TCB_t *pxTCB;
5143 BaseType_t xReturn;
5144
5145 /* If null is passed in here then it is the calling task that is having
5146 its notification state cleared. */
5147 pxTCB = prvGetTCBFromHandle( xTask );
5148
5149 taskENTER_CRITICAL();
5150 {
5151 if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED )
5152 {
5153 pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
5154 xReturn = pdPASS;
5155 }
5156 else
5157 {
5158 xReturn = pdFAIL;
5159 }
5160 }
5161 taskEXIT_CRITICAL();
5162
5163 return xReturn;
5164 }
5165
5166#endif /* configUSE_TASK_NOTIFICATIONS */
5167/*-----------------------------------------------------------*/
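/* Illustrative usage sketch only (not part of the kernel): clearing a stale
pending notification before starting a new request, so an old notification is
not mistaken for the response to the new one.

	// Passing NULL clears the calling task's own notification state.  pdPASS
	// is returned if a pending notification was cleared, pdFAIL otherwise.
	( void ) xTaskNotifyStateClear( NULL );
*/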
5168
5169#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
5170 TickType_t xTaskGetIdleRunTimeCounter( void )
5171 {
5172 return xIdleTaskHandle->ulRunTimeCounter;
5173 }
5174#endif
5175/*-----------------------------------------------------------*/
5176
5177static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely )
5178{
5179TickType_t xTimeToWake;
5180const TickType_t xConstTickCount = xTickCount;
5181
5182 #if( INCLUDE_xTaskAbortDelay == 1 )
5183 {
5184 /* About to enter a delayed list, so ensure the ucDelayAborted flag is
5185 reset to pdFALSE so it can be detected as having been set to pdTRUE
5186 when the task leaves the Blocked state. */
5187 pxCurrentTCB->ucDelayAborted = pdFALSE;
5188 }
5189 #endif
5190
5191 /* Remove the task from the ready list before adding it to the blocked list
5192 as the same list item is used for both lists. */
5193 if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
5194 {
5195 /* The current task must be in a ready list, so there is no need to
5196 check, and the port reset macro can be called directly. */
5197 portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */
5198 }
5199 else
5200 {
5201 mtCOVERAGE_TEST_MARKER();
5202 }
5203
5204 #if ( INCLUDE_vTaskSuspend == 1 )
5205 {
5206 if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
5207 {
5208 /* Add the task to the suspended task list instead of a delayed task
5209 list to ensure it is not woken by a timing event. It will block
5210 indefinitely. */
5211 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) );
5212 }
5213 else
5214 {
5215 /* Calculate the time at which the task should be woken if the event
5216 does not occur. This may overflow but this doesn't matter, the
5217 kernel will manage it correctly. */
5218 xTimeToWake = xConstTickCount + xTicksToWait;
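				/* Worked example (illustrative, assuming a 32-bit TickType_t):
				with xConstTickCount = 0xFFFFFFF0 and xTicksToWait = 0x20 the
				addition wraps to xTimeToWake = 0x10, which is less than
				xConstTickCount, so the task is placed on the overflow list.
				That list becomes the active delayed list when the tick count
				itself wraps to zero, so the task still wakes at the correct
				time. */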
5219
5220 /* The list item will be inserted in wake time order. */
5221 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
5222
5223 if( xTimeToWake < xConstTickCount )
5224 {
5225 /* Wake time has overflowed. Place this item in the overflow
5226 list. */
5227 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
5228 }
5229 else
5230 {
5231 /* The wake time has not overflowed, so the current block list
5232 is used. */
5233 vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
5234
5235 /* If the task entering the blocked state was placed at the
5236 head of the list of blocked tasks then xNextTaskUnblockTime
5237 needs to be updated too. */
5238 if( xTimeToWake < xNextTaskUnblockTime )
5239 {
5240 xNextTaskUnblockTime = xTimeToWake;
5241 }
5242 else
5243 {
5244 mtCOVERAGE_TEST_MARKER();
5245 }
5246 }
5247 }
5248 }
5249 #else /* INCLUDE_vTaskSuspend */
5250 {
5251 /* Calculate the time at which the task should be woken if the event
5252 does not occur. This may overflow but this doesn't matter, the kernel
5253 will manage it correctly. */
5254 xTimeToWake = xConstTickCount + xTicksToWait;
5255
5256 /* The list item will be inserted in wake time order. */
5257 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
5258
5259 if( xTimeToWake < xConstTickCount )
5260 {
5261 /* Wake time has overflowed. Place this item in the overflow list. */
5262 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
5263 }
5264 else
5265 {
5266 /* The wake time has not overflowed, so the current block list is used. */
5267 vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
5268
5269 /* If the task entering the blocked state was placed at the head of the
5270 list of blocked tasks then xNextTaskUnblockTime needs to be updated
5271 too. */
5272 if( xTimeToWake < xNextTaskUnblockTime )
5273 {
5274 xNextTaskUnblockTime = xTimeToWake;
5275 }
5276 else
5277 {
5278 mtCOVERAGE_TEST_MARKER();
5279 }
5280 }
5281
5282 /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
5283 ( void ) xCanBlockIndefinitely;
5284 }
5285 #endif /* INCLUDE_vTaskSuspend */
5286}
5287
5288/* Code below here allows additional code to be inserted into this source file,
5289especially where access to file scope functions and data is needed (for example
5290when performing module tests). */
5291
5292#ifdef FREERTOS_MODULE_TEST
5293 #include "tasks_test_access_functions.h"
5294#endif
5295
5296
5297#if( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )
5298
5299 #include "freertos_tasks_c_additions.h"
5300
5301 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
5302 static void freertos_tasks_c_additions_init( void )
5303 {
5304 FREERTOS_TASKS_C_ADDITIONS_INIT();
5305 }
5306 #endif
5307
5308#endif
5309
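/* Reports the address range occupied by a task's stack (from pxStack up to
pxStack + uStackDepth elements), for use by the CONFIG_BACKTRACE support.
Passing NULL reports the calling task's own stack. */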
5310#if CONFIG_BACKTRACE
5311void task_stack_range(TaskHandle_t xTask, unsigned long *low, unsigned long *high);
5312void task_stack_range(TaskHandle_t xTask, unsigned long *low, unsigned long *high)
5313{
5314 TCB_t *pxTCB;
5315 pxTCB = prvGetTCBFromHandle( xTask );
5316 *low = (unsigned long)pxTCB->pxStack;
5317 *high = *low + pxTCB->uStackDepth * sizeof(StackType_t);
5318}
5319#endif
5320
5321#if ENABLE_KASAN
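/* KASAN instrumentation hooks.  Each task keeps a kasan_depth count of nested
"disable" calls; instrumentation is treated as enabled for the current task
only while that count is at or below zero. */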
5322void kasan_enable_current(void)
5323{
5324 if (pxCurrentTCB)
5325 pxCurrentTCB->kasan_depth--;
5326}
5327
5328void kasan_disable_current(void)
5329{
5330 if (pxCurrentTCB)
5331 pxCurrentTCB->kasan_depth++;
5332}
5333
5334int kasan_current_enabled(void)
5335{
5336 if (pxCurrentTCB)
5337 return (pxCurrentTCB->kasan_depth<=0);
5338 return 0;
5339}
5340#endif
5341
5342/* Include implementation source code that depends on this file's internal (file scope) elements. */
5343#include "aml_tasks_ext.c"