blob: 660039cdd9fd34a19e627d3f484493b5a77ca971 [file] [log] [blame]
kelvin.zhang57fb6ae2021-10-15 10:19:42 +08001/*
2 * FreeRTOS Kernel V10.2.1
3 * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a copy of
6 * this software and associated documentation files (the "Software"), to deal in
7 * the Software without restriction, including without limitation the rights to
8 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 * the Software, and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in all
13 * copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * http://www.FreeRTOS.org
23 * http://aws.amazon.com/freertos
24 *
25 * 1 tab == 4 spaces!
26 */
27
28/* Standard includes. */
29#include <stdlib.h>
30#include <string.h>
31
32/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
33all the API functions to use the MPU wrappers. That should only be done when
34task.h is included from an application file. */
35#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
36
37/* FreeRTOS includes. */
38#include "FreeRTOS.h"
39#include "task.h"
40#include "timers.h"
41#include "stack_macros.h"
42
43/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
44because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
45for the header files above, but not in this file, in order to generate the
46correct privileged Vs unprivileged linkage and placement. */
47#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
48
49/* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
50functions but without including stdio.h here. */
51#if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
52 /* At the bottom of this file are two optional functions that can be used
53 to generate human readable text from the raw data generated by the
54 uxTaskGetSystemState() function. Note the formatting functions are provided
55 for convenience only, and are NOT considered part of the kernel. */
56 #include <stdio.h>
57#endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
58
59#if( configUSE_PREEMPTION == 0 )
60 /* If the cooperative scheduler is being used then a yield should not be
61 performed just because a higher priority task has been woken. */
62 #define taskYIELD_IF_USING_PREEMPTION()
63#else
64 #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
65#endif
66
67/* Values that can be assigned to the ucNotifyState member of the TCB. */
68#define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 )
69#define taskWAITING_NOTIFICATION ( ( uint8_t ) 1 )
70#define taskNOTIFICATION_RECEIVED ( ( uint8_t ) 2 )
71
72/*
73 * The value used to fill the stack of a task when the task is created. This
74 * is used purely for checking the high water mark for tasks.
75 */
76#define tskSTACK_FILL_BYTE ( 0xa5U )
77
78/* Bits used to recored how a task's stack and TCB were allocated. */
79#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )
80#define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )
81#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )
82
83/* If any of the following are set then task stacks are filled with a known
84value so the high water mark can be determined. If none of the following are
85set then don't fill the stack so there is no unnecessary dependency on memset. */
86#if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
87 #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 1
88#else
89 #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 0
90#endif
91
92/*
93 * Macros used by vListTask to indicate which state a task is in.
94 */
95#define tskRUNNING_CHAR ( 'X' )
96#define tskBLOCKED_CHAR ( 'B' )
97#define tskREADY_CHAR ( 'R' )
98#define tskDELETED_CHAR ( 'D' )
99#define tskSUSPENDED_CHAR ( 'S' )
100
101/*
102 * Some kernel aware debuggers require the data the debugger needs access to be
103 * global, rather than file scope.
104 */
105#ifdef portREMOVE_STATIC_QUALIFIER
106 #define static
107#endif
108
109/* The name allocated to the Idle task. This can be overridden by defining
110configIDLE_TASK_NAME in FreeRTOSConfig.h. */
111#ifndef configIDLE_TASK_NAME
112 #define configIDLE_TASK_NAME "IDLE"
113#endif
114
115#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
116
117 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
118 performed in a generic way that is not optimised to any particular
119 microcontroller architecture. */
120
121 /* uxTopReadyPriority holds the priority of the highest priority ready
122 state task. */
123 #define taskRECORD_READY_PRIORITY( uxPriority ) \
124 { \
125 if( ( uxPriority ) > uxTopReadyPriority ) \
126 { \
127 uxTopReadyPriority = ( uxPriority ); \
128 } \
129 } /* taskRECORD_READY_PRIORITY */
130
131 /*-----------------------------------------------------------*/
132
133 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
134 { \
135 UBaseType_t uxTopPriority = uxTopReadyPriority; \
136 \
137 /* Find the highest priority queue that contains ready tasks. */ \
138 while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \
139 { \
140 configASSERT( uxTopPriority ); \
141 --uxTopPriority; \
142 } \
143 \
144 /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
145 the same priority get an equal share of the processor time. */ \
146 listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
147 uxTopReadyPriority = uxTopPriority; \
148 } /* taskSELECT_HIGHEST_PRIORITY_TASK */
149
150 /*-----------------------------------------------------------*/
151
152 /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
153 they are only required when a port optimised method of task selection is
154 being used. */
155 #define taskRESET_READY_PRIORITY( uxPriority )
156 #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )
157
158#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
159
160 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
161 performed in a way that is tailored to the particular microcontroller
162 architecture being used. */
163
164 /* A port optimised version is provided. Call the port defined macros. */
165 #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )
166
167 /*-----------------------------------------------------------*/
168
169 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
170 { \
171 UBaseType_t uxTopPriority; \
172 \
173 /* Find the highest priority list that contains ready tasks. */ \
174 portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \
175 configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
176 listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
177 } /* taskSELECT_HIGHEST_PRIORITY_TASK() */
178
179 /*-----------------------------------------------------------*/
180
181 /* A port optimised version is provided, call it only if the TCB being reset
182 is being referenced from a ready list. If it is referenced from a delayed
183 or suspended list then it won't be in a ready list. */
184 #define taskRESET_READY_PRIORITY( uxPriority ) \
185 { \
186 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
187 { \
188 portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
189 } \
190 }
191
192#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
193
194/*-----------------------------------------------------------*/
195
196/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
197count overflows. */
198#define taskSWITCH_DELAYED_LISTS() \
199{ \
200 List_t *pxTemp; \
201 \
202 /* The delayed tasks list should be empty when the lists are switched. */ \
203 configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \
204 \
205 pxTemp = pxDelayedTaskList; \
206 pxDelayedTaskList = pxOverflowDelayedTaskList; \
207 pxOverflowDelayedTaskList = pxTemp; \
208 xNumOfOverflows++; \
209 prvResetNextTaskUnblockTime(); \
210}
211
212/*-----------------------------------------------------------*/
213
214/*
215 * Place the task represented by pxTCB into the appropriate ready list for
216 * the task. It is inserted at the end of the list.
217 */
218#define prvAddTaskToReadyList( pxTCB ) \
219 traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
220 taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
221 vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
222 tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
223/*-----------------------------------------------------------*/
224
225/*
226 * Several functions take an TaskHandle_t parameter that can optionally be NULL,
227 * where NULL is used to indicate that the handle of the currently executing
228 * task should be used in place of the parameter. This macro simply checks to
229 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
230 */
231#define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) )
232
233/* The item value of the event list item is normally used to hold the priority
234of the task to which it belongs (coded to allow it to be held in reverse
235priority order). However, it is occasionally borrowed for other purposes. It
236is important its value is not updated due to a task priority change while it is
237being used for another purpose. The following bit definition is used to inform
238the scheduler that the value should not be changed - in which case it is the
239responsibility of whichever module is using the value to ensure it gets set back
240to its original value when it is released. */
241#if( configUSE_16_BIT_TICKS == 1 )
242 #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U
243#else
244 #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL
245#endif
246
247/*
248 * Task control block. A task control block (TCB) is allocated for each task,
249 * and stores task state information, including a pointer to the task's context
250 * (the task's run time environment, including register values)
251 */
252typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */
253{
254 volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
255
256 #if ( portUSING_MPU_WRAPPERS == 1 )
257 xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
258 #endif
259
260 ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
261 ListItem_t xEventListItem; /*< Used to reference a task from an event list. */
262 UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */
263 StackType_t *pxStack; /*< Points to the start of the stack. */
264 char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
265
266 #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
267 StackType_t *pxEndOfStack; /*< Points to the highest valid address for the stack. */
268 #endif
269
270 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
271 UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
272 #endif
273
274 #if ( configUSE_TRACE_FACILITY == 1 )
275 UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
276 UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
277 #endif
278
279 #if ( configUSE_MUTEXES == 1 )
280 UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
281 UBaseType_t uxMutexesHeld;
282 #endif
283
284 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
285 TaskHookFunction_t pxTaskTag;
286 #endif
287
288 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
289 void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
290 #endif
291
292 #if( configGENERATE_RUN_TIME_STATS == 1 )
293 uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
294 #endif
295
296 #if ( configUSE_NEWLIB_REENTRANT == 1 )
297 /* Allocate a Newlib reent structure that is specific to this task.
298 Note Newlib support has been included by popular demand, but is not
299 used by the FreeRTOS maintainers themselves. FreeRTOS is not
300 responsible for resulting newlib operation. User must be familiar with
301 newlib and must provide system-wide implementations of the necessary
302 stubs. Be warned that (at the time of writing) the current newlib design
303 implements a system-wide malloc() that must be provided with locks. */
304 struct _reent xNewLib_reent;
305 #endif
306
307 #if( configUSE_TASK_NOTIFICATIONS == 1 )
308 volatile uint32_t ulNotifiedValue;
309 volatile uint8_t ucNotifyState;
310 #endif
311
312 /* See the comments in FreeRTOS.h with the definition of
313 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
314 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
315 uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
316 #endif
317
318 #if( INCLUDE_xTaskAbortDelay == 1 )
319 uint8_t ucDelayAborted;
320 #endif
321
322 #if( configUSE_POSIX_ERRNO == 1 )
323 int iTaskErrno;
324 #endif
325
326} tskTCB;
327
328/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
329below to enable the use of older kernel aware debuggers. */
330typedef tskTCB TCB_t;
331
332/*lint -save -e956 A manual analysis and inspection has been used to determine
333which static variables must be declared volatile. */
334PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;
335
336/* Lists for ready and blocked tasks. --------------------
337xDelayedTaskList1 and xDelayedTaskList2 could be move to function scople but
338doing so breaks some kernel aware debuggers and debuggers that rely on removing
339the static qualifier. */
340PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */
341PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
342PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
343PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
344PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
345PRIVILEGED_DATA static List_t xPendingReadyList; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
346
347#if( INCLUDE_vTaskDelete == 1 )
348
349 PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */
350 PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
351
352#endif
353
354#if ( INCLUDE_vTaskSuspend == 1 )
355
356 PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
357
358#endif
359
360/* Global POSIX errno. Its value is changed upon context switching to match
361the errno of the currently running task. */
362#if ( configUSE_POSIX_ERRNO == 1 )
363 int FreeRTOS_errno = 0;
364#endif
365
366/* Other file private variables. --------------------------------*/
367PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
368PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
369PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
370PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
371PRIVILEGED_DATA static volatile UBaseType_t uxPendedTicks = ( UBaseType_t ) 0U;
372PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;
373PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
374PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
375PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
376PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
377
378/* Context switches are held pending while the scheduler is suspended. Also,
379interrupts must not manipulate the xStateListItem of a TCB, or any of the
380lists the xStateListItem can be referenced from, if the scheduler is suspended.
381If an interrupt needs to unblock a task while the scheduler is suspended then it
382moves the task's event list item into the xPendingReadyList, ready for the
383kernel to move the task from the pending ready list into the real ready list
384when the scheduler is unsuspended. The pending ready list itself can only be
385accessed from a critical section. */
386PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) pdFALSE;
387
388#if ( configGENERATE_RUN_TIME_STATS == 1 )
389
390 /* Do not move these variables to function scope as doing so prevents the
391 code working with debuggers that need to remove the static qualifier. */
392 PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime = 0UL; /*< Holds the value of a timer/counter the last time a task was switched in. */
393 PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */
394
395#endif
396
397/*lint -restore */
398
399/*-----------------------------------------------------------*/
400
401/* Callback function prototypes. --------------------------*/
402#if( configCHECK_FOR_STACK_OVERFLOW > 0 )
403
404 extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
405
406#endif
407
408#if( configUSE_TICK_HOOK > 0 )
409
410 extern void vApplicationTickHook( void ); /*lint !e526 Symbol not defined as it is an application callback. */
411
412#endif
413
414#if( configSUPPORT_STATIC_ALLOCATION == 1 )
415
416 extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */
417
418#endif
419
420/* File private functions. --------------------------------*/
421
422/**
423 * Utility task that simply returns pdTRUE if the task referenced by xTask is
424 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
425 * is in any other state.
426 */
427#if ( INCLUDE_vTaskSuspend == 1 )
428
429 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
430
431#endif /* INCLUDE_vTaskSuspend */
432
433/*
434 * Utility to ready all the lists used by the scheduler. This is called
435 * automatically upon the creation of the first task.
436 */
437static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
438
439/*
440 * The idle task, which as all tasks is implemented as a never ending loop.
441 * The idle task is automatically created and added to the ready lists upon
442 * creation of the first user task.
443 *
444 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
445 * language extensions. The equivalent prototype for this function is:
446 *
447 * void prvIdleTask( void *pvParameters );
448 *
449 */
450static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters );
451
452/*
453 * Utility to free all memory allocated by the scheduler to hold a TCB,
454 * including the stack pointed to by the TCB.
455 *
456 * This does not free memory allocated by the task itself (i.e. memory
457 * allocated by calls to pvPortMalloc from within the tasks application code).
458 */
459#if ( INCLUDE_vTaskDelete == 1 )
460
461 static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION;
462
463#endif
464
465/*
466 * Used only by the idle task. This checks to see if anything has been placed
467 * in the list of tasks waiting to be deleted. If so the task is cleaned up
468 * and its TCB deleted.
469 */
470static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
471
472/*
473 * The currently executing task is entering the Blocked state. Add the task to
474 * either the current or the overflow delayed task list.
475 */
476static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;
477
478/*
479 * Fills an TaskStatus_t structure with information on each task that is
480 * referenced from the pxList list (which may be a ready list, a delayed list,
481 * a suspended list, etc.).
482 *
483 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
484 * NORMAL APPLICATION CODE.
485 */
486#if ( configUSE_TRACE_FACILITY == 1 )
487
488 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION;
489
490#endif
491
492/*
493 * Searches pxList for a task with name pcNameToQuery - returning a handle to
494 * the task if it is found, or NULL if the task is not found.
495 */
496#if ( INCLUDE_xTaskGetHandle == 1 )
497
498 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
499
500#endif
501
502/*
503 * When a task is created, the stack of the task is filled with a known value.
504 * This function determines the 'high water mark' of the task stack by
505 * determining how much of the stack remains at the original preset value.
506 */
507#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
508
509 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
510
511#endif
512
513/*
514 * Return the amount of time, in ticks, that will pass before the kernel will
515 * next move a task from the Blocked state to the Running state.
516 *
517 * This conditional compilation should use inequality to 0, not equality to 1.
518 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
519 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
520 * set to a value other than 1.
521 */
522#if ( configUSE_TICKLESS_IDLE != 0 )
523
524 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
525
526#endif
527
528/*
529 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
530 * will exit the Blocked state.
531 */
532static void prvResetNextTaskUnblockTime( void );
533
534#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
535
536 /*
537 * Helper function used to pad task names with spaces when printing out
538 * human readable tables of task information.
539 */
540 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) PRIVILEGED_FUNCTION;
541
542#endif
543
544/*
545 * Called after a Task_t structure has been allocated either statically or
546 * dynamically to fill in the structure's members.
547 */
548static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
549 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
550 const uint32_t ulStackDepth,
551 void * const pvParameters,
552 UBaseType_t uxPriority,
553 TaskHandle_t * const pxCreatedTask,
554 TCB_t *pxNewTCB,
555 const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;
556
557/*
558 * Called after a new task has been created and initialised to place the task
559 * under the control of the scheduler.
560 */
561static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB ) PRIVILEGED_FUNCTION;
562
563/*
564 * freertos_tasks_c_additions_init() should only be called if the user definable
565 * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
566 * called by the function.
567 */
568#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
569
570 static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
571
572#endif
573
574/*-----------------------------------------------------------*/
575
576#if( configSUPPORT_STATIC_ALLOCATION == 1 )
577
578 TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
579 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
580 const uint32_t ulStackDepth,
581 void * const pvParameters,
582 UBaseType_t uxPriority,
583 StackType_t * const puxStackBuffer,
584 StaticTask_t * const pxTaskBuffer )
585 {
586 TCB_t *pxNewTCB;
587 TaskHandle_t xReturn;
588
589 configASSERT( puxStackBuffer != NULL );
590 configASSERT( pxTaskBuffer != NULL );
591
592 #if( configASSERT_DEFINED == 1 )
593 {
594 /* Sanity check that the size of the structure used to declare a
595 variable of type StaticTask_t equals the size of the real task
596 structure. */
597 volatile size_t xSize = sizeof( StaticTask_t );
598 configASSERT( xSize == sizeof( TCB_t ) );
599 ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
600 }
601 #endif /* configASSERT_DEFINED */
602
603
604 if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
605 {
606 /* The memory used for the task's TCB and stack are passed into this
607 function - use them. */
608 pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
609 pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
610
611 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
612 {
613 /* Tasks can be created statically or dynamically, so note this
614 task was created statically in case the task is later deleted. */
615 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
616 }
617 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
618
619 prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL );
620 prvAddNewTaskToReadyList( pxNewTCB );
621 }
622 else
623 {
624 xReturn = NULL;
625 }
626
627 return xReturn;
628 }
629
630#endif /* SUPPORT_STATIC_ALLOCATION */
631/*-----------------------------------------------------------*/
632
633#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
634
635 BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
636 {
637 TCB_t *pxNewTCB;
638 BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
639
640 configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
641 configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );
642
643 if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
644 {
645 /* Allocate space for the TCB. Where the memory comes from depends
646 on the implementation of the port malloc function and whether or
647 not static allocation is being used. */
648 pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;
649
650 /* Store the stack location in the TCB. */
651 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
652
653 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
654 {
655 /* Tasks can be created statically or dynamically, so note this
656 task was created statically in case the task is later deleted. */
657 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
658 }
659 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
660
661 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
662 pxTaskDefinition->pcName,
663 ( uint32_t ) pxTaskDefinition->usStackDepth,
664 pxTaskDefinition->pvParameters,
665 pxTaskDefinition->uxPriority,
666 pxCreatedTask, pxNewTCB,
667 pxTaskDefinition->xRegions );
668
669 prvAddNewTaskToReadyList( pxNewTCB );
670 xReturn = pdPASS;
671 }
672
673 return xReturn;
674 }
675
676#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
677/*-----------------------------------------------------------*/
678
679#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
680
681 BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
682 {
683 TCB_t *pxNewTCB;
684 BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
685
686 configASSERT( pxTaskDefinition->puxStackBuffer );
687
688 if( pxTaskDefinition->puxStackBuffer != NULL )
689 {
690 /* Allocate space for the TCB. Where the memory comes from depends
691 on the implementation of the port malloc function and whether or
692 not static allocation is being used. */
693 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
694
695 if( pxNewTCB != NULL )
696 {
697 /* Store the stack location in the TCB. */
698 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
699
700 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
701 {
702 /* Tasks can be created statically or dynamically, so note
703 this task had a statically allocated stack in case it is
704 later deleted. The TCB was allocated dynamically. */
705 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
706 }
707 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
708
709 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
710 pxTaskDefinition->pcName,
711 ( uint32_t ) pxTaskDefinition->usStackDepth,
712 pxTaskDefinition->pvParameters,
713 pxTaskDefinition->uxPriority,
714 pxCreatedTask, pxNewTCB,
715 pxTaskDefinition->xRegions );
716
717 prvAddNewTaskToReadyList( pxNewTCB );
718 xReturn = pdPASS;
719 }
720 }
721
722 return xReturn;
723 }
724
725#endif /* portUSING_MPU_WRAPPERS */
726/*-----------------------------------------------------------*/
727
728#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
729
730 BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
731 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
732 const configSTACK_DEPTH_TYPE usStackDepth,
733 void * const pvParameters,
734 UBaseType_t uxPriority,
735 TaskHandle_t * const pxCreatedTask )
736 {
737 TCB_t *pxNewTCB;
738 BaseType_t xReturn;
739
740 /* If the stack grows down then allocate the stack then the TCB so the stack
741 does not grow into the TCB. Likewise if the stack grows up then allocate
742 the TCB then the stack. */
743 #if( portSTACK_GROWTH > 0 )
744 {
745 /* Allocate space for the TCB. Where the memory comes from depends on
746 the implementation of the port malloc function and whether or not static
747 allocation is being used. */
748 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
749
750 if( pxNewTCB != NULL )
751 {
752 /* Allocate space for the stack used by the task being created.
753 The base of the stack memory stored in the TCB so the task can
754 be deleted later if required. */
755 pxNewTCB->pxStack = ( StackType_t * ) pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
756
757 if( pxNewTCB->pxStack == NULL )
758 {
759 /* Could not allocate the stack. Delete the allocated TCB. */
760 vPortFree( pxNewTCB );
761 pxNewTCB = NULL;
762 }
763 }
764 }
765 #else /* portSTACK_GROWTH */
766 {
767 StackType_t *pxStack;
768
769 /* Allocate space for the stack used by the task being created. */
770 pxStack = pvPortMalloc( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */
771
772 if( pxStack != NULL )
773 {
774 /* Allocate space for the TCB. */
775 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */
776
777 if( pxNewTCB != NULL )
778 {
779 /* Store the stack location in the TCB. */
780 pxNewTCB->pxStack = pxStack;
781 }
782 else
783 {
784 /* The stack cannot be used as the TCB was not created. Free
785 it again. */
786 vPortFree( pxStack );
787 }
788 }
789 else
790 {
791 pxNewTCB = NULL;
792 }
793 }
794 #endif /* portSTACK_GROWTH */
795
796 if( pxNewTCB != NULL )
797 {
798 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */
799 {
800 /* Tasks can be created statically or dynamically, so note this
801 task was created dynamically in case it is later deleted. */
802 pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
803 }
804 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
805
806 prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
807 prvAddNewTaskToReadyList( pxNewTCB );
808 xReturn = pdPASS;
809 }
810 else
811 {
812 xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
813 }
814
815 return xReturn;
816 }
817
818#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
819/*-----------------------------------------------------------*/
820
821static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
822 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
823 const uint32_t ulStackDepth,
824 void * const pvParameters,
825 UBaseType_t uxPriority,
826 TaskHandle_t * const pxCreatedTask,
827 TCB_t *pxNewTCB,
828 const MemoryRegion_t * const xRegions )
829{
830StackType_t *pxTopOfStack;
831UBaseType_t x;
832
833 #if( portUSING_MPU_WRAPPERS == 1 )
834 /* Should the task be created in privileged mode? */
835 BaseType_t xRunPrivileged;
836 if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
837 {
838 xRunPrivileged = pdTRUE;
839 }
840 else
841 {
842 xRunPrivileged = pdFALSE;
843 }
844 uxPriority &= ~portPRIVILEGE_BIT;
845 #endif /* portUSING_MPU_WRAPPERS == 1 */
846
847 /* Avoid dependency on memset() if it is not required. */
848 #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
849 {
850 /* Fill the stack with a known value to assist debugging. */
851 ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
852 }
853 #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */
854
855 /* Calculate the top of stack address. This depends on whether the stack
856 grows from high memory to low (as per the 80x86) or vice versa.
857 portSTACK_GROWTH is used to make the result positive or negative as required
858 by the port. */
859 #if( portSTACK_GROWTH < 0 )
860 {
861 pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
862 pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */
863
864 /* Check the alignment of the calculated top of stack is correct. */
865 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
866
867 #if( configRECORD_STACK_HIGH_ADDRESS == 1 )
868 {
869 /* Also record the stack's high address, which may assist
870 debugging. */
871 pxNewTCB->pxEndOfStack = pxTopOfStack;
872 }
873 #endif /* configRECORD_STACK_HIGH_ADDRESS */
874 }
875 #else /* portSTACK_GROWTH */
876 {
877 pxTopOfStack = pxNewTCB->pxStack;
878
879 /* Check the alignment of the stack buffer is correct. */
880 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
881
882 /* The other extreme of the stack space is required if stack checking is
883 performed. */
884 pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
885 }
886 #endif /* portSTACK_GROWTH */
887
888 /* Store the task name in the TCB. */
889 if( pcName != NULL )
890 {
891 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
892 {
893 pxNewTCB->pcTaskName[ x ] = pcName[ x ];
894
895 /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
896 configMAX_TASK_NAME_LEN characters just in case the memory after the
897 string is not accessible (extremely unlikely). */
898 if( pcName[ x ] == ( char ) 0x00 )
899 {
900 break;
901 }
902 else
903 {
904 mtCOVERAGE_TEST_MARKER();
905 }
906 }
907
908 /* Ensure the name string is terminated in the case that the string length
909 was greater or equal to configMAX_TASK_NAME_LEN. */
910 pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
911 }
912 else
913 {
914 /* The task has not been given a name, so just ensure there is a NULL
915 terminator when it is read out. */
916 pxNewTCB->pcTaskName[ 0 ] = 0x00;
917 }
918
919 /* This is used as an array index so must ensure it's not too large. First
920 remove the privilege bit if one is present. */
921 if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
922 {
923 uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
924 }
925 else
926 {
927 mtCOVERAGE_TEST_MARKER();
928 }
929
930 pxNewTCB->uxPriority = uxPriority;
931 #if ( configUSE_MUTEXES == 1 )
932 {
933 pxNewTCB->uxBasePriority = uxPriority;
934 pxNewTCB->uxMutexesHeld = 0;
935 }
936 #endif /* configUSE_MUTEXES */
937
938 vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
939 vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
940
941 /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
942 back to the containing TCB from a generic item in a list. */
943 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );
944
945 /* Event lists are always in priority order. */
946 listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
947 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
948
949 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
950 {
951 pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
952 }
953 #endif /* portCRITICAL_NESTING_IN_TCB */
954
955 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
956 {
957 pxNewTCB->pxTaskTag = NULL;
958 }
959 #endif /* configUSE_APPLICATION_TASK_TAG */
960
961 #if ( configGENERATE_RUN_TIME_STATS == 1 )
962 {
963 pxNewTCB->ulRunTimeCounter = 0UL;
964 }
965 #endif /* configGENERATE_RUN_TIME_STATS */
966
967 #if ( portUSING_MPU_WRAPPERS == 1 )
968 {
969 vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
970 }
971 #else
972 {
973 /* Avoid compiler warning about unreferenced parameter. */
974 ( void ) xRegions;
975 }
976 #endif
977
978 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
979 {
980 for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
981 {
982 pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL;
983 }
984 }
985 #endif
986
987 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
988 {
989 pxNewTCB->ulNotifiedValue = 0;
990 pxNewTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
991 }
992 #endif
993
994 #if ( configUSE_NEWLIB_REENTRANT == 1 )
995 {
996 /* Initialise this task's Newlib reent structure. */
997 _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );
998 }
999 #endif
1000
1001 #if( INCLUDE_xTaskAbortDelay == 1 )
1002 {
1003 pxNewTCB->ucDelayAborted = pdFALSE;
1004 }
1005 #endif
1006
1007 /* Initialize the TCB stack to look as if the task was already running,
1008 but had been interrupted by the scheduler. The return address is set
1009 to the start of the task function. Once the stack has been initialised
1010 the top of stack variable is updated. */
1011 #if( portUSING_MPU_WRAPPERS == 1 )
1012 {
1013 /* If the port has capability to detect stack overflow,
1014 pass the stack end address to the stack initialization
1015 function as well. */
1016 #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1017 {
1018 #if( portSTACK_GROWTH < 0 )
1019 {
1020 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );
1021 }
1022 #else /* portSTACK_GROWTH */
1023 {
1024 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );
1025 }
1026 #endif /* portSTACK_GROWTH */
1027 }
1028 #else /* portHAS_STACK_OVERFLOW_CHECKING */
1029 {
1030 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
1031 }
1032 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1033 }
1034 #else /* portUSING_MPU_WRAPPERS */
1035 {
1036 /* If the port has capability to detect stack overflow,
1037 pass the stack end address to the stack initialization
1038 function as well. */
1039 #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1040 {
1041 #if( portSTACK_GROWTH < 0 )
1042 {
1043 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
1044 }
1045 #else /* portSTACK_GROWTH */
1046 {
1047 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
1048 }
1049 #endif /* portSTACK_GROWTH */
1050 }
1051 #else /* portHAS_STACK_OVERFLOW_CHECKING */
1052 {
1053 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
1054 }
1055 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1056 }
1057 #endif /* portUSING_MPU_WRAPPERS */
1058
1059 if( pxCreatedTask != NULL )
1060 {
1061 /* Pass the handle out in an anonymous way. The handle can be used to
1062 change the created task's priority, delete the created task, etc.*/
1063 *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
1064 }
1065 else
1066 {
1067 mtCOVERAGE_TEST_MARKER();
1068 }
1069}
1070/*-----------------------------------------------------------*/
1071
1072static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB )
1073{
1074 /* Ensure interrupts don't access the task lists while the lists are being
1075 updated. */
1076 taskENTER_CRITICAL();
1077 {
1078 uxCurrentNumberOfTasks++;
1079 if( pxCurrentTCB == NULL )
1080 {
1081 /* There are no other tasks, or all the other tasks are in
1082 the suspended state - make this the current task. */
1083 pxCurrentTCB = pxNewTCB;
1084
1085 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
1086 {
1087 /* This is the first task to be created so do the preliminary
1088 initialisation required. We will not recover if this call
1089 fails, but we will report the failure. */
1090 prvInitialiseTaskLists();
1091 }
1092 else
1093 {
1094 mtCOVERAGE_TEST_MARKER();
1095 }
1096 }
1097 else
1098 {
1099 /* If the scheduler is not already running, make this task the
1100 current task if it is the highest priority task to be created
1101 so far. */
1102 if( xSchedulerRunning == pdFALSE )
1103 {
1104 if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority )
1105 {
1106 pxCurrentTCB = pxNewTCB;
1107 }
1108 else
1109 {
1110 mtCOVERAGE_TEST_MARKER();
1111 }
1112 }
1113 else
1114 {
1115 mtCOVERAGE_TEST_MARKER();
1116 }
1117 }
1118
1119 uxTaskNumber++;
1120
1121 #if ( configUSE_TRACE_FACILITY == 1 )
1122 {
1123 /* Add a counter into the TCB for tracing only. */
1124 pxNewTCB->uxTCBNumber = uxTaskNumber;
1125 }
1126 #endif /* configUSE_TRACE_FACILITY */
1127 traceTASK_CREATE( pxNewTCB );
1128
1129 prvAddTaskToReadyList( pxNewTCB );
1130
1131 portSETUP_TCB( pxNewTCB );
1132 }
1133 taskEXIT_CRITICAL();
1134
1135 if( xSchedulerRunning != pdFALSE )
1136 {
1137 /* If the created task is of a higher priority than the current task
1138 then it should run now. */
1139 if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority )
1140 {
1141 taskYIELD_IF_USING_PREEMPTION();
1142 }
1143 else
1144 {
1145 mtCOVERAGE_TEST_MARKER();
1146 }
1147 }
1148 else
1149 {
1150 mtCOVERAGE_TEST_MARKER();
1151 }
1152}
1153/*-----------------------------------------------------------*/
1154
1155#if ( INCLUDE_vTaskDelete == 1 )
1156
1157 void vTaskDelete( TaskHandle_t xTaskToDelete )
1158 {
1159 TCB_t *pxTCB;
1160
1161 taskENTER_CRITICAL();
1162 {
1163 /* If null is passed in here then it is the calling task that is
1164 being deleted. */
1165 pxTCB = prvGetTCBFromHandle( xTaskToDelete );
1166
1167 /* Remove task from the ready list. */
1168 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1169 {
1170 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1171 }
1172 else
1173 {
1174 mtCOVERAGE_TEST_MARKER();
1175 }
1176
1177 /* Is the task waiting on an event also? */
1178 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1179 {
1180 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1181 }
1182 else
1183 {
1184 mtCOVERAGE_TEST_MARKER();
1185 }
1186
1187 /* Increment the uxTaskNumber also so kernel aware debuggers can
1188 detect that the task lists need re-generating. This is done before
1189 portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
1190 not return. */
1191 uxTaskNumber++;
1192
1193 if( pxTCB == pxCurrentTCB )
1194 {
1195 /* A task is deleting itself. This cannot complete within the
1196 task itself, as a context switch to another task is required.
1197 Place the task in the termination list. The idle task will
1198 check the termination list and free up any memory allocated by
1199 the scheduler for the TCB and stack of the deleted task. */
1200 vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );
1201
1202 /* Increment the ucTasksDeleted variable so the idle task knows
1203 there is a task that has been deleted and that it should therefore
1204 check the xTasksWaitingTermination list. */
1205 ++uxDeletedTasksWaitingCleanUp;
1206
1207 /* The pre-delete hook is primarily for the Windows simulator,
1208 in which Windows specific clean up operations are performed,
1209 after which it is not possible to yield away from this task -
1210 hence xYieldPending is used to latch that a context switch is
1211 required. */
1212 portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
1213 }
1214 else
1215 {
1216 --uxCurrentNumberOfTasks;
1217 prvDeleteTCB( pxTCB );
1218
1219 /* Reset the next expected unblock time in case it referred to
1220 the task that has just been deleted. */
1221 prvResetNextTaskUnblockTime();
1222 }
1223
1224 traceTASK_DELETE( pxTCB );
1225 }
1226 taskEXIT_CRITICAL();
1227
1228 /* Force a reschedule if it is the currently running task that has just
1229 been deleted. */
1230 if( xSchedulerRunning != pdFALSE )
1231 {
1232 if( pxTCB == pxCurrentTCB )
1233 {
1234 configASSERT( uxSchedulerSuspended == 0 );
1235 portYIELD_WITHIN_API();
1236 }
1237 else
1238 {
1239 mtCOVERAGE_TEST_MARKER();
1240 }
1241 }
1242 }
1243
1244#endif /* INCLUDE_vTaskDelete */
1245/*-----------------------------------------------------------*/
1246
1247#if ( INCLUDE_vTaskDelayUntil == 1 )
1248
1249 void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
1250 {
1251 TickType_t xTimeToWake;
1252 BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
1253
1254 configASSERT( pxPreviousWakeTime );
1255 configASSERT( ( xTimeIncrement > 0U ) );
1256 configASSERT( uxSchedulerSuspended == 0 );
1257
1258 vTaskSuspendAll();
1259 {
1260 /* Minor optimisation. The tick count cannot change in this
1261 block. */
1262 const TickType_t xConstTickCount = xTickCount;
1263
1264 /* Generate the tick time at which the task wants to wake. */
1265 xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;
1266
1267 if( xConstTickCount < *pxPreviousWakeTime )
1268 {
1269 /* The tick count has overflowed since this function was
1270 lasted called. In this case the only time we should ever
1271 actually delay is if the wake time has also overflowed,
1272 and the wake time is greater than the tick time. When this
1273 is the case it is as if neither time had overflowed. */
1274 if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
1275 {
1276 xShouldDelay = pdTRUE;
1277 }
1278 else
1279 {
1280 mtCOVERAGE_TEST_MARKER();
1281 }
1282 }
1283 else
1284 {
1285 /* The tick time has not overflowed. In this case we will
1286 delay if either the wake time has overflowed, and/or the
1287 tick time is less than the wake time. */
1288 if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
1289 {
1290 xShouldDelay = pdTRUE;
1291 }
1292 else
1293 {
1294 mtCOVERAGE_TEST_MARKER();
1295 }
1296 }
1297
1298 /* Update the wake time ready for the next call. */
1299 *pxPreviousWakeTime = xTimeToWake;
1300
1301 if( xShouldDelay != pdFALSE )
1302 {
1303 traceTASK_DELAY_UNTIL( xTimeToWake );
1304
1305 /* prvAddCurrentTaskToDelayedList() needs the block time, not
1306 the time to wake, so subtract the current tick count. */
1307 prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
1308 }
1309 else
1310 {
1311 mtCOVERAGE_TEST_MARKER();
1312 }
1313 }
1314 xAlreadyYielded = xTaskResumeAll();
1315
1316 /* Force a reschedule if xTaskResumeAll has not already done so, we may
1317 have put ourselves to sleep. */
1318 if( xAlreadyYielded == pdFALSE )
1319 {
1320 portYIELD_WITHIN_API();
1321 }
1322 else
1323 {
1324 mtCOVERAGE_TEST_MARKER();
1325 }
1326 }
1327
1328#endif /* INCLUDE_vTaskDelayUntil */
1329/*-----------------------------------------------------------*/
1330
1331#if ( INCLUDE_vTaskDelay == 1 )
1332
1333 void vTaskDelay( const TickType_t xTicksToDelay )
1334 {
1335 BaseType_t xAlreadyYielded = pdFALSE;
1336
1337 /* A delay time of zero just forces a reschedule. */
1338 if( xTicksToDelay > ( TickType_t ) 0U )
1339 {
1340 configASSERT( uxSchedulerSuspended == 0 );
1341 vTaskSuspendAll();
1342 {
1343 traceTASK_DELAY();
1344
1345 /* A task that is removed from the event list while the
1346 scheduler is suspended will not get placed in the ready
1347 list or removed from the blocked list until the scheduler
1348 is resumed.
1349
1350 This task cannot be in an event list as it is the currently
1351 executing task. */
1352 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
1353 }
1354 xAlreadyYielded = xTaskResumeAll();
1355 }
1356 else
1357 {
1358 mtCOVERAGE_TEST_MARKER();
1359 }
1360
1361 /* Force a reschedule if xTaskResumeAll has not already done so, we may
1362 have put ourselves to sleep. */
1363 if( xAlreadyYielded == pdFALSE )
1364 {
1365 portYIELD_WITHIN_API();
1366 }
1367 else
1368 {
1369 mtCOVERAGE_TEST_MARKER();
1370 }
1371 }
1372
1373#endif /* INCLUDE_vTaskDelay */
1374/*-----------------------------------------------------------*/
1375
1376#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
1377
1378 eTaskState eTaskGetState( TaskHandle_t xTask )
1379 {
1380 eTaskState eReturn;
1381 List_t const * pxStateList, *pxDelayedList, *pxOverflowedDelayedList;
1382 const TCB_t * const pxTCB = xTask;
1383
1384 configASSERT( pxTCB );
1385
1386 if( pxTCB == pxCurrentTCB )
1387 {
1388 /* The task calling this function is querying its own state. */
1389 eReturn = eRunning;
1390 }
1391 else
1392 {
1393 taskENTER_CRITICAL();
1394 {
1395 pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
1396 pxDelayedList = pxDelayedTaskList;
1397 pxOverflowedDelayedList = pxOverflowDelayedTaskList;
1398 }
1399 taskEXIT_CRITICAL();
1400
1401 if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
1402 {
1403 /* The task being queried is referenced from one of the Blocked
1404 lists. */
1405 eReturn = eBlocked;
1406 }
1407
1408 #if ( INCLUDE_vTaskSuspend == 1 )
1409 else if( pxStateList == &xSuspendedTaskList )
1410 {
1411 /* The task being queried is referenced from the suspended
1412 list. Is it genuinely suspended or is it blocked
1413 indefinitely? */
1414 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
1415 {
1416 #if( configUSE_TASK_NOTIFICATIONS == 1 )
1417 {
1418 /* The task does not appear on the event list item of
1419 and of the RTOS objects, but could still be in the
1420 blocked state if it is waiting on its notification
1421 rather than waiting on an object. */
1422 if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
1423 {
1424 eReturn = eBlocked;
1425 }
1426 else
1427 {
1428 eReturn = eSuspended;
1429 }
1430 }
1431 #else
1432 {
1433 eReturn = eSuspended;
1434 }
1435 #endif
1436 }
1437 else
1438 {
1439 eReturn = eBlocked;
1440 }
1441 }
1442 #endif
1443
1444 #if ( INCLUDE_vTaskDelete == 1 )
1445 else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
1446 {
1447 /* The task being queried is referenced from the deleted
1448 tasks list, or it is not referenced from any lists at
1449 all. */
1450 eReturn = eDeleted;
1451 }
1452 #endif
1453
1454 else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
1455 {
1456 /* If the task is not in any other state, it must be in the
1457 Ready (including pending ready) state. */
1458 eReturn = eReady;
1459 }
1460 }
1461
1462 return eReturn;
1463 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1464
1465#endif /* INCLUDE_eTaskGetState */
1466/*-----------------------------------------------------------*/
1467
1468#if ( INCLUDE_uxTaskPriorityGet == 1 )
1469
1470 UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
1471 {
1472 TCB_t const *pxTCB;
1473 UBaseType_t uxReturn;
1474
1475 taskENTER_CRITICAL();
1476 {
1477 /* If null is passed in here then it is the priority of the task
1478 that called uxTaskPriorityGet() that is being queried. */
1479 pxTCB = prvGetTCBFromHandle( xTask );
1480 uxReturn = pxTCB->uxPriority;
1481 }
1482 taskEXIT_CRITICAL();
1483
1484 return uxReturn;
1485 }
1486
1487#endif /* INCLUDE_uxTaskPriorityGet */
1488/*-----------------------------------------------------------*/
1489
1490#if ( INCLUDE_uxTaskPriorityGet == 1 )
1491
1492 UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
1493 {
1494 TCB_t const *pxTCB;
1495 UBaseType_t uxReturn, uxSavedInterruptState;
1496
1497 /* RTOS ports that support interrupt nesting have the concept of a
1498 maximum system call (or maximum API call) interrupt priority.
1499 Interrupts that are above the maximum system call priority are keep
1500 permanently enabled, even when the RTOS kernel is in a critical section,
1501 but cannot make any calls to FreeRTOS API functions. If configASSERT()
1502 is defined in FreeRTOSConfig.h then
1503 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1504 failure if a FreeRTOS API function is called from an interrupt that has
1505 been assigned a priority above the configured maximum system call
1506 priority. Only FreeRTOS functions that end in FromISR can be called
1507 from interrupts that have been assigned a priority at or (logically)
1508 below the maximum system call interrupt priority. FreeRTOS maintains a
1509 separate interrupt safe API to ensure interrupt entry is as fast and as
1510 simple as possible. More information (albeit Cortex-M specific) is
1511 provided on the following link:
1512 https://www.freertos.org/RTOS-Cortex-M3-M4.html */
1513 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1514
1515 uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR();
1516 {
1517 /* If null is passed in here then it is the priority of the calling
1518 task that is being queried. */
1519 pxTCB = prvGetTCBFromHandle( xTask );
1520 uxReturn = pxTCB->uxPriority;
1521 }
1522 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState );
1523
1524 return uxReturn;
1525 }
1526
1527#endif /* INCLUDE_uxTaskPriorityGet */
1528/*-----------------------------------------------------------*/
1529
1530#if ( INCLUDE_vTaskPrioritySet == 1 )
1531
1532 void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
1533 {
1534 TCB_t *pxTCB;
1535 UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
1536 BaseType_t xYieldRequired = pdFALSE;
1537
1538 configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );
1539
1540 /* Ensure the new priority is valid. */
1541 if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
1542 {
1543 uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
1544 }
1545 else
1546 {
1547 mtCOVERAGE_TEST_MARKER();
1548 }
1549
1550 taskENTER_CRITICAL();
1551 {
1552 /* If null is passed in here then it is the priority of the calling
1553 task that is being changed. */
1554 pxTCB = prvGetTCBFromHandle( xTask );
1555
1556 traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );
1557
1558 #if ( configUSE_MUTEXES == 1 )
1559 {
1560 uxCurrentBasePriority = pxTCB->uxBasePriority;
1561 }
1562 #else
1563 {
1564 uxCurrentBasePriority = pxTCB->uxPriority;
1565 }
1566 #endif
1567
1568 if( uxCurrentBasePriority != uxNewPriority )
1569 {
1570 /* The priority change may have readied a task of higher
1571 priority than the calling task. */
1572 if( uxNewPriority > uxCurrentBasePriority )
1573 {
1574 if( pxTCB != pxCurrentTCB )
1575 {
1576 /* The priority of a task other than the currently
1577 running task is being raised. Is the priority being
1578 raised above that of the running task? */
1579 if( uxNewPriority >= pxCurrentTCB->uxPriority )
1580 {
1581 xYieldRequired = pdTRUE;
1582 }
1583 else
1584 {
1585 mtCOVERAGE_TEST_MARKER();
1586 }
1587 }
1588 else
1589 {
1590 /* The priority of the running task is being raised,
1591 but the running task must already be the highest
1592 priority task able to run so no yield is required. */
1593 }
1594 }
1595 else if( pxTCB == pxCurrentTCB )
1596 {
1597 /* Setting the priority of the running task down means
1598 there may now be another task of higher priority that
1599 is ready to execute. */
1600 xYieldRequired = pdTRUE;
1601 }
1602 else
1603 {
1604 /* Setting the priority of any other task down does not
1605 require a yield as the running task must be above the
1606 new priority of the task being modified. */
1607 }
1608
1609 /* Remember the ready list the task might be referenced from
1610 before its uxPriority member is changed so the
1611 taskRESET_READY_PRIORITY() macro can function correctly. */
1612 uxPriorityUsedOnEntry = pxTCB->uxPriority;
1613
1614 #if ( configUSE_MUTEXES == 1 )
1615 {
1616 /* Only change the priority being used if the task is not
1617 currently using an inherited priority. */
1618 if( pxTCB->uxBasePriority == pxTCB->uxPriority )
1619 {
1620 pxTCB->uxPriority = uxNewPriority;
1621 }
1622 else
1623 {
1624 mtCOVERAGE_TEST_MARKER();
1625 }
1626
1627 /* The base priority gets set whatever. */
1628 pxTCB->uxBasePriority = uxNewPriority;
1629 }
1630 #else
1631 {
1632 pxTCB->uxPriority = uxNewPriority;
1633 }
1634 #endif
1635
1636 /* Only reset the event list item value if the value is not
1637 being used for anything else. */
1638 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
1639 {
1640 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1641 }
1642 else
1643 {
1644 mtCOVERAGE_TEST_MARKER();
1645 }
1646
1647 /* If the task is in the blocked or suspended list we need do
1648 nothing more than change its priority variable. However, if
1649 the task is in a ready list it needs to be removed and placed
1650 in the list appropriate to its new priority. */
1651 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
1652 {
1653 /* The task is currently in its ready list - remove before
1654 adding it to it's new ready list. As we are in a critical
1655 section we can do this even if the scheduler is suspended. */
1656 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1657 {
1658 /* It is known that the task is in its ready list so
1659 there is no need to check again and the port level
1660 reset macro can be called directly. */
1661 portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
1662 }
1663 else
1664 {
1665 mtCOVERAGE_TEST_MARKER();
1666 }
1667 prvAddTaskToReadyList( pxTCB );
1668 }
1669 else
1670 {
1671 mtCOVERAGE_TEST_MARKER();
1672 }
1673
1674 if( xYieldRequired != pdFALSE )
1675 {
1676 taskYIELD_IF_USING_PREEMPTION();
1677 }
1678 else
1679 {
1680 mtCOVERAGE_TEST_MARKER();
1681 }
1682
1683 /* Remove compiler warning about unused variables when the port
1684 optimised task selection is not being used. */
1685 ( void ) uxPriorityUsedOnEntry;
1686 }
1687 }
1688 taskEXIT_CRITICAL();
1689 }
1690
1691#endif /* INCLUDE_vTaskPrioritySet */
1692/*-----------------------------------------------------------*/
1693
1694#if ( INCLUDE_vTaskSuspend == 1 )
1695
1696 void vTaskSuspend( TaskHandle_t xTaskToSuspend )
1697 {
1698 TCB_t *pxTCB;
1699
1700 taskENTER_CRITICAL();
1701 {
1702 /* If null is passed in here then it is the running task that is
1703 being suspended. */
1704 pxTCB = prvGetTCBFromHandle( xTaskToSuspend );
1705
1706 traceTASK_SUSPEND( pxTCB );
1707
1708 /* Remove task from the ready/delayed list and place in the
1709 suspended list. */
1710 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
1711 {
1712 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
1713 }
1714 else
1715 {
1716 mtCOVERAGE_TEST_MARKER();
1717 }
1718
1719 /* Is the task waiting on an event also? */
1720 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
1721 {
1722 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
1723 }
1724 else
1725 {
1726 mtCOVERAGE_TEST_MARKER();
1727 }
1728
1729 vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
1730
1731 #if( configUSE_TASK_NOTIFICATIONS == 1 )
1732 {
1733 if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
1734 {
1735 /* The task was blocked to wait for a notification, but is
1736 now suspended, so no notification was received. */
1737 pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
1738 }
1739 }
1740 #endif
1741 }
1742 taskEXIT_CRITICAL();
1743
1744 if( xSchedulerRunning != pdFALSE )
1745 {
1746 /* Reset the next expected unblock time in case it referred to the
1747 task that is now in the Suspended state. */
1748 taskENTER_CRITICAL();
1749 {
1750 prvResetNextTaskUnblockTime();
1751 }
1752 taskEXIT_CRITICAL();
1753 }
1754 else
1755 {
1756 mtCOVERAGE_TEST_MARKER();
1757 }
1758
1759 if( pxTCB == pxCurrentTCB )
1760 {
1761 if( xSchedulerRunning != pdFALSE )
1762 {
1763 /* The current task has just been suspended. */
1764 configASSERT( uxSchedulerSuspended == 0 );
1765 portYIELD_WITHIN_API();
1766 }
1767 else
1768 {
1769 /* The scheduler is not running, but the task that was pointed
1770 to by pxCurrentTCB has just been suspended and pxCurrentTCB
1771 must be adjusted to point to a different task. */
1772 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
1773 {
1774 /* No other tasks are ready, so set pxCurrentTCB back to
1775 NULL so when the next task is created pxCurrentTCB will
1776 be set to point to it no matter what its relative priority
1777 is. */
1778 pxCurrentTCB = NULL;
1779 }
1780 else
1781 {
1782 vTaskSwitchContext();
1783 }
1784 }
1785 }
1786 else
1787 {
1788 mtCOVERAGE_TEST_MARKER();
1789 }
1790 }
1791
1792#endif /* INCLUDE_vTaskSuspend */
1793/*-----------------------------------------------------------*/
1794
1795#if ( INCLUDE_vTaskSuspend == 1 )
1796
1797 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
1798 {
1799 BaseType_t xReturn = pdFALSE;
1800 const TCB_t * const pxTCB = xTask;
1801
1802 /* Accesses xPendingReadyList so must be called from a critical
1803 section. */
1804
1805 /* It does not make sense to check if the calling task is suspended. */
1806 configASSERT( xTask );
1807
1808 /* Is the task being resumed actually in the suspended list? */
1809 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
1810 {
1811 /* Has the task already been resumed from within an ISR? */
1812 if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
1813 {
1814 /* Is it in the suspended list because it is in the Suspended
1815 state, or because is is blocked with no timeout? */
1816 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */
1817 {
1818 xReturn = pdTRUE;
1819 }
1820 else
1821 {
1822 mtCOVERAGE_TEST_MARKER();
1823 }
1824 }
1825 else
1826 {
1827 mtCOVERAGE_TEST_MARKER();
1828 }
1829 }
1830 else
1831 {
1832 mtCOVERAGE_TEST_MARKER();
1833 }
1834
1835 return xReturn;
1836 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1837
1838#endif /* INCLUDE_vTaskSuspend */
1839/*-----------------------------------------------------------*/
1840
1841#if ( INCLUDE_vTaskSuspend == 1 )
1842
1843 void vTaskResume( TaskHandle_t xTaskToResume )
1844 {
1845 TCB_t * const pxTCB = xTaskToResume;
1846
1847 /* It does not make sense to resume the calling task. */
1848 configASSERT( xTaskToResume );
1849
1850 /* The parameter cannot be NULL as it is impossible to resume the
1851 currently executing task. */
1852 if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) )
1853 {
1854 taskENTER_CRITICAL();
1855 {
1856 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
1857 {
1858 traceTASK_RESUME( pxTCB );
1859
1860 /* The ready list can be accessed even if the scheduler is
1861 suspended because this is inside a critical section. */
1862 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
1863 prvAddTaskToReadyList( pxTCB );
1864
1865 /* A higher priority task may have just been resumed. */
1866 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
1867 {
1868 /* This yield may not cause the task just resumed to run,
1869 but will leave the lists in the correct state for the
1870 next yield. */
1871 taskYIELD_IF_USING_PREEMPTION();
1872 }
1873 else
1874 {
1875 mtCOVERAGE_TEST_MARKER();
1876 }
1877 }
1878 else
1879 {
1880 mtCOVERAGE_TEST_MARKER();
1881 }
1882 }
1883 taskEXIT_CRITICAL();
1884 }
1885 else
1886 {
1887 mtCOVERAGE_TEST_MARKER();
1888 }
1889 }
1890
1891#endif /* INCLUDE_vTaskSuspend */
1892
1893/*-----------------------------------------------------------*/
1894
1895#if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
1896
1897 BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
1898 {
1899 BaseType_t xYieldRequired = pdFALSE;
1900 TCB_t * const pxTCB = xTaskToResume;
1901 UBaseType_t uxSavedInterruptStatus;
1902
1903 configASSERT( xTaskToResume );
1904
1905 /* RTOS ports that support interrupt nesting have the concept of a
1906 maximum system call (or maximum API call) interrupt priority.
1907 Interrupts that are above the maximum system call priority are keep
1908 permanently enabled, even when the RTOS kernel is in a critical section,
1909 but cannot make any calls to FreeRTOS API functions. If configASSERT()
1910 is defined in FreeRTOSConfig.h then
1911 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1912 failure if a FreeRTOS API function is called from an interrupt that has
1913 been assigned a priority above the configured maximum system call
1914 priority. Only FreeRTOS functions that end in FromISR can be called
1915 from interrupts that have been assigned a priority at or (logically)
1916 below the maximum system call interrupt priority. FreeRTOS maintains a
1917 separate interrupt safe API to ensure interrupt entry is as fast and as
1918 simple as possible. More information (albeit Cortex-M specific) is
1919 provided on the following link:
1920 https://www.freertos.org/RTOS-Cortex-M3-M4.html */
1921 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1922
1923 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1924 {
1925 if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
1926 {
1927 traceTASK_RESUME_FROM_ISR( pxTCB );
1928
1929 /* Check the ready lists can be accessed. */
1930 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
1931 {
1932 /* Ready lists can be accessed so move the task from the
1933 suspended list to the ready list directly. */
1934 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
1935 {
1936 xYieldRequired = pdTRUE;
1937 }
1938 else
1939 {
1940 mtCOVERAGE_TEST_MARKER();
1941 }
1942
1943 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
1944 prvAddTaskToReadyList( pxTCB );
1945 }
1946 else
1947 {
1948 /* The delayed or ready lists cannot be accessed so the task
1949 is held in the pending ready list until the scheduler is
1950 unsuspended. */
1951 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
1952 }
1953 }
1954 else
1955 {
1956 mtCOVERAGE_TEST_MARKER();
1957 }
1958 }
1959 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1960
1961 return xYieldRequired;
1962 }
1963
1964#endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
1965/*-----------------------------------------------------------*/
1966
1967void vTaskStartScheduler( void )
1968{
1969BaseType_t xReturn;
1970
1971 /* Add the idle task at the lowest priority. */
1972 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
1973 {
1974 StaticTask_t *pxIdleTaskTCBBuffer = NULL;
1975 StackType_t *pxIdleTaskStackBuffer = NULL;
1976 uint32_t ulIdleTaskStackSize;
1977
1978 /* The Idle task is created using user provided RAM - obtain the
1979 address of the RAM then create the idle task. */
1980 vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
1981 xIdleTaskHandle = xTaskCreateStatic( prvIdleTask,
1982 configIDLE_TASK_NAME,
1983 ulIdleTaskStackSize,
1984 ( void * ) NULL, /*lint !e961. The cast is not redundant for all compilers. */
1985 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
1986 pxIdleTaskStackBuffer,
1987 pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
1988
1989 if( xIdleTaskHandle != NULL )
1990 {
1991 xReturn = pdPASS;
1992 }
1993 else
1994 {
1995 xReturn = pdFAIL;
1996 }
1997 }
1998 #else
1999 {
2000 /* The Idle task is being created using dynamically allocated RAM. */
2001 xReturn = xTaskCreate( prvIdleTask,
2002 configIDLE_TASK_NAME,
2003 configMINIMAL_STACK_SIZE,
2004 ( void * ) NULL,
2005 portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
2006 &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
2007 }
2008 #endif /* configSUPPORT_STATIC_ALLOCATION */
2009
2010 #if ( configUSE_TIMERS == 1 )
2011 {
2012 if( xReturn == pdPASS )
2013 {
2014 xReturn = xTimerCreateTimerTask();
2015 }
2016 else
2017 {
2018 mtCOVERAGE_TEST_MARKER();
2019 }
2020 }
2021 #endif /* configUSE_TIMERS */
2022
2023 if( xReturn == pdPASS )
2024 {
2025 /* freertos_tasks_c_additions_init() should only be called if the user
2026 definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
2027 the only macro called by the function. */
2028 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
2029 {
2030 freertos_tasks_c_additions_init();
2031 }
2032 #endif
2033
2034 /* Interrupts are turned off here, to ensure a tick does not occur
2035 before or during the call to xPortStartScheduler(). The stacks of
2036 the created tasks contain a status word with interrupts switched on
2037 so interrupts will automatically get re-enabled when the first task
2038 starts to run. */
2039 portDISABLE_INTERRUPTS();
2040
2041 #if ( configUSE_NEWLIB_REENTRANT == 1 )
2042 {
2043 /* Switch Newlib's _impure_ptr variable to point to the _reent
2044 structure specific to the task that will run first. */
2045 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
2046 }
2047 #endif /* configUSE_NEWLIB_REENTRANT */
2048
2049 xNextTaskUnblockTime = portMAX_DELAY;
2050 xSchedulerRunning = pdTRUE;
2051 xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
2052
2053 /* If configGENERATE_RUN_TIME_STATS is defined then the following
2054 macro must be defined to configure the timer/counter used to generate
2055 the run time counter time base. NOTE: If configGENERATE_RUN_TIME_STATS
2056 is set to 0 and the following line fails to build then ensure you do not
2057 have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
2058 FreeRTOSConfig.h file. */
2059 portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();
2060
2061 traceTASK_SWITCHED_IN();
2062
2063 /* Setting up the timer tick is hardware specific and thus in the
2064 portable interface. */
2065 if( xPortStartScheduler() != pdFALSE )
2066 {
2067 /* Should not reach here as if the scheduler is running the
2068 function will not return. */
2069 }
2070 else
2071 {
2072 /* Should only reach here if a task calls xTaskEndScheduler(). */
2073 }
2074 }
2075 else
2076 {
2077 /* This line will only be reached if the kernel could not be started,
2078 because there was not enough FreeRTOS heap to create the idle task
2079 or the timer task. */
2080 configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
2081 }
2082
2083 /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
2084 meaning xIdleTaskHandle is not used anywhere else. */
2085 ( void ) xIdleTaskHandle;
2086}
2087/*-----------------------------------------------------------*/
2088
2089void vTaskEndScheduler( void )
2090{
2091 /* Stop the scheduler interrupts and call the portable scheduler end
2092 routine so the original ISRs can be restored if necessary. The port
2093 layer must ensure interrupts enable bit is left in the correct state. */
2094 portDISABLE_INTERRUPTS();
2095 xSchedulerRunning = pdFALSE;
2096 vPortEndScheduler();
2097}
2098/*----------------------------------------------------------*/
2099
2100void vTaskSuspendAll( void )
2101{
2102 /* A critical section is not required as the variable is of type
2103 BaseType_t. Please read Richard Barry's reply in the following link to a
2104 post in the FreeRTOS support forum before reporting this as a bug! -
2105 http://goo.gl/wu4acr */
2106 ++uxSchedulerSuspended;
2107 portMEMORY_BARRIER();
2108}
2109/*----------------------------------------------------------*/
2110
2111#if ( configUSE_TICKLESS_IDLE != 0 )
2112
2113 static TickType_t prvGetExpectedIdleTime( void )
2114 {
2115 TickType_t xReturn;
2116 UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;
2117
2118 /* uxHigherPriorityReadyTasks takes care of the case where
2119 configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
2120 task that are in the Ready state, even though the idle task is
2121 running. */
2122 #if( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
2123 {
2124 if( uxTopReadyPriority > tskIDLE_PRIORITY )
2125 {
2126 uxHigherPriorityReadyTasks = pdTRUE;
2127 }
2128 }
2129 #else
2130 {
2131 const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;
2132
2133 /* When port optimised task selection is used the uxTopReadyPriority
2134 variable is used as a bit map. If bits other than the least
2135 significant bit are set then there are tasks that have a priority
2136 above the idle priority that are in the Ready state. This takes
2137 care of the case where the co-operative scheduler is in use. */
2138 if( uxTopReadyPriority > uxLeastSignificantBit )
2139 {
2140 uxHigherPriorityReadyTasks = pdTRUE;
2141 }
2142 }
2143 #endif
2144
2145 if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
2146 {
2147 xReturn = 0;
2148 }
2149 else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
2150 {
2151 /* There are other idle priority tasks in the ready state. If
2152 time slicing is used then the very next tick interrupt must be
2153 processed. */
2154 xReturn = 0;
2155 }
2156 else if( uxHigherPriorityReadyTasks != pdFALSE )
2157 {
2158 /* There are tasks in the Ready state that have a priority above the
2159 idle priority. This path can only be reached if
2160 configUSE_PREEMPTION is 0. */
2161 xReturn = 0;
2162 }
2163 else
2164 {
2165 xReturn = xNextTaskUnblockTime - xTickCount;
2166 }
2167
2168 return xReturn;
2169 }
2170
2171#endif /* configUSE_TICKLESS_IDLE */
2172/*----------------------------------------------------------*/
2173
2174BaseType_t xTaskResumeAll( void )
2175{
2176TCB_t *pxTCB = NULL;
2177BaseType_t xAlreadyYielded = pdFALSE;
2178
2179 /* If uxSchedulerSuspended is zero then this function does not match a
2180 previous call to vTaskSuspendAll(). */
2181 configASSERT( uxSchedulerSuspended );
2182
2183 /* It is possible that an ISR caused a task to be removed from an event
2184 list while the scheduler was suspended. If this was the case then the
2185 removed task will have been added to the xPendingReadyList. Once the
2186 scheduler has been resumed it is safe to move all the pending ready
2187 tasks from this list into their appropriate ready list. */
2188 taskENTER_CRITICAL();
2189 {
2190 --uxSchedulerSuspended;
2191
2192 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
2193 {
2194 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
2195 {
2196 /* Move any readied tasks from the pending list into the
2197 appropriate ready list. */
2198 while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
2199 {
2200 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2201 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2202 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2203 prvAddTaskToReadyList( pxTCB );
2204
2205 /* If the moved task has a priority higher than the current
2206 task then a yield must be performed. */
2207 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
2208 {
2209 xYieldPending = pdTRUE;
2210 }
2211 else
2212 {
2213 mtCOVERAGE_TEST_MARKER();
2214 }
2215 }
2216
2217 if( pxTCB != NULL )
2218 {
2219 /* A task was unblocked while the scheduler was suspended,
2220 which may have prevented the next unblock time from being
2221 re-calculated, in which case re-calculate it now. Mainly
2222 important for low power tickless implementations, where
2223 this can prevent an unnecessary exit from low power
2224 state. */
2225 prvResetNextTaskUnblockTime();
2226 }
2227
2228 /* If any ticks occurred while the scheduler was suspended then
2229 they should be processed now. This ensures the tick count does
2230 not slip, and that any delayed tasks are resumed at the correct
2231 time. */
2232 {
2233 UBaseType_t uxPendedCounts = uxPendedTicks; /* Non-volatile copy. */
2234
2235 if( uxPendedCounts > ( UBaseType_t ) 0U )
2236 {
2237 do
2238 {
2239 if( xTaskIncrementTick() != pdFALSE )
2240 {
2241 xYieldPending = pdTRUE;
2242 }
2243 else
2244 {
2245 mtCOVERAGE_TEST_MARKER();
2246 }
2247 --uxPendedCounts;
2248 } while( uxPendedCounts > ( UBaseType_t ) 0U );
2249
2250 uxPendedTicks = 0;
2251 }
2252 else
2253 {
2254 mtCOVERAGE_TEST_MARKER();
2255 }
2256 }
2257
2258 if( xYieldPending != pdFALSE )
2259 {
2260 #if( configUSE_PREEMPTION != 0 )
2261 {
2262 xAlreadyYielded = pdTRUE;
2263 }
2264 #endif
2265 taskYIELD_IF_USING_PREEMPTION();
2266 }
2267 else
2268 {
2269 mtCOVERAGE_TEST_MARKER();
2270 }
2271 }
2272 }
2273 else
2274 {
2275 mtCOVERAGE_TEST_MARKER();
2276 }
2277 }
2278 taskEXIT_CRITICAL();
2279
2280 return xAlreadyYielded;
2281}
2282/*-----------------------------------------------------------*/
2283
2284TickType_t xTaskGetTickCount( void )
2285{
2286TickType_t xTicks;
2287
2288 /* Critical section required if running on a 16 bit processor. */
2289 portTICK_TYPE_ENTER_CRITICAL();
2290 {
2291 xTicks = xTickCount;
2292 }
2293 portTICK_TYPE_EXIT_CRITICAL();
2294
2295 return xTicks;
2296}
2297/*-----------------------------------------------------------*/
2298
2299TickType_t xTaskGetTickCountFromISR( void )
2300{
2301TickType_t xReturn;
2302UBaseType_t uxSavedInterruptStatus;
2303
2304 /* RTOS ports that support interrupt nesting have the concept of a maximum
2305 system call (or maximum API call) interrupt priority. Interrupts that are
2306 above the maximum system call priority are kept permanently enabled, even
2307 when the RTOS kernel is in a critical section, but cannot make any calls to
2308 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2309 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2310 failure if a FreeRTOS API function is called from an interrupt that has been
2311 assigned a priority above the configured maximum system call priority.
2312 Only FreeRTOS functions that end in FromISR can be called from interrupts
2313 that have been assigned a priority at or (logically) below the maximum
2314 system call interrupt priority. FreeRTOS maintains a separate interrupt
2315 safe API to ensure interrupt entry is as fast and as simple as possible.
2316 More information (albeit Cortex-M specific) is provided on the following
2317 link: https://www.freertos.org/RTOS-Cortex-M3-M4.html */
2318 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2319
2320 uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
2321 {
2322 xReturn = xTickCount;
2323 }
2324 portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
2325
2326 return xReturn;
2327}
2328/*-----------------------------------------------------------*/
2329
2330UBaseType_t uxTaskGetNumberOfTasks( void )
2331{
2332 /* A critical section is not required because the variables are of type
2333 BaseType_t. */
2334 return uxCurrentNumberOfTasks;
2335}
2336/*-----------------------------------------------------------*/
2337
2338char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2339{
2340TCB_t *pxTCB;
2341
2342 /* If null is passed in here then the name of the calling task is being
2343 queried. */
2344 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
2345 configASSERT( pxTCB );
2346 return &( pxTCB->pcTaskName[ 0 ] );
2347}
2348/*-----------------------------------------------------------*/
2349
2350#if ( INCLUDE_xTaskGetHandle == 1 )
2351
2352 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] )
2353 {
2354 TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL;
2355 UBaseType_t x;
2356 char cNextChar;
2357 BaseType_t xBreakLoop;
2358
2359 /* This function is called with the scheduler suspended. */
2360
2361 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
2362 {
2363 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2364
2365 do
2366 {
2367 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2368
2369 /* Check each character in the name looking for a match or
2370 mismatch. */
2371 xBreakLoop = pdFALSE;
2372 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
2373 {
2374 cNextChar = pxNextTCB->pcTaskName[ x ];
2375
2376 if( cNextChar != pcNameToQuery[ x ] )
2377 {
2378 /* Characters didn't match. */
2379 xBreakLoop = pdTRUE;
2380 }
2381 else if( cNextChar == ( char ) 0x00 )
2382 {
2383 /* Both strings terminated, a match must have been
2384 found. */
2385 pxReturn = pxNextTCB;
2386 xBreakLoop = pdTRUE;
2387 }
2388 else
2389 {
2390 mtCOVERAGE_TEST_MARKER();
2391 }
2392
2393 if( xBreakLoop != pdFALSE )
2394 {
2395 break;
2396 }
2397 }
2398
2399 if( pxReturn != NULL )
2400 {
2401 /* The handle has been found. */
2402 break;
2403 }
2404
2405 } while( pxNextTCB != pxFirstTCB );
2406 }
2407 else
2408 {
2409 mtCOVERAGE_TEST_MARKER();
2410 }
2411
2412 return pxReturn;
2413 }
2414
2415#endif /* INCLUDE_xTaskGetHandle */
2416/*-----------------------------------------------------------*/
2417
2418#if ( INCLUDE_xTaskGetHandle == 1 )
2419
2420 TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2421 {
2422 UBaseType_t uxQueue = configMAX_PRIORITIES;
2423 TCB_t* pxTCB;
2424
2425 /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
2426 configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
2427
2428 vTaskSuspendAll();
2429 {
2430 /* Search the ready lists. */
2431 do
2432 {
2433 uxQueue--;
2434 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );
2435
2436 if( pxTCB != NULL )
2437 {
2438 /* Found the handle. */
2439 break;
2440 }
2441
2442 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2443
2444 /* Search the delayed lists. */
2445 if( pxTCB == NULL )
2446 {
2447 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
2448 }
2449
2450 if( pxTCB == NULL )
2451 {
2452 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
2453 }
2454
2455 #if ( INCLUDE_vTaskSuspend == 1 )
2456 {
2457 if( pxTCB == NULL )
2458 {
2459 /* Search the suspended list. */
2460 pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
2461 }
2462 }
2463 #endif
2464
2465 #if( INCLUDE_vTaskDelete == 1 )
2466 {
2467 if( pxTCB == NULL )
2468 {
2469 /* Search the deleted list. */
2470 pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
2471 }
2472 }
2473 #endif
2474 }
2475 ( void ) xTaskResumeAll();
2476
2477 return pxTCB;
2478 }
2479
2480#endif /* INCLUDE_xTaskGetHandle */
2481/*-----------------------------------------------------------*/
2482
2483#if ( configUSE_TRACE_FACILITY == 1 )
2484
2485 UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
2486 {
2487 UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;
2488
2489 vTaskSuspendAll();
2490 {
2491 /* Is there a space in the array for each task in the system? */
2492 if( uxArraySize >= uxCurrentNumberOfTasks )
2493 {
2494 /* Fill in an TaskStatus_t structure with information on each
2495 task in the Ready state. */
2496 do
2497 {
2498 uxQueue--;
2499 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
2500
2501 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2502
2503 /* Fill in an TaskStatus_t structure with information on each
2504 task in the Blocked state. */
2505 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
2506 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );
2507
2508 #if( INCLUDE_vTaskDelete == 1 )
2509 {
2510 /* Fill in an TaskStatus_t structure with information on
2511 each task that has been deleted but not yet cleaned up. */
2512 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
2513 }
2514 #endif
2515
2516 #if ( INCLUDE_vTaskSuspend == 1 )
2517 {
2518 /* Fill in an TaskStatus_t structure with information on
2519 each task in the Suspended state. */
2520 uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
2521 }
2522 #endif
2523
2524 #if ( configGENERATE_RUN_TIME_STATS == 1)
2525 {
2526 if( pulTotalRunTime != NULL )
2527 {
2528 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2529 portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
2530 #else
2531 *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
2532 #endif
2533 }
2534 }
2535 #else
2536 {
2537 if( pulTotalRunTime != NULL )
2538 {
2539 *pulTotalRunTime = 0;
2540 }
2541 }
2542 #endif
2543 }
2544 else
2545 {
2546 mtCOVERAGE_TEST_MARKER();
2547 }
2548 }
2549 ( void ) xTaskResumeAll();
2550
2551 return uxTask;
2552 }
2553
2554#endif /* configUSE_TRACE_FACILITY */
2555/*----------------------------------------------------------*/
2556
2557#if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
2558
2559 TaskHandle_t xTaskGetIdleTaskHandle( void )
2560 {
2561 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
2562 started, then xIdleTaskHandle will be NULL. */
2563 configASSERT( ( xIdleTaskHandle != NULL ) );
2564 return xIdleTaskHandle;
2565 }
2566
2567#endif /* INCLUDE_xTaskGetIdleTaskHandle */
2568/*----------------------------------------------------------*/
2569
2570/* This conditional compilation should use inequality to 0, not equality to 1.
2571This is to ensure vTaskStepTick() is available when user defined low power mode
2572implementations require configUSE_TICKLESS_IDLE to be set to a value other than
25731. */
2574#if ( configUSE_TICKLESS_IDLE != 0 )
2575
2576 void vTaskStepTick( const TickType_t xTicksToJump )
2577 {
2578 /* Correct the tick count value after a period during which the tick
2579 was suppressed. Note this does *not* call the tick hook function for
2580 each stepped tick. */
2581 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
2582 xTickCount += xTicksToJump;
2583 traceINCREASE_TICK_COUNT( xTicksToJump );
2584 }
2585
2586#endif /* configUSE_TICKLESS_IDLE */
2587/*----------------------------------------------------------*/
2588
2589#if ( INCLUDE_xTaskAbortDelay == 1 )
2590
2591 BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
2592 {
2593 TCB_t *pxTCB = xTask;
2594 BaseType_t xReturn;
2595
2596 configASSERT( pxTCB );
2597
2598 vTaskSuspendAll();
2599 {
2600 /* A task can only be prematurely removed from the Blocked state if
2601 it is actually in the Blocked state. */
2602 if( eTaskGetState( xTask ) == eBlocked )
2603 {
2604 xReturn = pdPASS;
2605
2606 /* Remove the reference to the task from the blocked list. An
2607 interrupt won't touch the xStateListItem because the
2608 scheduler is suspended. */
2609 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2610
2611 /* Is the task waiting on an event also? If so remove it from
2612 the event list too. Interrupts can touch the event list item,
2613 even though the scheduler is suspended, so a critical section
2614 is used. */
2615 taskENTER_CRITICAL();
2616 {
2617 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2618 {
2619 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2620 pxTCB->ucDelayAborted = pdTRUE;
2621 }
2622 else
2623 {
2624 mtCOVERAGE_TEST_MARKER();
2625 }
2626 }
2627 taskEXIT_CRITICAL();
2628
2629 /* Place the unblocked task into the appropriate ready list. */
2630 prvAddTaskToReadyList( pxTCB );
2631
2632 /* A task being unblocked cannot cause an immediate context
2633 switch if preemption is turned off. */
2634 #if ( configUSE_PREEMPTION == 1 )
2635 {
2636 /* Preemption is on, but a context switch should only be
2637 performed if the unblocked task has a priority that is
2638 equal to or higher than the currently executing task. */
2639 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
2640 {
2641 /* Pend the yield to be performed when the scheduler
2642 is unsuspended. */
2643 xYieldPending = pdTRUE;
2644 }
2645 else
2646 {
2647 mtCOVERAGE_TEST_MARKER();
2648 }
2649 }
2650 #endif /* configUSE_PREEMPTION */
2651 }
2652 else
2653 {
2654 xReturn = pdFAIL;
2655 }
2656 }
2657 ( void ) xTaskResumeAll();
2658
2659 return xReturn;
2660 }
2661
2662#endif /* INCLUDE_xTaskAbortDelay */
2663/*----------------------------------------------------------*/
2664
2665BaseType_t xTaskIncrementTick( void )
2666{
2667TCB_t * pxTCB;
2668TickType_t xItemValue;
2669BaseType_t xSwitchRequired = pdFALSE;
2670
2671 /* Called by the portable layer each time a tick interrupt occurs.
2672 Increments the tick then checks to see if the new tick value will cause any
2673 tasks to be unblocked. */
2674 traceTASK_INCREMENT_TICK( xTickCount );
2675 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
2676 {
2677 /* Minor optimisation. The tick count cannot change in this
2678 block. */
2679 const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;
2680
2681 /* Increment the RTOS tick, switching the delayed and overflowed
2682 delayed lists if it wraps to 0. */
2683 xTickCount = xConstTickCount;
2684
2685 if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
2686 {
2687 taskSWITCH_DELAYED_LISTS();
2688 }
2689 else
2690 {
2691 mtCOVERAGE_TEST_MARKER();
2692 }
2693
2694 /* See if this tick has made a timeout expire. Tasks are stored in
2695 the queue in the order of their wake time - meaning once one task
2696 has been found whose block time has not expired there is no need to
2697 look any further down the list. */
2698 if( xConstTickCount >= xNextTaskUnblockTime )
2699 {
2700 for( ;; )
2701 {
2702 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
2703 {
2704 /* The delayed list is empty. Set xNextTaskUnblockTime
2705 to the maximum possible value so it is extremely
2706 unlikely that the
2707 if( xTickCount >= xNextTaskUnblockTime ) test will pass
2708 next time through. */
2709 xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2710 break;
2711 }
2712 else
2713 {
2714 /* The delayed list is not empty, get the value of the
2715 item at the head of the delayed list. This is the time
2716 at which the task at the head of the delayed list must
2717 be removed from the Blocked state. */
2718 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2719 xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );
2720
2721 if( xConstTickCount < xItemValue )
2722 {
2723 /* It is not time to unblock this item yet, but the
2724 item value is the time at which the task at the head
2725 of the blocked list must be removed from the Blocked
2726 state - so record the item value in
2727 xNextTaskUnblockTime. */
2728 xNextTaskUnblockTime = xItemValue;
2729 break; /*lint !e9011 Code structure here is deedmed easier to understand with multiple breaks. */
2730 }
2731 else
2732 {
2733 mtCOVERAGE_TEST_MARKER();
2734 }
2735
2736 /* It is time to remove the item from the Blocked state. */
2737 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2738
2739 /* Is the task waiting on an event also? If so remove
2740 it from the event list. */
2741 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
2742 {
2743 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2744 }
2745 else
2746 {
2747 mtCOVERAGE_TEST_MARKER();
2748 }
2749
2750 /* Place the unblocked task into the appropriate ready
2751 list. */
2752 prvAddTaskToReadyList( pxTCB );
2753
2754 /* A task being unblocked cannot cause an immediate
2755 context switch if preemption is turned off. */
2756 #if ( configUSE_PREEMPTION == 1 )
2757 {
2758 /* Preemption is on, but a context switch should
2759 only be performed if the unblocked task has a
2760 priority that is equal to or higher than the
2761 currently executing task. */
2762 if( pxTCB->uxPriority >= pxCurrentTCB->uxPriority )
2763 {
2764 xSwitchRequired = pdTRUE;
2765 }
2766 else
2767 {
2768 mtCOVERAGE_TEST_MARKER();
2769 }
2770 }
2771 #endif /* configUSE_PREEMPTION */
2772 }
2773 }
2774 }
2775
2776 /* Tasks of equal priority to the currently running task will share
2777 processing time (time slice) if preemption is on, and the application
2778 writer has not explicitly turned time slicing off. */
2779 #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
2780 {
2781 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 )
2782 {
2783 xSwitchRequired = pdTRUE;
2784 }
2785 else
2786 {
2787 mtCOVERAGE_TEST_MARKER();
2788 }
2789 }
2790 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
2791
2792 #if ( configUSE_TICK_HOOK == 1 )
2793 {
2794 /* Guard against the tick hook being called when the pended tick
2795 count is being unwound (when the scheduler is being unlocked). */
2796 if( uxPendedTicks == ( UBaseType_t ) 0U )
2797 {
2798 vApplicationTickHook();
2799 }
2800 else
2801 {
2802 mtCOVERAGE_TEST_MARKER();
2803 }
2804 }
2805 #endif /* configUSE_TICK_HOOK */
2806 }
2807 else
2808 {
2809 ++uxPendedTicks;
2810
2811 /* The tick hook gets called at regular intervals, even if the
2812 scheduler is locked. */
2813 #if ( configUSE_TICK_HOOK == 1 )
2814 {
2815 vApplicationTickHook();
2816 }
2817 #endif
2818 }
2819
2820 #if ( configUSE_PREEMPTION == 1 )
2821 {
2822 if( xYieldPending != pdFALSE )
2823 {
2824 xSwitchRequired = pdTRUE;
2825 }
2826 else
2827 {
2828 mtCOVERAGE_TEST_MARKER();
2829 }
2830 }
2831 #endif /* configUSE_PREEMPTION */
2832
2833 return xSwitchRequired;
2834}
2835/*-----------------------------------------------------------*/
2836
2837#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2838
2839 void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
2840 {
2841 TCB_t *xTCB;
2842
2843 /* If xTask is NULL then it is the task hook of the calling task that is
2844 getting set. */
2845 if( xTask == NULL )
2846 {
2847 xTCB = ( TCB_t * ) pxCurrentTCB;
2848 }
2849 else
2850 {
2851 xTCB = xTask;
2852 }
2853
2854 /* Save the hook function in the TCB. A critical section is required as
2855 the value can be accessed from an interrupt. */
2856 taskENTER_CRITICAL();
2857 {
2858 xTCB->pxTaskTag = pxHookFunction;
2859 }
2860 taskEXIT_CRITICAL();
2861 }
2862
2863#endif /* configUSE_APPLICATION_TASK_TAG */
2864/*-----------------------------------------------------------*/
2865
2866#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2867
2868 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
2869 {
2870 TCB_t *pxTCB;
2871 TaskHookFunction_t xReturn;
2872
2873 /* If xTask is NULL then set the calling task's hook. */
2874 pxTCB = prvGetTCBFromHandle( xTask );
2875
2876 /* Save the hook function in the TCB. A critical section is required as
2877 the value can be accessed from an interrupt. */
2878 taskENTER_CRITICAL();
2879 {
2880 xReturn = pxTCB->pxTaskTag;
2881 }
2882 taskEXIT_CRITICAL();
2883
2884 return xReturn;
2885 }
2886
2887#endif /* configUSE_APPLICATION_TASK_TAG */
2888/*-----------------------------------------------------------*/
2889
2890#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2891
2892 TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
2893 {
2894 TCB_t *pxTCB;
2895 TaskHookFunction_t xReturn;
2896 UBaseType_t uxSavedInterruptStatus;
2897
2898 /* If xTask is NULL then set the calling task's hook. */
2899 pxTCB = prvGetTCBFromHandle( xTask );
2900
2901 /* Save the hook function in the TCB. A critical section is required as
2902 the value can be accessed from an interrupt. */
2903 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
2904 {
2905 xReturn = pxTCB->pxTaskTag;
2906 }
2907 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
2908
2909 return xReturn;
2910 }
2911
2912#endif /* configUSE_APPLICATION_TASK_TAG */
2913/*-----------------------------------------------------------*/
2914
2915#if ( configUSE_APPLICATION_TASK_TAG == 1 )
2916
2917 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
2918 {
2919 TCB_t *xTCB;
2920 BaseType_t xReturn;
2921
2922 /* If xTask is NULL then we are calling our own task hook. */
2923 if( xTask == NULL )
2924 {
2925 xTCB = pxCurrentTCB;
2926 }
2927 else
2928 {
2929 xTCB = xTask;
2930 }
2931
2932 if( xTCB->pxTaskTag != NULL )
2933 {
2934 xReturn = xTCB->pxTaskTag( pvParameter );
2935 }
2936 else
2937 {
2938 xReturn = pdFAIL;
2939 }
2940
2941 return xReturn;
2942 }
2943
2944#endif /* configUSE_APPLICATION_TASK_TAG */
2945/*-----------------------------------------------------------*/
2946
2947void vTaskSwitchContext( void )
2948{
2949 if( uxSchedulerSuspended != ( UBaseType_t ) pdFALSE )
2950 {
2951 /* The scheduler is currently suspended - do not allow a context
2952 switch. */
2953 xYieldPending = pdTRUE;
2954 }
2955 else
2956 {
2957 xYieldPending = pdFALSE;
2958 traceTASK_SWITCHED_OUT();
2959
2960 #if ( configGENERATE_RUN_TIME_STATS == 1 )
2961 {
2962 #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
2963 portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
2964 #else
2965 ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
2966 #endif
2967
2968 /* Add the amount of time the task has been running to the
2969 accumulated time so far. The time the task started running was
2970 stored in ulTaskSwitchedInTime. Note that there is no overflow
2971 protection here so count values are only valid until the timer
2972 overflows. The guard against negative values is to protect
2973 against suspect run time stat counter implementations - which
2974 are provided by the application, not the kernel. */
2975 if( ulTotalRunTime > ulTaskSwitchedInTime )
2976 {
2977 pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );
2978 }
2979 else
2980 {
2981 mtCOVERAGE_TEST_MARKER();
2982 }
2983 ulTaskSwitchedInTime = ulTotalRunTime;
2984 }
2985 #endif /* configGENERATE_RUN_TIME_STATS */
2986
2987 /* Check for stack overflow, if configured. */
2988 taskCHECK_FOR_STACK_OVERFLOW();
2989
2990 /* Before the currently running task is switched out, save its errno. */
2991 #if( configUSE_POSIX_ERRNO == 1 )
2992 {
2993 pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
2994 }
2995 #endif
2996
2997 /* Select a new task to run using either the generic C or port
2998 optimised asm code. */
2999 taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3000 traceTASK_SWITCHED_IN();
3001
3002 /* After the new task is switched in, update the global errno. */
3003 #if( configUSE_POSIX_ERRNO == 1 )
3004 {
3005 FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
3006 }
3007 #endif
3008
3009 #if ( configUSE_NEWLIB_REENTRANT == 1 )
3010 {
3011 /* Switch Newlib's _impure_ptr variable to point to the _reent
3012 structure specific to this task. */
3013 _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
3014 }
3015 #endif /* configUSE_NEWLIB_REENTRANT */
3016 }
3017}
3018/*-----------------------------------------------------------*/
3019
3020void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
3021{
3022 configASSERT( pxEventList );
3023
3024 /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
3025 SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
3026
3027 /* Place the event list item of the TCB in the appropriate event list.
3028 This is placed in the list in priority order so the highest priority task
3029 is the first to be woken by the event. The queue that contains the event
3030 list is locked, preventing simultaneous access from interrupts. */
3031 vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );
3032
3033 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
3034}
3035/*-----------------------------------------------------------*/
3036
3037void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
3038{
3039 configASSERT( pxEventList );
3040
3041 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3042 the event groups implementation. */
3043 configASSERT( uxSchedulerSuspended != 0 );
3044
3045 /* Store the item value in the event list item. It is safe to access the
3046 event list item here as interrupts won't access the event list item of a
3047 task that is not in the Blocked state. */
3048 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
3049
3050 /* Place the event list item of the TCB at the end of the appropriate event
3051 list. It is safe to access the event list here because it is part of an
3052 event group implementation - and interrupts don't access event groups
3053 directly (instead they access them indirectly by pending function calls to
3054 the task level). */
3055 vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
3056
3057 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
3058}
3059/*-----------------------------------------------------------*/
3060
3061#if( configUSE_TIMERS == 1 )
3062
3063 void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
3064 {
3065 configASSERT( pxEventList );
3066
3067 /* This function should not be called by application code hence the
3068 'Restricted' in its name. It is not part of the public API. It is
3069 designed for use by kernel code, and has special calling requirements -
3070 it should be called with the scheduler suspended. */
3071
3072
3073 /* Place the event list item of the TCB in the appropriate event list.
3074 In this case it is assume that this is the only task that is going to
3075 be waiting on this event list, so the faster vListInsertEnd() function
3076 can be used in place of vListInsert. */
3077 vListInsertEnd( pxEventList, &( pxCurrentTCB->xEventListItem ) );
3078
3079 /* If the task should block indefinitely then set the block time to a
3080 value that will be recognised as an indefinite delay inside the
3081 prvAddCurrentTaskToDelayedList() function. */
3082 if( xWaitIndefinitely != pdFALSE )
3083 {
3084 xTicksToWait = portMAX_DELAY;
3085 }
3086
3087 traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
3088 prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
3089 }
3090
3091#endif /* configUSE_TIMERS */
3092/*-----------------------------------------------------------*/
3093
3094BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
3095{
3096TCB_t *pxUnblockedTCB;
3097BaseType_t xReturn;
3098
3099 /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
3100 called from a critical section within an ISR. */
3101
3102 /* The event list is sorted in priority order, so the first in the list can
3103 be removed as it is known to be the highest priority. Remove the TCB from
3104 the delayed list, and add it to the ready list.
3105
3106 If an event is for a queue that is locked then this function will never
3107 get called - the lock count on the queue will get modified instead. This
3108 means exclusive access to the event list is guaranteed here.
3109
3110 This function assumes that a check has already been made to ensure that
3111 pxEventList is not empty. */
3112 pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3113 configASSERT( pxUnblockedTCB );
3114 ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
3115
3116 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
3117 {
3118 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
3119 prvAddTaskToReadyList( pxUnblockedTCB );
3120
3121 #if( configUSE_TICKLESS_IDLE != 0 )
3122 {
3123 /* If a task is blocked on a kernel object then xNextTaskUnblockTime
3124 might be set to the blocked task's time out time. If the task is
3125 unblocked for a reason other than a timeout xNextTaskUnblockTime is
3126 normally left unchanged, because it is automatically reset to a new
3127 value when the tick count equals xNextTaskUnblockTime. However if
3128 tickless idling is used it might be more important to enter sleep mode
3129 at the earliest possible time - so reset xNextTaskUnblockTime here to
3130 ensure it is updated at the earliest possible time. */
3131 prvResetNextTaskUnblockTime();
3132 }
3133 #endif
3134 }
3135 else
3136 {
3137 /* The delayed and ready lists cannot be accessed, so hold this task
3138 pending until the scheduler is resumed. */
3139 vListInsertEnd( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
3140 }
3141
3142 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
3143 {
3144 /* Return true if the task removed from the event list has a higher
3145 priority than the calling task. This allows the calling task to know if
3146 it should force a context switch now. */
3147 xReturn = pdTRUE;
3148
3149 /* Mark that a yield is pending in case the user is not using the
3150 "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
3151 xYieldPending = pdTRUE;
3152 }
3153 else
3154 {
3155 xReturn = pdFALSE;
3156 }
3157
3158 return xReturn;
3159}
3160/*-----------------------------------------------------------*/
3161
3162void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
3163{
3164TCB_t *pxUnblockedTCB;
3165
3166 /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. It is used by
3167 the event flags implementation. */
3168 configASSERT( uxSchedulerSuspended != pdFALSE );
3169
3170 /* Store the new item value in the event list. */
3171 listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );
3172
3173 /* Remove the event list form the event flag. Interrupts do not access
3174 event flags. */
3175 pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3176 configASSERT( pxUnblockedTCB );
3177 ( void ) uxListRemove( pxEventListItem );
3178
3179 /* Remove the task from the delayed list and add it to the ready list. The
3180 scheduler is suspended so interrupts will not be accessing the ready
3181 lists. */
3182 ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
3183 prvAddTaskToReadyList( pxUnblockedTCB );
3184
3185 if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
3186 {
3187 /* The unblocked task has a priority above that of the calling task, so
3188 a context switch is required. This function is called with the
3189 scheduler suspended so xYieldPending is set so the context switch
3190 occurs immediately that the scheduler is resumed (unsuspended). */
3191 xYieldPending = pdTRUE;
3192 }
3193}
3194/*-----------------------------------------------------------*/
3195
3196void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
3197{
3198 configASSERT( pxTimeOut );
3199 taskENTER_CRITICAL();
3200 {
3201 pxTimeOut->xOverflowCount = xNumOfOverflows;
3202 pxTimeOut->xTimeOnEntering = xTickCount;
3203 }
3204 taskEXIT_CRITICAL();
3205}
3206/*-----------------------------------------------------------*/
3207
3208void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
3209{
3210 /* For internal use only as it does not use a critical section. */
3211 pxTimeOut->xOverflowCount = xNumOfOverflows;
3212 pxTimeOut->xTimeOnEntering = xTickCount;
3213}
3214/*-----------------------------------------------------------*/
3215
3216BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
3217{
3218BaseType_t xReturn;
3219
3220 configASSERT( pxTimeOut );
3221 configASSERT( pxTicksToWait );
3222
3223 taskENTER_CRITICAL();
3224 {
3225 /* Minor optimisation. The tick count cannot change in this block. */
3226 const TickType_t xConstTickCount = xTickCount;
3227 const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;
3228
3229 #if( INCLUDE_xTaskAbortDelay == 1 )
3230 if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
3231 {
3232 /* The delay was aborted, which is not the same as a time out,
3233 but has the same result. */
3234 pxCurrentTCB->ucDelayAborted = pdFALSE;
3235 xReturn = pdTRUE;
3236 }
3237 else
3238 #endif
3239
3240 #if ( INCLUDE_vTaskSuspend == 1 )
3241 if( *pxTicksToWait == portMAX_DELAY )
3242 {
3243 /* If INCLUDE_vTaskSuspend is set to 1 and the block time
3244 specified is the maximum block time then the task should block
3245 indefinitely, and therefore never time out. */
3246 xReturn = pdFALSE;
3247 }
3248 else
3249 #endif
3250
3251 if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
3252 {
3253 /* The tick count is greater than the time at which
3254 vTaskSetTimeout() was called, but has also overflowed since
3255 vTaskSetTimeOut() was called. It must have wrapped all the way
3256 around and gone past again. This passed since vTaskSetTimeout()
3257 was called. */
3258 xReturn = pdTRUE;
3259 }
3260 else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
3261 {
3262 /* Not a genuine timeout. Adjust parameters for time remaining. */
3263 *pxTicksToWait -= xElapsedTime;
3264 vTaskInternalSetTimeOutState( pxTimeOut );
3265 xReturn = pdFALSE;
3266 }
3267 else
3268 {
3269 *pxTicksToWait = 0;
3270 xReturn = pdTRUE;
3271 }
3272 }
3273 taskEXIT_CRITICAL();
3274
3275 return xReturn;
3276}
3277/*-----------------------------------------------------------*/
3278
3279void vTaskMissedYield( void )
3280{
3281 xYieldPending = pdTRUE;
3282}
3283/*-----------------------------------------------------------*/
3284
3285#if ( configUSE_TRACE_FACILITY == 1 )
3286
3287 UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
3288 {
3289 UBaseType_t uxReturn;
3290 TCB_t const *pxTCB;
3291
3292 if( xTask != NULL )
3293 {
3294 pxTCB = xTask;
3295 uxReturn = pxTCB->uxTaskNumber;
3296 }
3297 else
3298 {
3299 uxReturn = 0U;
3300 }
3301
3302 return uxReturn;
3303 }
3304
3305#endif /* configUSE_TRACE_FACILITY */
3306/*-----------------------------------------------------------*/
3307
3308#if ( configUSE_TRACE_FACILITY == 1 )
3309
3310 void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
3311 {
3312 TCB_t * pxTCB;
3313
3314 if( xTask != NULL )
3315 {
3316 pxTCB = xTask;
3317 pxTCB->uxTaskNumber = uxHandle;
3318 }
3319 }
3320
3321#endif /* configUSE_TRACE_FACILITY */
3322
3323/*
3324 * -----------------------------------------------------------
3325 * The Idle task.
3326 * ----------------------------------------------------------
3327 *
3328 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
3329 * language extensions. The equivalent prototype for this function is:
3330 *
3331 * void prvIdleTask( void *pvParameters );
3332 *
3333 */
3334static portTASK_FUNCTION( prvIdleTask, pvParameters )
3335{
3336 /* Stop warnings. */
3337 ( void ) pvParameters;
3338
3339 /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
3340 SCHEDULER IS STARTED. **/
3341
3342 /* In case a task that has a secure context deletes itself, in which case
3343 the idle task is responsible for deleting the task's secure context, if
3344 any. */
3345 portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );
3346
3347 for( ;; )
3348 {
3349 /* See if any tasks have deleted themselves - if so then the idle task
3350 is responsible for freeing the deleted task's TCB and stack. */
3351 prvCheckTasksWaitingTermination();
3352
3353 #if ( configUSE_PREEMPTION == 0 )
3354 {
3355 /* If we are not using preemption we keep forcing a task switch to
3356 see if any other task has become available. If we are using
3357 preemption we don't need to do this as any task becoming available
3358 will automatically get the processor anyway. */
3359 taskYIELD();
3360 }
3361 #endif /* configUSE_PREEMPTION */
3362
3363 #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
3364 {
3365 /* When using preemption tasks of equal priority will be
3366 timesliced. If a task that is sharing the idle priority is ready
3367 to run then the idle task should yield before the end of the
3368 timeslice.
3369
3370 A critical region is not required here as we are just reading from
3371 the list, and an occasional incorrect value will not matter. If
3372 the ready list at the idle priority contains more than one task
3373 then a task other than the idle task is ready to execute. */
3374 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
3375 {
3376 taskYIELD();
3377 }
3378 else
3379 {
3380 mtCOVERAGE_TEST_MARKER();
3381 }
3382 }
3383 #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */
3384
3385 #if ( configUSE_IDLE_HOOK == 1 )
3386 {
3387 extern void vApplicationIdleHook( void );
3388
3389 /* Call the user defined function from within the idle task. This
3390 allows the application designer to add background functionality
3391 without the overhead of a separate task.
3392 NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
3393 CALL A FUNCTION THAT MIGHT BLOCK. */
3394 vApplicationIdleHook();
3395 }
3396 #endif /* configUSE_IDLE_HOOK */
3397
3398 /* This conditional compilation should use inequality to 0, not equality
3399 to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
3400 user defined low power mode implementations require
3401 configUSE_TICKLESS_IDLE to be set to a value other than 1. */
3402 #if ( configUSE_TICKLESS_IDLE != 0 )
3403 {
3404 TickType_t xExpectedIdleTime;
3405
3406 /* It is not desirable to suspend then resume the scheduler on
3407 each iteration of the idle task. Therefore, a preliminary
3408 test of the expected idle time is performed without the
3409 scheduler suspended. The result here is not necessarily
3410 valid. */
3411 xExpectedIdleTime = prvGetExpectedIdleTime();
3412
3413 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3414 {
3415 vTaskSuspendAll();
3416 {
3417 /* Now the scheduler is suspended, the expected idle
3418 time can be sampled again, and this time its value can
3419 be used. */
3420 configASSERT( xNextTaskUnblockTime >= xTickCount );
3421 xExpectedIdleTime = prvGetExpectedIdleTime();
3422
3423 /* Define the following macro to set xExpectedIdleTime to 0
3424 if the application does not want
3425 portSUPPRESS_TICKS_AND_SLEEP() to be called. */
3426 configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );
3427
3428 if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
3429 {
3430 traceLOW_POWER_IDLE_BEGIN();
3431 portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
3432 traceLOW_POWER_IDLE_END();
3433 }
3434 else
3435 {
3436 mtCOVERAGE_TEST_MARKER();
3437 }
3438 }
3439 ( void ) xTaskResumeAll();
3440 }
3441 else
3442 {
3443 mtCOVERAGE_TEST_MARKER();
3444 }
3445 }
3446 #endif /* configUSE_TICKLESS_IDLE */
3447 }
3448}
3449/*-----------------------------------------------------------*/
3450
3451#if( configUSE_TICKLESS_IDLE != 0 )
3452
3453 eSleepModeStatus eTaskConfirmSleepModeStatus( void )
3454 {
3455 /* The idle task exists in addition to the application tasks. */
3456 const UBaseType_t uxNonApplicationTasks = 1;
3457 eSleepModeStatus eReturn = eStandardSleep;
3458
3459 if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
3460 {
3461 /* A task was made ready while the scheduler was suspended. */
3462 eReturn = eAbortSleep;
3463 }
3464 else if( xYieldPending != pdFALSE )
3465 {
3466 /* A yield was pended while the scheduler was suspended. */
3467 eReturn = eAbortSleep;
3468 }
3469 else
3470 {
3471 /* If all the tasks are in the suspended list (which might mean they
3472 have an infinite block time rather than actually being suspended)
3473 then it is safe to turn all clocks off and just wait for external
3474 interrupts. */
3475 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
3476 {
3477 eReturn = eNoTasksWaitingTimeout;
3478 }
3479 else
3480 {
3481 mtCOVERAGE_TEST_MARKER();
3482 }
3483 }
3484
3485 return eReturn;
3486 }
3487
3488#endif /* configUSE_TICKLESS_IDLE */
3489/*-----------------------------------------------------------*/
3490
3491#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3492
3493 void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
3494 {
3495 TCB_t *pxTCB;
3496
3497 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3498 {
3499 pxTCB = prvGetTCBFromHandle( xTaskToSet );
3500 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
3501 }
3502 }
3503
3504#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3505/*-----------------------------------------------------------*/
3506
3507#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3508
3509 void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex )
3510 {
3511 void *pvReturn = NULL;
3512 TCB_t *pxTCB;
3513
3514 if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
3515 {
3516 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
3517 pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
3518 }
3519 else
3520 {
3521 pvReturn = NULL;
3522 }
3523
3524 return pvReturn;
3525 }
3526
3527#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3528/*-----------------------------------------------------------*/
3529
3530#if ( portUSING_MPU_WRAPPERS == 1 )
3531
3532 void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions )
3533 {
3534 TCB_t *pxTCB;
3535
3536 /* If null is passed in here then we are modifying the MPU settings of
3537 the calling task. */
3538 pxTCB = prvGetTCBFromHandle( xTaskToModify );
3539
3540 vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
3541 }
3542
3543#endif /* portUSING_MPU_WRAPPERS */
3544/*-----------------------------------------------------------*/
3545
3546static void prvInitialiseTaskLists( void )
3547{
3548UBaseType_t uxPriority;
3549
3550 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
3551 {
3552 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
3553 }
3554
3555 vListInitialise( &xDelayedTaskList1 );
3556 vListInitialise( &xDelayedTaskList2 );
3557 vListInitialise( &xPendingReadyList );
3558
3559 #if ( INCLUDE_vTaskDelete == 1 )
3560 {
3561 vListInitialise( &xTasksWaitingTermination );
3562 }
3563 #endif /* INCLUDE_vTaskDelete */
3564
3565 #if ( INCLUDE_vTaskSuspend == 1 )
3566 {
3567 vListInitialise( &xSuspendedTaskList );
3568 }
3569 #endif /* INCLUDE_vTaskSuspend */
3570
3571 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
3572 using list2. */
3573 pxDelayedTaskList = &xDelayedTaskList1;
3574 pxOverflowDelayedTaskList = &xDelayedTaskList2;
3575}
3576/*-----------------------------------------------------------*/
3577
3578static void prvCheckTasksWaitingTermination( void )
3579{
3580
3581 /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/
3582
3583 #if ( INCLUDE_vTaskDelete == 1 )
3584 {
3585 TCB_t *pxTCB;
3586
3587 /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
3588 being called too often in the idle task. */
3589 while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
3590 {
3591 taskENTER_CRITICAL();
3592 {
3593 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3594 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
3595 --uxCurrentNumberOfTasks;
3596 --uxDeletedTasksWaitingCleanUp;
3597 }
3598 taskEXIT_CRITICAL();
3599
3600 prvDeleteTCB( pxTCB );
3601 }
3602 }
3603 #endif /* INCLUDE_vTaskDelete */
3604}
3605/*-----------------------------------------------------------*/
3606
3607#if( configUSE_TRACE_FACILITY == 1 )
3608
3609 void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState )
3610 {
3611 TCB_t *pxTCB;
3612
3613 /* xTask is NULL then get the state of the calling task. */
3614 pxTCB = prvGetTCBFromHandle( xTask );
3615
3616 pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
3617 pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName [ 0 ] );
3618 pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
3619 pxTaskStatus->pxStackBase = pxTCB->pxStack;
3620 pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;
3621
3622 #if ( configUSE_MUTEXES == 1 )
3623 {
3624 pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
3625 }
3626 #else
3627 {
3628 pxTaskStatus->uxBasePriority = 0;
3629 }
3630 #endif
3631
3632 #if ( configGENERATE_RUN_TIME_STATS == 1 )
3633 {
3634 pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
3635 }
3636 #else
3637 {
3638 pxTaskStatus->ulRunTimeCounter = 0;
3639 }
3640 #endif
3641
3642 /* Obtaining the task state is a little fiddly, so is only done if the
3643 value of eState passed into this function is eInvalid - otherwise the
3644 state is just set to whatever is passed in. */
3645 if( eState != eInvalid )
3646 {
3647 if( pxTCB == pxCurrentTCB )
3648 {
3649 pxTaskStatus->eCurrentState = eRunning;
3650 }
3651 else
3652 {
3653 pxTaskStatus->eCurrentState = eState;
3654
3655 #if ( INCLUDE_vTaskSuspend == 1 )
3656 {
3657 /* If the task is in the suspended list then there is a
3658 chance it is actually just blocked indefinitely - so really
3659 it should be reported as being in the Blocked state. */
3660 if( eState == eSuspended )
3661 {
3662 vTaskSuspendAll();
3663 {
3664 if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
3665 {
3666 pxTaskStatus->eCurrentState = eBlocked;
3667 }
3668 }
3669 ( void ) xTaskResumeAll();
3670 }
3671 }
3672 #endif /* INCLUDE_vTaskSuspend */
3673 }
3674 }
3675 else
3676 {
3677 pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
3678 }
3679
3680 /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
3681 parameter is provided to allow it to be skipped. */
3682 if( xGetFreeStackSpace != pdFALSE )
3683 {
3684 #if ( portSTACK_GROWTH > 0 )
3685 {
3686 pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
3687 }
3688 #else
3689 {
3690 pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
3691 }
3692 #endif
3693 }
3694 else
3695 {
3696 pxTaskStatus->usStackHighWaterMark = 0;
3697 }
3698 }
3699
3700#endif /* configUSE_TRACE_FACILITY */
3701/*-----------------------------------------------------------*/
3702
3703#if ( configUSE_TRACE_FACILITY == 1 )
3704
3705 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
3706 {
3707 configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB;
3708 UBaseType_t uxTask = 0;
3709
3710 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
3711 {
3712 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3713
3714 /* Populate an TaskStatus_t structure within the
3715 pxTaskStatusArray array for each task that is referenced from
3716 pxList. See the definition of TaskStatus_t in task.h for the
3717 meaning of each TaskStatus_t structure member. */
3718 do
3719 {
3720 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3721 vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
3722 uxTask++;
3723 } while( pxNextTCB != pxFirstTCB );
3724 }
3725 else
3726 {
3727 mtCOVERAGE_TEST_MARKER();
3728 }
3729
3730 return uxTask;
3731 }
3732
3733#endif /* configUSE_TRACE_FACILITY */
3734/*-----------------------------------------------------------*/
3735
3736#if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
3737
3738 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
3739 {
3740 uint32_t ulCount = 0U;
3741
3742 while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
3743 {
3744 pucStackByte -= portSTACK_GROWTH;
3745 ulCount++;
3746 }
3747
3748 ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */
3749
3750 return ( configSTACK_DEPTH_TYPE ) ulCount;
3751 }
3752
3753#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
3754/*-----------------------------------------------------------*/
3755
3756#if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
3757
3758 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
3759 same except for their return type. Using configSTACK_DEPTH_TYPE allows the
3760 user to determine the return type. It gets around the problem of the value
3761 overflowing on 8-bit types without breaking backward compatibility for
3762 applications that expect an 8-bit return type. */
3763 configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
3764 {
3765 TCB_t *pxTCB;
3766 uint8_t *pucEndOfStack;
3767 configSTACK_DEPTH_TYPE uxReturn;
3768
3769 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
3770 the same except for their return type. Using configSTACK_DEPTH_TYPE
3771 allows the user to determine the return type. It gets around the
3772 problem of the value overflowing on 8-bit types without breaking
3773 backward compatibility for applications that expect an 8-bit return
3774 type. */
3775
3776 pxTCB = prvGetTCBFromHandle( xTask );
3777
3778 #if portSTACK_GROWTH < 0
3779 {
3780 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
3781 }
3782 #else
3783 {
3784 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
3785 }
3786 #endif
3787
3788 uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );
3789
3790 return uxReturn;
3791 }
3792
3793#endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
3794/*-----------------------------------------------------------*/
3795
3796#if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
3797
3798 UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
3799 {
3800 TCB_t *pxTCB;
3801 uint8_t *pucEndOfStack;
3802 UBaseType_t uxReturn;
3803
3804 pxTCB = prvGetTCBFromHandle( xTask );
3805
3806 #if portSTACK_GROWTH < 0
3807 {
3808 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
3809 }
3810 #else
3811 {
3812 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
3813 }
3814 #endif
3815
3816 uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
3817
3818 return uxReturn;
3819 }
3820
3821#endif /* INCLUDE_uxTaskGetStackHighWaterMark */
3822/*-----------------------------------------------------------*/
3823
3824#if ( INCLUDE_vTaskDelete == 1 )
3825
3826 static void prvDeleteTCB( TCB_t *pxTCB )
3827 {
3828 /* This call is required specifically for the TriCore port. It must be
3829 above the vPortFree() calls. The call is also used by ports/demos that
3830 want to allocate and clean RAM statically. */
3831 portCLEAN_UP_TCB( pxTCB );
3832
3833 /* Free up the memory allocated by the scheduler for the task. It is up
3834 to the task to free any memory allocated at the application level. */
3835 #if ( configUSE_NEWLIB_REENTRANT == 1 )
3836 {
3837 _reclaim_reent( &( pxTCB->xNewLib_reent ) );
3838 }
3839 #endif /* configUSE_NEWLIB_REENTRANT */
3840
3841 #if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
3842 {
3843 /* The task can only have been allocated dynamically - free both
3844 the stack and TCB. */
3845 vPortFree( pxTCB->pxStack );
3846 vPortFree( pxTCB );
3847 }
3848 #elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
3849 {
3850 /* The task could have been allocated statically or dynamically, so
3851 check what was statically allocated before trying to free the
3852 memory. */
3853 if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
3854 {
3855 /* Both the stack and TCB were allocated dynamically, so both
3856 must be freed. */
3857 vPortFree( pxTCB->pxStack );
3858 vPortFree( pxTCB );
3859 }
3860 else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
3861 {
3862 /* Only the stack was statically allocated, so the TCB is the
3863 only memory that must be freed. */
3864 vPortFree( pxTCB );
3865 }
3866 else
3867 {
3868 /* Neither the stack nor the TCB were allocated dynamically, so
3869 nothing needs to be freed. */
3870 configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
3871 mtCOVERAGE_TEST_MARKER();
3872 }
3873 }
3874 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
3875 }
3876
3877#endif /* INCLUDE_vTaskDelete */
3878/*-----------------------------------------------------------*/
3879
3880static void prvResetNextTaskUnblockTime( void )
3881{
3882TCB_t *pxTCB;
3883
3884 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
3885 {
3886 /* The new current delayed list is empty. Set xNextTaskUnblockTime to
3887 the maximum possible value so it is extremely unlikely that the
3888 if( xTickCount >= xNextTaskUnblockTime ) test will pass until
3889 there is an item in the delayed list. */
3890 xNextTaskUnblockTime = portMAX_DELAY;
3891 }
3892 else
3893 {
3894 /* The new current delayed list is not empty, get the value of
3895 the item at the head of the delayed list. This is the time at
3896 which the task at the head of the delayed list should be removed
3897 from the Blocked state. */
3898 ( pxTCB ) = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
3899 xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xStateListItem ) );
3900 }
3901}
3902/*-----------------------------------------------------------*/
3903
3904#if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
3905
3906 TaskHandle_t xTaskGetCurrentTaskHandle( void )
3907 {
3908 TaskHandle_t xReturn;
3909
3910 /* A critical section is not required as this is not called from
3911 an interrupt and the current TCB will always be the same for any
3912 individual execution thread. */
3913 xReturn = pxCurrentTCB;
3914
3915 return xReturn;
3916 }
3917
3918#endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
3919/*-----------------------------------------------------------*/
3920
3921#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
3922
3923 BaseType_t xTaskGetSchedulerState( void )
3924 {
3925 BaseType_t xReturn;
3926
3927 if( xSchedulerRunning == pdFALSE )
3928 {
3929 xReturn = taskSCHEDULER_NOT_STARTED;
3930 }
3931 else
3932 {
3933 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
3934 {
3935 xReturn = taskSCHEDULER_RUNNING;
3936 }
3937 else
3938 {
3939 xReturn = taskSCHEDULER_SUSPENDED;
3940 }
3941 }
3942
3943 return xReturn;
3944 }
3945
3946#endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
3947/*-----------------------------------------------------------*/
3948
3949#if ( configUSE_MUTEXES == 1 )
3950
3951 BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
3952 {
3953 TCB_t * const pxMutexHolderTCB = pxMutexHolder;
3954 BaseType_t xReturn = pdFALSE;
3955
3956 /* If the mutex was given back by an interrupt while the queue was
3957 locked then the mutex holder might now be NULL. _RB_ Is this still
3958 needed as interrupts can no longer use mutexes? */
3959 if( pxMutexHolder != NULL )
3960 {
3961 /* If the holder of the mutex has a priority below the priority of
3962 the task attempting to obtain the mutex then it will temporarily
3963 inherit the priority of the task attempting to obtain the mutex. */
3964 if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority )
3965 {
3966 /* Adjust the mutex holder state to account for its new
3967 priority. Only reset the event list item value if the value is
3968 not being used for anything else. */
3969 if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
3970 {
3971 listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
3972 }
3973 else
3974 {
3975 mtCOVERAGE_TEST_MARKER();
3976 }
3977
3978 /* If the task being modified is in the ready state it will need
3979 to be moved into a new list. */
3980 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
3981 {
3982 if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
3983 {
3984 taskRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority );
3985 }
3986 else
3987 {
3988 mtCOVERAGE_TEST_MARKER();
3989 }
3990
3991 /* Inherit the priority before being moved into the new list. */
3992 pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
3993 prvAddTaskToReadyList( pxMutexHolderTCB );
3994 }
3995 else
3996 {
3997 /* Just inherit the priority. */
3998 pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
3999 }
4000
4001 traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority );
4002
4003 /* Inheritance occurred. */
4004 xReturn = pdTRUE;
4005 }
4006 else
4007 {
4008 if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority )
4009 {
4010 /* The base priority of the mutex holder is lower than the
4011 priority of the task attempting to take the mutex, but the
4012 current priority of the mutex holder is not lower than the
4013 priority of the task attempting to take the mutex.
4014 Therefore the mutex holder must have already inherited a
4015 priority, but inheritance would have occurred if that had
4016 not been the case. */
4017 xReturn = pdTRUE;
4018 }
4019 else
4020 {
4021 mtCOVERAGE_TEST_MARKER();
4022 }
4023 }
4024 }
4025 else
4026 {
4027 mtCOVERAGE_TEST_MARKER();
4028 }
4029
4030 return xReturn;
4031 }
4032
4033#endif /* configUSE_MUTEXES */
4034/*-----------------------------------------------------------*/
4035
4036#if ( configUSE_MUTEXES == 1 )
4037
4038 BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
4039 {
4040 TCB_t * const pxTCB = pxMutexHolder;
4041 BaseType_t xReturn = pdFALSE;
4042
4043 if( pxMutexHolder != NULL )
4044 {
4045 /* A task can only have an inherited priority if it holds the mutex.
4046 If the mutex is held by a task then it cannot be given from an
4047 interrupt, and if a mutex is given by the holding task then it must
4048 be the running state task. */
4049 configASSERT( pxTCB == pxCurrentTCB );
4050 configASSERT( pxTCB->uxMutexesHeld );
4051 ( pxTCB->uxMutexesHeld )--;
4052
4053 /* Has the holder of the mutex inherited the priority of another
4054 task? */
4055 if( pxTCB->uxPriority != pxTCB->uxBasePriority )
4056 {
4057 /* Only disinherit if no other mutexes are held. */
4058 if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
4059 {
4060 /* A task can only have an inherited priority if it holds
4061 the mutex. If the mutex is held by a task then it cannot be
4062 given from an interrupt, and if a mutex is given by the
4063 holding task then it must be the running state task. Remove
4064 the holding task from the ready list. */
4065 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
4066 {
4067 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4068 }
4069 else
4070 {
4071 mtCOVERAGE_TEST_MARKER();
4072 }
4073
4074 /* Disinherit the priority before adding the task into the
4075 new ready list. */
4076 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
4077 pxTCB->uxPriority = pxTCB->uxBasePriority;
4078
4079 /* Reset the event list item value. It cannot be in use for
4080 any other purpose if this task is running, and it must be
4081 running to give back the mutex. */
4082 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4083 prvAddTaskToReadyList( pxTCB );
4084
4085 /* Return true to indicate that a context switch is required.
4086 This is only actually required in the corner case whereby
4087 multiple mutexes were held and the mutexes were given back
4088 in an order different to that in which they were taken.
4089 If a context switch did not occur when the first mutex was
4090 returned, even if a task was waiting on it, then a context
4091 switch should occur when the last mutex is returned whether
4092 a task is waiting on it or not. */
4093 xReturn = pdTRUE;
4094 }
4095 else
4096 {
4097 mtCOVERAGE_TEST_MARKER();
4098 }
4099 }
4100 else
4101 {
4102 mtCOVERAGE_TEST_MARKER();
4103 }
4104 }
4105 else
4106 {
4107 mtCOVERAGE_TEST_MARKER();
4108 }
4109
4110 return xReturn;
4111 }
4112
4113#endif /* configUSE_MUTEXES */
4114/*-----------------------------------------------------------*/
4115
4116#if ( configUSE_MUTEXES == 1 )
4117
4118 void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask )
4119 {
4120 TCB_t * const pxTCB = pxMutexHolder;
4121 UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
4122 const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;
4123
4124 if( pxMutexHolder != NULL )
4125 {
4126 /* If pxMutexHolder is not NULL then the holder must hold at least
4127 one mutex. */
4128 configASSERT( pxTCB->uxMutexesHeld );
4129
4130 /* Determine the priority to which the priority of the task that
4131 holds the mutex should be set. This will be the greater of the
4132 holding task's base priority and the priority of the highest
4133 priority task that is waiting to obtain the mutex. */
4134 if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
4135 {
4136 uxPriorityToUse = uxHighestPriorityWaitingTask;
4137 }
4138 else
4139 {
4140 uxPriorityToUse = pxTCB->uxBasePriority;
4141 }
4142
4143 /* Does the priority need to change? */
4144 if( pxTCB->uxPriority != uxPriorityToUse )
4145 {
4146 /* Only disinherit if no other mutexes are held. This is a
4147 simplification in the priority inheritance implementation. If
4148 the task that holds the mutex is also holding other mutexes then
4149 the other mutexes may have caused the priority inheritance. */
4150 if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
4151 {
4152 /* If a task has timed out because it already holds the
4153 mutex it was trying to obtain then it cannot of inherited
4154 its own priority. */
4155 configASSERT( pxTCB != pxCurrentTCB );
4156
4157 /* Disinherit the priority, remembering the previous
4158 priority to facilitate determining the subject task's
4159 state. */
4160 traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
4161 uxPriorityUsedOnEntry = pxTCB->uxPriority;
4162 pxTCB->uxPriority = uxPriorityToUse;
4163
4164 /* Only reset the event list item value if the value is not
4165 being used for anything else. */
4166 if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
4167 {
4168 listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4169 }
4170 else
4171 {
4172 mtCOVERAGE_TEST_MARKER();
4173 }
4174
4175 /* If the running task is not the task that holds the mutex
4176 then the task that holds the mutex could be in either the
4177 Ready, Blocked or Suspended states. Only remove the task
4178 from its current state list if it is in the Ready state as
4179 the task's priority is going to change and there is one
4180 Ready list per priority. */
4181 if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
4182 {
4183 if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
4184 {
4185 taskRESET_READY_PRIORITY( pxTCB->uxPriority );
4186 }
4187 else
4188 {
4189 mtCOVERAGE_TEST_MARKER();
4190 }
4191
4192 prvAddTaskToReadyList( pxTCB );
4193 }
4194 else
4195 {
4196 mtCOVERAGE_TEST_MARKER();
4197 }
4198 }
4199 else
4200 {
4201 mtCOVERAGE_TEST_MARKER();
4202 }
4203 }
4204 else
4205 {
4206 mtCOVERAGE_TEST_MARKER();
4207 }
4208 }
4209 else
4210 {
4211 mtCOVERAGE_TEST_MARKER();
4212 }
4213 }
4214
4215#endif /* configUSE_MUTEXES */
4216/*-----------------------------------------------------------*/
4217
4218#if ( portCRITICAL_NESTING_IN_TCB == 1 )
4219
4220 void vTaskEnterCritical( void )
4221 {
4222 portDISABLE_INTERRUPTS();
4223
4224 if( xSchedulerRunning != pdFALSE )
4225 {
4226 ( pxCurrentTCB->uxCriticalNesting )++;
4227
4228 /* This is not the interrupt safe version of the enter critical
4229 function so assert() if it is being called from an interrupt
4230 context. Only API functions that end in "FromISR" can be used in an
4231 interrupt. Only assert if the critical nesting count is 1 to
4232 protect against recursive calls if the assert function also uses a
4233 critical section. */
4234 if( pxCurrentTCB->uxCriticalNesting == 1 )
4235 {
4236 portASSERT_IF_IN_ISR();
4237 }
4238 }
4239 else
4240 {
4241 mtCOVERAGE_TEST_MARKER();
4242 }
4243 }
4244
4245#endif /* portCRITICAL_NESTING_IN_TCB */
4246/*-----------------------------------------------------------*/
4247
4248#if ( portCRITICAL_NESTING_IN_TCB == 1 )
4249
4250 void vTaskExitCritical( void )
4251 {
4252 if( xSchedulerRunning != pdFALSE )
4253 {
4254 if( pxCurrentTCB->uxCriticalNesting > 0U )
4255 {
4256 ( pxCurrentTCB->uxCriticalNesting )--;
4257
4258 if( pxCurrentTCB->uxCriticalNesting == 0U )
4259 {
4260 portENABLE_INTERRUPTS();
4261 }
4262 else
4263 {
4264 mtCOVERAGE_TEST_MARKER();
4265 }
4266 }
4267 else
4268 {
4269 mtCOVERAGE_TEST_MARKER();
4270 }
4271 }
4272 else
4273 {
4274 mtCOVERAGE_TEST_MARKER();
4275 }
4276 }
4277
4278#endif /* portCRITICAL_NESTING_IN_TCB */
4279/*-----------------------------------------------------------*/
4280
4281#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
4282
4283 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName )
4284 {
4285 size_t x;
4286
4287 /* Start by copying the entire string. */
4288 strcpy( pcBuffer, pcTaskName );
4289
4290 /* Pad the end of the string with spaces to ensure columns line up when
4291 printed out. */
4292 for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ )
4293 {
4294 pcBuffer[ x ] = ' ';
4295 }
4296
4297 /* Terminate. */
4298 pcBuffer[ x ] = ( char ) 0x00;
4299
4300 /* Return the new end of string. */
4301 return &( pcBuffer[ x ] );
4302 }
4303
4304#endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
4305/*-----------------------------------------------------------*/
4306
4307#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
4308
4309 void vTaskList( char * pcWriteBuffer )
4310 {
4311 TaskStatus_t *pxTaskStatusArray;
4312 UBaseType_t uxArraySize, x;
4313 char cStatus;
4314
4315 /*
4316 * PLEASE NOTE:
4317 *
4318 * This function is provided for convenience only, and is used by many
4319 * of the demo applications. Do not consider it to be part of the
4320 * scheduler.
4321 *
4322 * vTaskList() calls uxTaskGetSystemState(), then formats part of the
4323 * uxTaskGetSystemState() output into a human readable table that
4324 * displays task names, states and stack usage.
4325 *
4326 * vTaskList() has a dependency on the sprintf() C library function that
4327 * might bloat the code size, use a lot of stack, and provide different
4328 * results on different platforms. An alternative, tiny, third party,
4329 * and limited functionality implementation of sprintf() is provided in
4330 * many of the FreeRTOS/Demo sub-directories in a file called
4331 * printf-stdarg.c (note printf-stdarg.c does not provide a full
4332 * snprintf() implementation!).
4333 *
4334 * It is recommended that production systems call uxTaskGetSystemState()
4335 * directly to get access to raw stats data, rather than indirectly
4336 * through a call to vTaskList().
4337 */
4338
4339
4340 /* Make sure the write buffer does not contain a string. */
4341 *pcWriteBuffer = ( char ) 0x00;
4342
4343 /* Take a snapshot of the number of tasks in case it changes while this
4344 function is executing. */
4345 uxArraySize = uxCurrentNumberOfTasks;
4346
4347 /* Allocate an array index for each task. NOTE! if
4348 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4349 equate to NULL. */
4350 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
4351
4352 if( pxTaskStatusArray != NULL )
4353 {
4354 /* Generate the (binary) data. */
4355 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );
4356
4357 /* Create a human readable table from the binary data. */
4358 for( x = 0; x < uxArraySize; x++ )
4359 {
4360 switch( pxTaskStatusArray[ x ].eCurrentState )
4361 {
4362 case eRunning: cStatus = tskRUNNING_CHAR;
4363 break;
4364
4365 case eReady: cStatus = tskREADY_CHAR;
4366 break;
4367
4368 case eBlocked: cStatus = tskBLOCKED_CHAR;
4369 break;
4370
4371 case eSuspended: cStatus = tskSUSPENDED_CHAR;
4372 break;
4373
4374 case eDeleted: cStatus = tskDELETED_CHAR;
4375 break;
4376
4377 case eInvalid: /* Fall through. */
4378 default: /* Should not get here, but it is included
4379 to prevent static checking errors. */
4380 cStatus = ( char ) 0x00;
4381 break;
4382 }
4383
4384 /* Write the task name to the string, padding with spaces so it
4385 can be printed in tabular form more easily. */
4386 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4387
4388 /* Write the rest of the string. */
4389 sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
4390 pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
4391 }
4392
4393 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4394 is 0 then vPortFree() will be #defined to nothing. */
4395 vPortFree( pxTaskStatusArray );
4396 }
4397 else
4398 {
4399 mtCOVERAGE_TEST_MARKER();
4400 }
4401 }
4402
4403#endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
4404/*----------------------------------------------------------*/
4405
4406#if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
4407
4408 void vTaskGetRunTimeStats( char *pcWriteBuffer )
4409 {
4410 TaskStatus_t *pxTaskStatusArray;
4411 UBaseType_t uxArraySize, x;
4412 uint32_t ulTotalTime, ulStatsAsPercentage;
4413
4414 #if( configUSE_TRACE_FACILITY != 1 )
4415 {
4416 #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
4417 }
4418 #endif
4419
4420 /*
4421 * PLEASE NOTE:
4422 *
4423 * This function is provided for convenience only, and is used by many
4424 * of the demo applications. Do not consider it to be part of the
4425 * scheduler.
4426 *
4427 * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
4428 * of the uxTaskGetSystemState() output into a human readable table that
4429 * displays the amount of time each task has spent in the Running state
4430 * in both absolute and percentage terms.
4431 *
4432 * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
4433 * function that might bloat the code size, use a lot of stack, and
4434 * provide different results on different platforms. An alternative,
4435 * tiny, third party, and limited functionality implementation of
4436 * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
4437 * a file called printf-stdarg.c (note printf-stdarg.c does not provide
4438 * a full snprintf() implementation!).
4439 *
4440 * It is recommended that production systems call uxTaskGetSystemState()
4441 * directly to get access to raw stats data, rather than indirectly
4442 * through a call to vTaskGetRunTimeStats().
4443 */
4444
4445 /* Make sure the write buffer does not contain a string. */
4446 *pcWriteBuffer = ( char ) 0x00;
4447
4448 /* Take a snapshot of the number of tasks in case it changes while this
4449 function is executing. */
4450 uxArraySize = uxCurrentNumberOfTasks;
4451
4452 /* Allocate an array index for each task. NOTE! If
4453 configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4454 equate to NULL. */
4455 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
4456
4457 if( pxTaskStatusArray != NULL )
4458 {
4459 /* Generate the (binary) data. */
4460 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
4461
4462 /* For percentage calculations. */
4463 ulTotalTime /= 100UL;
4464
4465 /* Avoid divide by zero errors. */
4466 if( ulTotalTime > 0UL )
4467 {
4468 /* Create a human readable table from the binary data. */
4469 for( x = 0; x < uxArraySize; x++ )
4470 {
4471 /* What percentage of the total run time has the task used?
4472 This will always be rounded down to the nearest integer.
4473 ulTotalRunTimeDiv100 has already been divided by 100. */
4474 ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;
4475
4476 /* Write the task name to the string, padding with
4477 spaces so it can be printed in tabular form more
4478 easily. */
4479 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4480
4481 if( ulStatsAsPercentage > 0UL )
4482 {
4483 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4484 {
4485 sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
4486 }
4487 #else
4488 {
4489 /* sizeof( int ) == sizeof( long ) so a smaller
4490 printf() library can be used. */
4491 sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
4492 }
4493 #endif
4494 }
4495 else
4496 {
4497 /* If the percentage is zero here then the task has
4498 consumed less than 1% of the total run time. */
4499 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4500 {
4501 sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
4502 }
4503 #else
4504 {
4505 /* sizeof( int ) == sizeof( long ) so a smaller
4506 printf() library can be used. */
4507 sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
4508 }
4509 #endif
4510 }
4511
4512 pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
4513 }
4514 }
4515 else
4516 {
4517 mtCOVERAGE_TEST_MARKER();
4518 }
4519
4520 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4521 is 0 then vPortFree() will be #defined to nothing. */
4522 vPortFree( pxTaskStatusArray );
4523 }
4524 else
4525 {
4526 mtCOVERAGE_TEST_MARKER();
4527 }
4528 }
4529
4530#endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
4531/*-----------------------------------------------------------*/
4532
4533TickType_t uxTaskResetEventItemValue( void )
4534{
4535TickType_t uxReturn;
4536
4537 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
4538
4539 /* Reset the event list item to its normal value - so it can be used with
4540 queues and semaphores. */
4541 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4542
4543 return uxReturn;
4544}
4545/*-----------------------------------------------------------*/
4546
4547#if ( configUSE_MUTEXES == 1 )
4548
4549 TaskHandle_t pvTaskIncrementMutexHeldCount( void )
4550 {
4551 /* If xSemaphoreCreateMutex() is called before any tasks have been created
4552 then pxCurrentTCB will be NULL. */
4553 if( pxCurrentTCB != NULL )
4554 {
4555 ( pxCurrentTCB->uxMutexesHeld )++;
4556 }
4557
4558 return pxCurrentTCB;
4559 }
4560
4561#endif /* configUSE_MUTEXES */
4562/*-----------------------------------------------------------*/
4563
4564#if( configUSE_TASK_NOTIFICATIONS == 1 )
4565
4566 uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
4567 {
4568 uint32_t ulReturn;
4569
4570 taskENTER_CRITICAL();
4571 {
4572 /* Only block if the notification count is not already non-zero. */
4573 if( pxCurrentTCB->ulNotifiedValue == 0UL )
4574 {
4575 /* Mark this task as waiting for a notification. */
4576 pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
4577
4578 if( xTicksToWait > ( TickType_t ) 0 )
4579 {
4580 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
4581 traceTASK_NOTIFY_TAKE_BLOCK();
4582
4583 /* All ports are written to allow a yield in a critical
4584 section (some will yield immediately, others wait until the
4585 critical section exits) - but it is not something that
4586 application code should ever do. */
4587 portYIELD_WITHIN_API();
4588 }
4589 else
4590 {
4591 mtCOVERAGE_TEST_MARKER();
4592 }
4593 }
4594 else
4595 {
4596 mtCOVERAGE_TEST_MARKER();
4597 }
4598 }
4599 taskEXIT_CRITICAL();
4600
4601 taskENTER_CRITICAL();
4602 {
4603 traceTASK_NOTIFY_TAKE();
4604 ulReturn = pxCurrentTCB->ulNotifiedValue;
4605
4606 if( ulReturn != 0UL )
4607 {
4608 if( xClearCountOnExit != pdFALSE )
4609 {
4610 pxCurrentTCB->ulNotifiedValue = 0UL;
4611 }
4612 else
4613 {
4614 pxCurrentTCB->ulNotifiedValue = ulReturn - ( uint32_t ) 1;
4615 }
4616 }
4617 else
4618 {
4619 mtCOVERAGE_TEST_MARKER();
4620 }
4621
4622 pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
4623 }
4624 taskEXIT_CRITICAL();
4625
4626 return ulReturn;
4627 }
4628
4629#endif /* configUSE_TASK_NOTIFICATIONS */
4630/*-----------------------------------------------------------*/
4631
4632#if( configUSE_TASK_NOTIFICATIONS == 1 )
4633
4634 BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait )
4635 {
4636 BaseType_t xReturn;
4637
4638 taskENTER_CRITICAL();
4639 {
4640 /* Only block if a notification is not already pending. */
4641 if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
4642 {
4643 /* Clear bits in the task's notification value as bits may get
4644 set by the notifying task or interrupt. This can be used to
4645 clear the value to zero. */
4646 pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnEntry;
4647
4648 /* Mark this task as waiting for a notification. */
4649 pxCurrentTCB->ucNotifyState = taskWAITING_NOTIFICATION;
4650
4651 if( xTicksToWait > ( TickType_t ) 0 )
4652 {
4653 prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
4654 traceTASK_NOTIFY_WAIT_BLOCK();
4655
4656 /* All ports are written to allow a yield in a critical
4657 section (some will yield immediately, others wait until the
4658 critical section exits) - but it is not something that
4659 application code should ever do. */
4660 portYIELD_WITHIN_API();
4661 }
4662 else
4663 {
4664 mtCOVERAGE_TEST_MARKER();
4665 }
4666 }
4667 else
4668 {
4669 mtCOVERAGE_TEST_MARKER();
4670 }
4671 }
4672 taskEXIT_CRITICAL();
4673
4674 taskENTER_CRITICAL();
4675 {
4676 traceTASK_NOTIFY_WAIT();
4677
4678 if( pulNotificationValue != NULL )
4679 {
4680 /* Output the current notification value, which may or may not
4681 have changed. */
4682 *pulNotificationValue = pxCurrentTCB->ulNotifiedValue;
4683 }
4684
4685 /* If ucNotifyValue is set then either the task never entered the
4686 blocked state (because a notification was already pending) or the
4687 task unblocked because of a notification. Otherwise the task
4688 unblocked because of a timeout. */
4689 if( pxCurrentTCB->ucNotifyState != taskNOTIFICATION_RECEIVED )
4690 {
4691 /* A notification was not received. */
4692 xReturn = pdFALSE;
4693 }
4694 else
4695 {
4696 /* A notification was already pending or a notification was
4697 received while the task was waiting. */
4698 pxCurrentTCB->ulNotifiedValue &= ~ulBitsToClearOnExit;
4699 xReturn = pdTRUE;
4700 }
4701
4702 pxCurrentTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
4703 }
4704 taskEXIT_CRITICAL();
4705
4706 return xReturn;
4707 }
4708
4709#endif /* configUSE_TASK_NOTIFICATIONS */
4710/*-----------------------------------------------------------*/
4711
4712#if( configUSE_TASK_NOTIFICATIONS == 1 )
4713
4714 BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue )
4715 {
4716 TCB_t * pxTCB;
4717 BaseType_t xReturn = pdPASS;
4718 uint8_t ucOriginalNotifyState;
4719
4720 configASSERT( xTaskToNotify );
4721 pxTCB = xTaskToNotify;
4722
4723 taskENTER_CRITICAL();
4724 {
4725 if( pulPreviousNotificationValue != NULL )
4726 {
4727 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
4728 }
4729
4730 ucOriginalNotifyState = pxTCB->ucNotifyState;
4731
4732 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
4733
4734 switch( eAction )
4735 {
4736 case eSetBits :
4737 pxTCB->ulNotifiedValue |= ulValue;
4738 break;
4739
4740 case eIncrement :
4741 ( pxTCB->ulNotifiedValue )++;
4742 break;
4743
4744 case eSetValueWithOverwrite :
4745 pxTCB->ulNotifiedValue = ulValue;
4746 break;
4747
4748 case eSetValueWithoutOverwrite :
4749 if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
4750 {
4751 pxTCB->ulNotifiedValue = ulValue;
4752 }
4753 else
4754 {
4755 /* The value could not be written to the task. */
4756 xReturn = pdFAIL;
4757 }
4758 break;
4759
4760 case eNoAction:
4761 /* The task is being notified without its notify value being
4762 updated. */
4763 break;
4764
4765 default:
4766 /* Should not get here if all enums are handled.
4767 Artificially force an assert by testing a value the
4768 compiler can't assume is const. */
4769 configASSERT( pxTCB->ulNotifiedValue == ~0UL );
4770
4771 break;
4772 }
4773
4774 traceTASK_NOTIFY();
4775
4776 /* If the task is in the blocked state specifically to wait for a
4777 notification then unblock it now. */
4778 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
4779 {
4780 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
4781 prvAddTaskToReadyList( pxTCB );
4782
4783 /* The task should not have been on an event list. */
4784 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
4785
4786 #if( configUSE_TICKLESS_IDLE != 0 )
4787 {
4788 /* If a task is blocked waiting for a notification then
4789 xNextTaskUnblockTime might be set to the blocked task's time
4790 out time. If the task is unblocked for a reason other than
4791 a timeout xNextTaskUnblockTime is normally left unchanged,
4792 because it will automatically get reset to a new value when
4793 the tick count equals xNextTaskUnblockTime. However if
4794 tickless idling is used it might be more important to enter
4795 sleep mode at the earliest possible time - so reset
4796 xNextTaskUnblockTime here to ensure it is updated at the
4797 earliest possible time. */
4798 prvResetNextTaskUnblockTime();
4799 }
4800 #endif
4801
4802 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
4803 {
4804 /* The notified task has a priority above the currently
4805 executing task so a yield is required. */
4806 taskYIELD_IF_USING_PREEMPTION();
4807 }
4808 else
4809 {
4810 mtCOVERAGE_TEST_MARKER();
4811 }
4812 }
4813 else
4814 {
4815 mtCOVERAGE_TEST_MARKER();
4816 }
4817 }
4818 taskEXIT_CRITICAL();
4819
4820 return xReturn;
4821 }
4822
4823#endif /* configUSE_TASK_NOTIFICATIONS */
4824/*-----------------------------------------------------------*/
4825
4826#if( configUSE_TASK_NOTIFICATIONS == 1 )
4827
4828 BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken )
4829 {
4830 TCB_t * pxTCB;
4831 uint8_t ucOriginalNotifyState;
4832 BaseType_t xReturn = pdPASS;
4833 UBaseType_t uxSavedInterruptStatus;
4834
4835 configASSERT( xTaskToNotify );
4836
4837 /* RTOS ports that support interrupt nesting have the concept of a
4838 maximum system call (or maximum API call) interrupt priority.
4839 Interrupts that are above the maximum system call priority are keep
4840 permanently enabled, even when the RTOS kernel is in a critical section,
4841 but cannot make any calls to FreeRTOS API functions. If configASSERT()
4842 is defined in FreeRTOSConfig.h then
4843 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
4844 failure if a FreeRTOS API function is called from an interrupt that has
4845 been assigned a priority above the configured maximum system call
4846 priority. Only FreeRTOS functions that end in FromISR can be called
4847 from interrupts that have been assigned a priority at or (logically)
4848 below the maximum system call interrupt priority. FreeRTOS maintains a
4849 separate interrupt safe API to ensure interrupt entry is as fast and as
4850 simple as possible. More information (albeit Cortex-M specific) is
4851 provided on the following link:
4852 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
4853 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
4854
4855 pxTCB = xTaskToNotify;
4856
4857 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
4858 {
4859 if( pulPreviousNotificationValue != NULL )
4860 {
4861 *pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
4862 }
4863
4864 ucOriginalNotifyState = pxTCB->ucNotifyState;
4865 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
4866
4867 switch( eAction )
4868 {
4869 case eSetBits :
4870 pxTCB->ulNotifiedValue |= ulValue;
4871 break;
4872
4873 case eIncrement :
4874 ( pxTCB->ulNotifiedValue )++;
4875 break;
4876
4877 case eSetValueWithOverwrite :
4878 pxTCB->ulNotifiedValue = ulValue;
4879 break;
4880
4881 case eSetValueWithoutOverwrite :
4882 if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
4883 {
4884 pxTCB->ulNotifiedValue = ulValue;
4885 }
4886 else
4887 {
4888 /* The value could not be written to the task. */
4889 xReturn = pdFAIL;
4890 }
4891 break;
4892
4893 case eNoAction :
4894 /* The task is being notified without its notify value being
4895 updated. */
4896 break;
4897
4898 default:
4899 /* Should not get here if all enums are handled.
4900 Artificially force an assert by testing a value the
4901 compiler can't assume is const. */
4902 configASSERT( pxTCB->ulNotifiedValue == ~0UL );
4903 break;
4904 }
4905
4906 traceTASK_NOTIFY_FROM_ISR();
4907
4908 /* If the task is in the blocked state specifically to wait for a
4909 notification then unblock it now. */
4910 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
4911 {
4912 /* The task should not have been on an event list. */
4913 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
4914
4915 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
4916 {
4917 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
4918 prvAddTaskToReadyList( pxTCB );
4919 }
4920 else
4921 {
4922 /* The delayed and ready lists cannot be accessed, so hold
4923 this task pending until the scheduler is resumed. */
4924 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
4925 }
4926
4927 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
4928 {
4929 /* The notified task has a priority above the currently
4930 executing task so a yield is required. */
4931 if( pxHigherPriorityTaskWoken != NULL )
4932 {
4933 *pxHigherPriorityTaskWoken = pdTRUE;
4934 }
4935
4936 /* Mark that a yield is pending in case the user is not
4937 using the "xHigherPriorityTaskWoken" parameter to an ISR
4938 safe FreeRTOS function. */
4939 xYieldPending = pdTRUE;
4940 }
4941 else
4942 {
4943 mtCOVERAGE_TEST_MARKER();
4944 }
4945 }
4946 }
4947 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
4948
4949 return xReturn;
4950 }
4951
4952#endif /* configUSE_TASK_NOTIFICATIONS */
4953/*-----------------------------------------------------------*/
4954
4955#if( configUSE_TASK_NOTIFICATIONS == 1 )
4956
4957 void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken )
4958 {
4959 TCB_t * pxTCB;
4960 uint8_t ucOriginalNotifyState;
4961 UBaseType_t uxSavedInterruptStatus;
4962
4963 configASSERT( xTaskToNotify );
4964
4965 /* RTOS ports that support interrupt nesting have the concept of a
4966 maximum system call (or maximum API call) interrupt priority.
4967 Interrupts that are above the maximum system call priority are keep
4968 permanently enabled, even when the RTOS kernel is in a critical section,
4969 but cannot make any calls to FreeRTOS API functions. If configASSERT()
4970 is defined in FreeRTOSConfig.h then
4971 portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
4972 failure if a FreeRTOS API function is called from an interrupt that has
4973 been assigned a priority above the configured maximum system call
4974 priority. Only FreeRTOS functions that end in FromISR can be called
4975 from interrupts that have been assigned a priority at or (logically)
4976 below the maximum system call interrupt priority. FreeRTOS maintains a
4977 separate interrupt safe API to ensure interrupt entry is as fast and as
4978 simple as possible. More information (albeit Cortex-M specific) is
4979 provided on the following link:
4980 http://www.freertos.org/RTOS-Cortex-M3-M4.html */
4981 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
4982
4983 pxTCB = xTaskToNotify;
4984
4985 uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
4986 {
4987 ucOriginalNotifyState = pxTCB->ucNotifyState;
4988 pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;
4989
4990 /* 'Giving' is equivalent to incrementing a count in a counting
4991 semaphore. */
4992 ( pxTCB->ulNotifiedValue )++;
4993
4994 traceTASK_NOTIFY_GIVE_FROM_ISR();
4995
4996 /* If the task is in the blocked state specifically to wait for a
4997 notification then unblock it now. */
4998 if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
4999 {
5000 /* The task should not have been on an event list. */
5001 configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );
5002
5003 if( uxSchedulerSuspended == ( UBaseType_t ) pdFALSE )
5004 {
5005 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
5006 prvAddTaskToReadyList( pxTCB );
5007 }
5008 else
5009 {
5010 /* The delayed and ready lists cannot be accessed, so hold
5011 this task pending until the scheduler is resumed. */
5012 vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
5013 }
5014
5015 if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
5016 {
5017 /* The notified task has a priority above the currently
5018 executing task so a yield is required. */
5019 if( pxHigherPriorityTaskWoken != NULL )
5020 {
5021 *pxHigherPriorityTaskWoken = pdTRUE;
5022 }
5023
5024 /* Mark that a yield is pending in case the user is not
5025 using the "xHigherPriorityTaskWoken" parameter in an ISR
5026 safe FreeRTOS function. */
5027 xYieldPending = pdTRUE;
5028 }
5029 else
5030 {
5031 mtCOVERAGE_TEST_MARKER();
5032 }
5033 }
5034 }
5035 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
5036 }
5037
5038#endif /* configUSE_TASK_NOTIFICATIONS */
5039
5040/*-----------------------------------------------------------*/
5041
5042#if( configUSE_TASK_NOTIFICATIONS == 1 )
5043
5044 BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask )
5045 {
5046 TCB_t *pxTCB;
5047 BaseType_t xReturn;
5048
5049 /* If null is passed in here then it is the calling task that is having
5050 its notification state cleared. */
5051 pxTCB = prvGetTCBFromHandle( xTask );
5052
5053 taskENTER_CRITICAL();
5054 {
5055 if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED )
5056 {
5057 pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
5058 xReturn = pdPASS;
5059 }
5060 else
5061 {
5062 xReturn = pdFAIL;
5063 }
5064 }
5065 taskEXIT_CRITICAL();
5066
5067 return xReturn;
5068 }
5069
5070#endif /* configUSE_TASK_NOTIFICATIONS */
5071/*-----------------------------------------------------------*/
5072
5073#if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
5074 TickType_t xTaskGetIdleRunTimeCounter( void )
5075 {
5076 return xIdleTaskHandle->ulRunTimeCounter;
5077 }
5078#endif
5079/*-----------------------------------------------------------*/
5080
5081static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait, const BaseType_t xCanBlockIndefinitely )
5082{
5083TickType_t xTimeToWake;
5084const TickType_t xConstTickCount = xTickCount;
5085
5086 #if( INCLUDE_xTaskAbortDelay == 1 )
5087 {
5088 /* About to enter a delayed list, so ensure the ucDelayAborted flag is
5089 reset to pdFALSE so it can be detected as having been set to pdTRUE
5090 when the task leaves the Blocked state. */
5091 pxCurrentTCB->ucDelayAborted = pdFALSE;
5092 }
5093 #endif
5094
5095 /* Remove the task from the ready list before adding it to the blocked list
5096 as the same list item is used for both lists. */
5097 if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
5098 {
5099 /* The current task must be in a ready list, so there is no need to
5100 check, and the port reset macro can be called directly. */
5101 portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */
5102 }
5103 else
5104 {
5105 mtCOVERAGE_TEST_MARKER();
5106 }
5107
5108 #if ( INCLUDE_vTaskSuspend == 1 )
5109 {
5110 if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
5111 {
5112 /* Add the task to the suspended task list instead of a delayed task
5113 list to ensure it is not woken by a timing event. It will block
5114 indefinitely. */
5115 vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) );
5116 }
5117 else
5118 {
5119 /* Calculate the time at which the task should be woken if the event
5120 does not occur. This may overflow but this doesn't matter, the
5121 kernel will manage it correctly. */
5122 xTimeToWake = xConstTickCount + xTicksToWait;
5123
5124 /* The list item will be inserted in wake time order. */
5125 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
5126
5127 if( xTimeToWake < xConstTickCount )
5128 {
5129 /* Wake time has overflowed. Place this item in the overflow
5130 list. */
5131 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
5132 }
5133 else
5134 {
5135 /* The wake time has not overflowed, so the current block list
5136 is used. */
5137 vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
5138
5139 /* If the task entering the blocked state was placed at the
5140 head of the list of blocked tasks then xNextTaskUnblockTime
5141 needs to be updated too. */
5142 if( xTimeToWake < xNextTaskUnblockTime )
5143 {
5144 xNextTaskUnblockTime = xTimeToWake;
5145 }
5146 else
5147 {
5148 mtCOVERAGE_TEST_MARKER();
5149 }
5150 }
5151 }
5152 }
5153 #else /* INCLUDE_vTaskSuspend */
5154 {
5155 /* Calculate the time at which the task should be woken if the event
5156 does not occur. This may overflow but this doesn't matter, the kernel
5157 will manage it correctly. */
5158 xTimeToWake = xConstTickCount + xTicksToWait;
5159
5160 /* The list item will be inserted in wake time order. */
5161 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );
5162
5163 if( xTimeToWake < xConstTickCount )
5164 {
5165 /* Wake time has overflowed. Place this item in the overflow list. */
5166 vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
5167 }
5168 else
5169 {
5170 /* The wake time has not overflowed, so the current block list is used. */
5171 vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
5172
5173 /* If the task entering the blocked state was placed at the head of the
5174 list of blocked tasks then xNextTaskUnblockTime needs to be updated
5175 too. */
5176 if( xTimeToWake < xNextTaskUnblockTime )
5177 {
5178 xNextTaskUnblockTime = xTimeToWake;
5179 }
5180 else
5181 {
5182 mtCOVERAGE_TEST_MARKER();
5183 }
5184 }
5185
5186 /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
5187 ( void ) xCanBlockIndefinitely;
5188 }
5189 #endif /* INCLUDE_vTaskSuspend */
5190}
5191
5192/* Code below here allows additional code to be inserted into this source file,
5193especially where access to file scope functions and data is needed (for example
5194when performing module tests). */
5195
5196#ifdef FREERTOS_MODULE_TEST
5197 #include "tasks_test_access_functions.h"
5198#endif
5199
5200
5201#if( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )
5202
5203 #include "freertos_tasks_c_additions.h"
5204
5205 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
5206 static void freertos_tasks_c_additions_init( void )
5207 {
5208 FREERTOS_TASKS_C_ADDITIONS_INIT();
5209 }
5210 #endif
5211
5212#endif
5213
5214