Diffstat (limited to 'rtos/FreeRTOS/Source/queue.c')
-rw-r--r-- rtos/FreeRTOS/Source/queue.c | 427
1 file changed, 313 insertions(+), 114 deletions(-)
diff --git a/rtos/FreeRTOS/Source/queue.c b/rtos/FreeRTOS/Source/queue.c
index 70de24b..25fcc26 100644
--- a/rtos/FreeRTOS/Source/queue.c
+++ b/rtos/FreeRTOS/Source/queue.c
@@ -1,60 +1,64 @@
/*
- FreeRTOS V8.0.0 - Copyright (C) 2014 Real Time Engineers Ltd.
+ FreeRTOS V8.2.3 - Copyright (C) 2015 Real Time Engineers Ltd.
All rights reserved
VISIT http://www.FreeRTOS.org TO ENSURE YOU ARE USING THE LATEST VERSION.
- ***************************************************************************
- * *
- * FreeRTOS provides completely free yet professionally developed, *
- * robust, strictly quality controlled, supported, and cross *
- * platform software that has become a de facto standard. *
- * *
- * Help yourself get started quickly and support the FreeRTOS *
- * project by purchasing a FreeRTOS tutorial book, reference *
- * manual, or both from: http://www.FreeRTOS.org/Documentation *
- * *
- * Thank you! *
- * *
- ***************************************************************************
-
This file is part of the FreeRTOS distribution.
FreeRTOS is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License (version 2) as published by the
- Free Software Foundation >>!AND MODIFIED BY!<< the FreeRTOS exception.
+ Free Software Foundation >>>> AND MODIFIED BY <<<< the FreeRTOS exception.
- >>! NOTE: The modification to the GPL is included to allow you to distribute
- >>! a combined work that includes FreeRTOS without being obliged to provide
- >>! the source code for proprietary components outside of the FreeRTOS
- >>! kernel.
+ ***************************************************************************
+ >>! NOTE: The modification to the GPL is included to allow you to !<<
+ >>! distribute a combined work that includes FreeRTOS without being !<<
+ >>! obliged to provide the source code for proprietary components !<<
+ >>! outside of the FreeRTOS kernel. !<<
+ ***************************************************************************
FreeRTOS is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- FOR A PARTICULAR PURPOSE. Full license text is available from the following
+ FOR A PARTICULAR PURPOSE. Full license text is available on the following
link: http://www.freertos.org/a00114.html
- 1 tab == 4 spaces!
-
***************************************************************************
* *
- * Having a problem? Start by reading the FAQ "My application does *
- * not run, what could be wrong?" *
+ * FreeRTOS provides completely free yet professionally developed, *
+ * robust, strictly quality controlled, supported, and cross *
+ * platform software that is more than just the market leader, it *
+ * is the industry's de facto standard. *
* *
- * http://www.FreeRTOS.org/FAQHelp.html *
+ * Help yourself get started quickly while simultaneously helping *
+ * to support the FreeRTOS project by purchasing a FreeRTOS *
+ * tutorial book, reference manual, or both: *
+ * http://www.FreeRTOS.org/Documentation *
* *
***************************************************************************
- http://www.FreeRTOS.org - Documentation, books, training, latest versions,
- license and Real Time Engineers Ltd. contact details.
+ http://www.FreeRTOS.org/FAQHelp.html - Having a problem? Start by reading
+ the FAQ page "My application does not run, what could be wrong?". Have you
+ defined configASSERT()?
+
+ http://www.FreeRTOS.org/support - In return for receiving this top quality
+ embedded software for free we request you assist our global community by
+ participating in the support forum.
+
+ http://www.FreeRTOS.org/training - Investing in training allows your team to
+ be as productive as possible as early as possible. Now you can receive
+ FreeRTOS training directly from Richard Barry, CEO of Real Time Engineers
+ Ltd, and the world's leading authority on the world's leading RTOS.
http://www.FreeRTOS.org/plus - A selection of FreeRTOS ecosystem products,
including FreeRTOS+Trace - an indispensable productivity tool, a DOS
compatible FAT file system, and our tiny thread aware UDP/IP stack.
- http://www.OpenRTOS.com - Real Time Engineers ltd license FreeRTOS to High
- Integrity Systems to sell under the OpenRTOS brand. Low cost OpenRTOS
- licenses offer ticketed support, indemnification and middleware.
+ http://www.FreeRTOS.org/labs - Where new FreeRTOS products go to incubate.
+ Come and try FreeRTOS+TCP, our new open source TCP/IP stack for FreeRTOS.
+
+ http://www.OpenRTOS.com - Real Time Engineers ltd. license FreeRTOS to High
+ Integrity Systems ltd. to sell under the OpenRTOS brand. Low cost OpenRTOS
+ licenses offer ticketed support, indemnification and commercial middleware.
http://www.SafeRTOS.com - High Integrity Systems also provide a safety
engineered and independently SIL3 certified version for use in safety and
@@ -120,7 +124,8 @@ zero. */
/*
* Definition of the queue used by the scheduler.
- * Items are queued by copy, not reference.
+ * Items are queued by copy, not reference. See the following link for the
+ * rationale: http://www.freertos.org/Embedded-RTOS-Queues.html
*/
typedef struct QueueDefinition
{
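
The "queued by copy, not reference" behaviour referenced above is worth keeping in mind while reading the rest of the patch. A minimal usage sketch, not part of this patch (the message type, queue handle and task name are hypothetical):

#include <stdint.h>
#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

typedef struct
{
    uint32_t ulSensorId;
    uint32_t ulReading;
} Message_t;

static QueueHandle_t xMsgQueue = NULL;

void vCreateExampleQueue( void )
{
    /* Space for 8 copies of Message_t is allocated inside the queue. */
    xMsgQueue = xQueueCreate( 8, sizeof( Message_t ) );
}

static void vProducerTask( void *pvParameters )
{
Message_t xMsg = { 1UL, 0UL };

    ( void ) pvParameters;

    for( ;; )
    {
        /* The whole Message_t is copied into the queue's storage area, so
        xMsg can be modified immediately after xQueueSend() returns. */
        ( void ) xQueueSend( xMsgQueue, &xMsg, portMAX_DELAY );
        xMsg.ulReading++;
        vTaskDelay( pdMS_TO_TICKS( 100 ) );
    }
}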
@@ -184,7 +189,7 @@ typedef xQUEUE Queue_t;
/* The queue registry is simply an array of QueueRegistryItem_t structures.
The pcQueueName member of a structure being NULL is indicative of the
array position being vacant. */
- QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];
+ PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];
#endif /* configQUEUE_REGISTRY_SIZE */
@@ -216,7 +221,7 @@ static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;
* Copies an item into the queue, either at the front of the queue or the
* back of the queue.
*/
-static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
+static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
/*
* Copies an item out of a queue.
@@ -315,50 +320,60 @@ QueueHandle_t xReturn = NULL;
configUSE_TRACE_FACILITY not be set to 1. */
( void ) ucQueueType;
- /* Allocate the new queue structure. */
- if( uxQueueLength > ( UBaseType_t ) 0 )
+ configASSERT( uxQueueLength > ( UBaseType_t ) 0 );
+
+ if( uxItemSize == ( UBaseType_t ) 0 )
{
- pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) );
- if( pxNewQueue != NULL )
- {
- /* Create the list of pointers to queue items. The queue is one byte
- longer than asked for to make wrap checking easier/faster. */
- xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+ /* There is not going to be a queue storage area. */
+ xQueueSizeInBytes = ( size_t ) 0;
+ }
+ else
+ {
+ /* The queue is one byte longer than asked for to make wrap checking
+ easier/faster. */
+ xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ) + ( size_t ) 1; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
+ }
- pxNewQueue->pcHead = ( int8_t * ) pvPortMalloc( xQueueSizeInBytes );
- if( pxNewQueue->pcHead != NULL )
- {
- /* Initialise the queue members as described above where the
- queue type is defined. */
- pxNewQueue->uxLength = uxQueueLength;
- pxNewQueue->uxItemSize = uxItemSize;
- ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
+ /* Allocate the new queue structure and storage area. */
+ pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );
- #if ( configUSE_TRACE_FACILITY == 1 )
- {
- pxNewQueue->ucQueueType = ucQueueType;
- }
- #endif /* configUSE_TRACE_FACILITY */
+ if( pxNewQueue != NULL )
+ {
+ if( uxItemSize == ( UBaseType_t ) 0 )
+ {
+ /* No RAM was allocated for the queue storage area, but pcHead
+ cannot be set to NULL because NULL is used as a key to say the queue
+ is used as a mutex. Therefore just set pcHead to point to the queue
+ as a benign value that is known to be within the memory map. */
+ pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
+ }
+ else
+ {
+ /* Jump past the queue structure to find the location of the queue
+ storage area. */
+ pxNewQueue->pcHead = ( ( int8_t * ) pxNewQueue ) + sizeof( Queue_t );
+ }
- #if( configUSE_QUEUE_SETS == 1 )
- {
- pxNewQueue->pxQueueSetContainer = NULL;
- }
- #endif /* configUSE_QUEUE_SETS */
+ /* Initialise the queue members as described above where the queue type
+ is defined. */
+ pxNewQueue->uxLength = uxQueueLength;
+ pxNewQueue->uxItemSize = uxItemSize;
+ ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
- traceQUEUE_CREATE( pxNewQueue );
- xReturn = pxNewQueue;
- }
- else
- {
- traceQUEUE_CREATE_FAILED( ucQueueType );
- vPortFree( pxNewQueue );
- }
+ #if ( configUSE_TRACE_FACILITY == 1 )
+ {
+ pxNewQueue->ucQueueType = ucQueueType;
}
- else
+ #endif /* configUSE_TRACE_FACILITY */
+
+ #if( configUSE_QUEUE_SETS == 1 )
{
- mtCOVERAGE_TEST_MARKER();
+ pxNewQueue->pxQueueSetContainer = NULL;
}
+ #endif /* configUSE_QUEUE_SETS */
+
+ traceQUEUE_CREATE( pxNewQueue );
+ xReturn = pxNewQueue;
}
else
{
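
The key structural change in the hunk above is that the Queue_t control block and the item storage area now come from a single pvPortMalloc() call, with pcHead either pointing just past the structure or, when the item size is zero (semaphores and mutexes), at the structure itself as a benign non-NULL marker. One user-visible consequence is that each queue now occupies exactly one heap block, freed by the single vPortFree() in vQueueDelete() further down. A small sketch, assuming a heap_1/2/4/5 style allocator that provides xPortGetFreeHeapSize() (function and variable names are hypothetical):

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

void vQueueHeapUsageExample( void )
{
size_t xBefore, xAfter;
QueueHandle_t xQueue;

    xBefore = xPortGetFreeHeapSize();

    /* A single allocation of sizeof( Queue_t ) + ( 4 * sizeof( uint32_t ) ) + 1
    bytes, plus whatever alignment/bookkeeping the heap implementation adds. */
    xQueue = xQueueCreate( 4, sizeof( uint32_t ) );
    configASSERT( xQueue != NULL );

    xAfter = xPortGetFreeHeapSize();

    /* xBefore - xAfter is the size of that one block. */
    ( void ) ( xBefore - xAfter );
    ( void ) xQueue;
}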
@@ -429,7 +444,6 @@ QueueHandle_t xReturn = NULL;
traceCREATE_MUTEX_FAILED();
}
- configASSERT( pxNewQueue );
return pxNewQueue;
}
@@ -461,7 +475,7 @@ QueueHandle_t xReturn = NULL;
taskEXIT_CRITICAL();
return pxReturn;
- }
+ } /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */
#endif
/*-----------------------------------------------------------*/
@@ -508,7 +522,8 @@ QueueHandle_t xReturn = NULL;
}
else
{
- /* We cannot give the mutex because we are not the holder. */
+ /* The mutex cannot be given because the calling task is not the
+ holder. */
xReturn = pdFAIL;
traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
@@ -543,8 +558,9 @@ QueueHandle_t xReturn = NULL;
{
xReturn = xQueueGenericReceive( pxMutex, NULL, xTicksToWait, pdFALSE );
- /* pdPASS will only be returned if we successfully obtained the mutex,
- we may have blocked to reach here. */
+ /* pdPASS will only be returned if the mutex was successfully
+ obtained. The calling task may have entered the Blocked state
+ before reaching here. */
if( xReturn == pdPASS )
{
( pxMutex->u.uxRecursiveCallCount )++;
@@ -592,7 +608,7 @@ QueueHandle_t xReturn = NULL;
BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
-BaseType_t xEntryTimeSet = pdFALSE;
+BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
@@ -613,14 +629,14 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
{
taskENTER_CRITICAL();
{
- /* Is there room on the queue now? The running task must be
- the highest priority task wanting to access the queue. If
- the head item in the queue is to be overwritten then it does
- not matter if the queue is full. */
+ /* Is there room on the queue now? The running task must be the
+ highest priority task wanting to access the queue. If the head item
+ in the queue is to be overwritten then it does not matter if the
+ queue is full. */
if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
{
traceQUEUE_SEND( pxQueue );
- prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
+ xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
#if ( configUSE_QUEUE_SETS == 1 )
{
@@ -657,6 +673,14 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
mtCOVERAGE_TEST_MARKER();
}
}
+ else if( xYieldRequired != pdFALSE )
+ {
+ /* This path is a special case that will only get
+ executed if the task was holding multiple mutexes
+ and the mutexes were given back in an order that is
+ different to that in which they were taken. */
+ queueYIELD_IF_USING_PREEMPTION();
+ }
else
{
mtCOVERAGE_TEST_MARKER();
@@ -682,6 +706,14 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
mtCOVERAGE_TEST_MARKER();
}
}
+ else if( xYieldRequired != pdFALSE )
+ {
+ /* This path is a special case that will only get
+ executed if the task was holding multiple mutexes and
+ the mutexes were given back in an order that is
+ different to that in which they were taken. */
+ queueYIELD_IF_USING_PREEMPTION();
+ }
else
{
mtCOVERAGE_TEST_MARKER();
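
The xYieldRequired branch added above (and its twin in the previous hunk) only fires when prvCopyDataToQueue() reports that giving a mutex back caused a priority disinheritance while no task was waiting on that particular mutex. A hedged scenario sketch, not part of the patch (handles and function name are hypothetical, return values ignored for brevity):

#include "FreeRTOS.h"
#include "semphr.h"

static SemaphoreHandle_t xMutexA, xMutexB; /* Both created with xSemaphoreCreateMutex(). */

void vOutOfOrderGiveExample( void )
{
    ( void ) xSemaphoreTake( xMutexA, portMAX_DELAY );
    ( void ) xSemaphoreTake( xMutexB, portMAX_DELAY );

    /* Suppose a higher priority task now tries to take xMutexA and blocks,
    so this task inherits its priority while holding both mutexes. */

    /* Giving the mutexes back in take order (A then B) rather than the
    reverse order means the inherited priority can only be returned on the
    last give - where no task is waiting - so the new xYieldRequired path
    performs the context switch. */
    ( void ) xSemaphoreGive( xMutexA ); /* Unblocks the waiter; priority still inherited. */
    ( void ) xSemaphoreGive( xMutexB ); /* Priority disinherited here; yield via new path. */
}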
@@ -690,9 +722,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
#endif /* configUSE_QUEUE_SETS */
taskEXIT_CRITICAL();
-
- /* Return to the original privilege level before exiting the
- function. */
return pdPASS;
}
else
@@ -928,7 +957,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
{
traceQUEUE_PEEK( pxQueue );
- /* We are not removing the data, so reset our read
+ /* The data is not being removed, so reset our read
pointer. */
pxQueue->u.pcReadFrom = pcOriginalReadPosition;
@@ -1059,7 +1088,12 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
{
traceQUEUE_SEND_FROM_ISR( pxQueue );
- prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
+ /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
+ semaphore or mutex. That means prvCopyDataToQueue() cannot result
+ in a task disinheriting a priority and prvCopyDataToQueue() can be
+ called here even though the disinherit function does not check if
+ the scheduler is suspended before accessing the ready lists. */
+ ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
/* The event list is not altered if the queue is locked. This will
be done when the queue is unlocked later. */
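
The comment above is why the return value of prvCopyDataToQueue() can safely be discarded here: data queues never have a mutex holder, so no disinheritance (and therefore no ready-list access) can occur on this ISR path. For reference, the usage pattern this hunk serves is the ordinary xQueueSendFromISR() call; a minimal sketch, with a hypothetical handler name and a port-specific yield macro:

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"

extern QueueHandle_t xDataQueue; /* Created elsewhere with a non-zero item size. */

void vExampleRxInterruptHandler( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;
uint32_t ulReceivedValue = 0;

    /* ulReceivedValue would normally be read from a peripheral register;
    it is copied into the queue's storage area. */
    ( void ) xQueueSendFromISR( xDataQueue, &ulReceivedValue, &xHigherPriorityTaskWoken );

    /* Request a context switch now if sending unblocked a higher priority task. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}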
@@ -1094,8 +1128,169 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
{
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
- /* The task waiting has a higher priority so record that a
- context switch is required. */
+ /* The task waiting has a higher priority so
+ record that a context switch is required. */
+ if( pxHigherPriorityTaskWoken != NULL )
+ {
+ *pxHigherPriorityTaskWoken = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ }
+ #else /* configUSE_QUEUE_SETS */
+ {
+ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
+ {
+ if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
+ {
+ /* The task waiting has a higher priority so record that a
+ context switch is required. */
+ if( pxHigherPriorityTaskWoken != NULL )
+ {
+ *pxHigherPriorityTaskWoken = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ #endif /* configUSE_QUEUE_SETS */
+ }
+ else
+ {
+ /* Increment the lock count so the task that unlocks the queue
+ knows that data was posted while it was locked. */
+ ++( pxQueue->xTxLock );
+ }
+
+ xReturn = pdPASS;
+ }
+ else
+ {
+ traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
+ xReturn = errQUEUE_FULL;
+ }
+ }
+ portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
+
+ return xReturn;
+}
+/*-----------------------------------------------------------*/
+
+BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
+{
+BaseType_t xReturn;
+UBaseType_t uxSavedInterruptStatus;
+Queue_t * const pxQueue = ( Queue_t * ) xQueue;
+
+ /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
+ item size is 0. Don't directly wake a task that was blocked on a queue
+ read, instead return a flag to say whether a context switch is required or
+ not (i.e. has a task with a higher priority than us been woken by this
+ post). */
+
+ configASSERT( pxQueue );
+
+ /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
+ if the item size is not 0. */
+ configASSERT( pxQueue->uxItemSize == 0 );
+
+ /* Normally a mutex would not be given from an interrupt, especially if
+ there is a mutex holder, as priority inheritance makes no sense for an
+ interrupt, only tasks. */
+ configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->pxMutexHolder != NULL ) ) );
+
+ /* RTOS ports that support interrupt nesting have the concept of a maximum
+ system call (or maximum API call) interrupt priority. Interrupts that are
+ above the maximum system call priority are kept permanently enabled, even
+ when the RTOS kernel is in a critical section, but cannot make any calls to
+ FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
+ then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
+ failure if a FreeRTOS API function is called from an interrupt that has been
+ assigned a priority above the configured maximum system call priority.
+ Only FreeRTOS functions that end in FromISR can be called from interrupts
+ that have been assigned a priority at or (logically) below the maximum
+ system call interrupt priority. FreeRTOS maintains a separate interrupt
+ safe API to ensure interrupt entry is as fast and as simple as possible.
+ More information (albeit Cortex-M specific) is provided on the following
+ link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
+ portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
+
+ uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
+ {
+ /* When the queue is used to implement a semaphore no data is ever
+ moved through the queue but it is still valid to see if the queue 'has
+ space'. */
+ if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
+ {
+ traceQUEUE_SEND_FROM_ISR( pxQueue );
+
+ /* A task can only have an inherited priority if it is a mutex
+ holder - and if there is a mutex holder then the mutex cannot be
+ given from an ISR. As this is the ISR version of the function it
+ can be assumed there is no mutex holder and no need to determine if
+ priority disinheritance is needed. Simply increase the count of
+ messages (semaphores) available. */
+ ++( pxQueue->uxMessagesWaiting );
+
+ /* The event list is not altered if the queue is locked. This will
+ be done when the queue is unlocked later. */
+ if( pxQueue->xTxLock == queueUNLOCKED )
+ {
+ #if ( configUSE_QUEUE_SETS == 1 )
+ {
+ if( pxQueue->pxQueueSetContainer != NULL )
+ {
+ if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) == pdTRUE )
+ {
+ /* The semaphore is a member of a queue set, and
+ posting to the queue set caused a higher priority
+ task to unblock. A context switch is required. */
+ if( pxHigherPriorityTaskWoken != NULL )
+ {
+ *pxHigherPriorityTaskWoken = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
+ }
+ else
+ {
+ if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
+ {
+ if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
+ {
+ /* The task waiting has a higher priority so
+ record that a context switch is required. */
if( pxHigherPriorityTaskWoken != NULL )
{
*pxHigherPriorityTaskWoken = pdTRUE;
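
xQueueGiveFromISR() is new in this version: it is the fast path that xSemaphoreGiveFromISR() maps onto for semaphores, where the item size is zero and nothing is copied. A minimal ISR sketch, assuming a binary semaphore created elsewhere and a port that provides portYIELD_FROM_ISR() (handler name is hypothetical):

#include "FreeRTOS.h"
#include "semphr.h"

extern SemaphoreHandle_t xBinarySemaphore; /* e.g. from xSemaphoreCreateBinary(). */

void vExampleTimerInterruptHandler( void )
{
BaseType_t xHigherPriorityTaskWoken = pdFALSE;

    /* No data is copied - the semaphore's message count is simply
    incremented, which is why uxItemSize must be zero in this function. */
    ( void ) xSemaphoreGiveFromISR( xBinarySemaphore, &xHigherPriorityTaskWoken );

    /* Yield now if giving the semaphore unblocked a higher priority task. */
    portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
}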
@@ -1189,8 +1384,8 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
{
taskENTER_CRITICAL();
{
- /* Is there data in the queue now? To be running we must be
- the highest priority task wanting to access the queue. */
+ /* Is there data in the queue now? To be running the calling task
+ must be the highest priority task wanting to access the queue. */
if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
{
/* Remember the read position in case the queue is only being
@@ -1212,14 +1407,14 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
{
/* Record the information required to implement
priority inheritance should it become necessary. */
- pxQueue->pxMutexHolder = ( int8_t * ) xTaskGetCurrentTaskHandle(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
+ pxQueue->pxMutexHolder = ( int8_t * ) pvTaskIncrementMutexHeldCount(); /*lint !e961 Cast is not redundant as TaskHandle_t is a typedef. */
}
else
{
mtCOVERAGE_TEST_MARKER();
}
}
- #endif
+ #endif /* configUSE_MUTEXES */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
{
@@ -1249,8 +1444,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
any other tasks waiting for the data. */
if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
{
- /* Tasks that are removed from the event list will get added to
- the pending ready list as the scheduler is still suspended. */
if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
{
/* The task waiting has a higher priority than this task. */
@@ -1451,6 +1644,7 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
configASSERT( pxQueue );
configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
+ configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
/* RTOS ports that support interrupt nesting have the concept of a maximum
system call (or maximum API call) interrupt priority. Interrupts that are
@@ -1553,10 +1747,6 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
vQueueUnregisterQueue( pxQueue );
}
#endif
- if( pxQueue->pcHead != NULL )
- {
- vPortFree( pxQueue->pcHead );
- }
vPortFree( pxQueue );
}
/*-----------------------------------------------------------*/
@@ -1591,8 +1781,10 @@ Queue_t * const pxQueue = ( Queue_t * ) xQueue;
#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/
-static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
+static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
{
+BaseType_t xReturn = pdFALSE;
+
if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
{
#if ( configUSE_MUTEXES == 1 )
@@ -1600,7 +1792,7 @@ static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQue
if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
{
/* The mutex is no longer being held. */
- vTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
+ xReturn = xTaskPriorityDisinherit( ( void * ) pxQueue->pxMutexHolder );
pxQueue->pxMutexHolder = NULL;
}
else
@@ -1658,12 +1850,14 @@ static void prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQue
}
++( pxQueue->uxMessagesWaiting );
+
+ return xReturn;
}
/*-----------------------------------------------------------*/
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
{
- if( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX )
+ if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
{
pxQueue->u.pcReadFrom += pxQueue->uxItemSize;
if( pxQueue->u.pcReadFrom >= pxQueue->pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
@@ -1676,10 +1870,6 @@ static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer
}
( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 MISRA exception as the casts are only redundant for some ports. Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0. */
}
- else
- {
- mtCOVERAGE_TEST_MARKER();
- }
}
/*-----------------------------------------------------------*/
@@ -2209,7 +2399,7 @@ BaseType_t xReturn;
#if ( configUSE_TIMERS == 1 )
- void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait )
+ void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
{
Queue_t * const pxQueue = ( Queue_t * ) xQueue;
@@ -2231,7 +2421,7 @@ BaseType_t xReturn;
if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
{
/* There is nothing in the queue, block for the specified period. */
- vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
+ vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
}
else
{
@@ -2367,14 +2557,23 @@ BaseType_t xReturn;
if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
{
traceQUEUE_SEND( pxQueueSetContainer );
- /* The data copies is the handle of the queue that contains data. */
- prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
- if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
+
+ /* The data copied is the handle of the queue that contains data. */
+ xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
+
+ if( pxQueueSetContainer->xTxLock == queueUNLOCKED )
{
- if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
+ if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
{
- /* The task waiting has a higher priority */
- xReturn = pdTRUE;
+ if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
+ {
+ /* The task waiting has a higher priority. */
+ xReturn = pdTRUE;
+ }
+ else
+ {
+ mtCOVERAGE_TEST_MARKER();
+ }
}
else
{
@@ -2383,7 +2582,7 @@ BaseType_t xReturn;
}
else
{
- mtCOVERAGE_TEST_MARKER();
+ ( pxQueueSetContainer->xTxLock )++;
}
}
else
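
prvNotifyQueueSetContainer() now treats the queue set container like any other queue: when the container itself is locked it defers the event-list update by incrementing the container's Tx lock count instead of touching the ready lists. For context, the application-level API this feeds is sketched below; the handles and task name are hypothetical and configUSE_QUEUE_SETS must be set to 1 in FreeRTOSConfig.h:

#include <stdint.h>
#include "FreeRTOS.h"
#include "queue.h"
#include "semphr.h"

void vQueueSetReaderTask( void *pvParameters )
{
QueueSetHandle_t xSet;
QueueHandle_t xQueue;
SemaphoreHandle_t xSemaphore;
QueueSetMemberHandle_t xActivated;
uint32_t ulValue;

    ( void ) pvParameters;

    /* The set must be able to hold one event per space in each member:
    4 queue spaces + 1 binary semaphore. */
    xQueue = xQueueCreate( 4, sizeof( uint32_t ) );
    xSemaphore = xSemaphoreCreateBinary();
    xSet = xQueueCreateSet( 4 + 1 );

    ( void ) xQueueAddToSet( xQueue, xSet );
    ( void ) xQueueAddToSet( xSemaphore, xSet );

    for( ;; )
    {
        /* Blocks until a member contains data; the member handle copied into
        the set by prvNotifyQueueSetContainer() is returned here. */
        xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );

        if( xActivated == ( QueueSetMemberHandle_t ) xQueue )
        {
            ( void ) xQueueReceive( xQueue, &ulValue, 0 );
        }
        else
        {
            ( void ) xSemaphoreTake( xSemaphore, 0 );
        }
    }
}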