Skip to content

Commit 2fd15f4

Browse files
committed
lwip - Fixed memory leak in k64f cyclic-buffer overflow
This was actually several bugs colluding. 1. Confusion about the buffer-semaphore paradigm in use led to misuse of the tx semaphore and potential for odd behaviour. 2. Equality tests on tx_consume_index and tx_produce_index did not handle overflow correctly. This would allow tx_consume_index to catch up to tx_produce_index and trick the k64f_rx_reclaim function into forgetting about a whole buffer of pbufs. 3. On top of all of that, the ENET_BUFFDESCRIPTOR_TX_READY_MASK was not correctly read immediately after being set, due to either a compiler optimization or hardware delays. This caused k64f_low_level_output to eagerly overrun existing buffer descriptors before they had been completely sent. Adopting the counting-semaphore paradigm for issue 1 avoided this concern. As pointed out by @infinnovation, the overflow only occurs in the rare case that the 120MHz CPU can actually generate packets faster than the ENET hardware can transmit on a 100Mbps link.
1 parent d7c02a1 commit 2fd15f4

File tree

1 file changed

+8
-11
lines changed
  • features/FEATURE_LWIP/lwip-interface/lwip-eth/arch/TARGET_Freescale

1 file changed

+8
-11
lines changed

features/FEATURE_LWIP/lwip-interface/lwip-eth/arch/TARGET_Freescale/k64f_emac.c

Lines changed: 8 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -102,24 +102,22 @@ static void update_read_buffer(uint8_t *buf)
102102
*/
103103
static void k64f_tx_reclaim(struct k64f_enetdata *k64f_enet)
104104
{
105-
uint8_t i = 0 ;
106-
107105
/* Get exclusive access */
108106
sys_mutex_lock(&k64f_enet->TXLockMutex);
109107

110-
i = k64f_enet->tx_consume_index;
111108
// Traverse all descriptors, looking for the ones modified by the uDMA
112-
while((i != k64f_enet->tx_produce_index) && (!(g_handle.txBdDirty->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK))) {
113-
pbuf_free(tx_buff[i]);
109+
while((k64f_enet->tx_consume_index != k64f_enet->tx_produce_index) &&
110+
(!(g_handle.txBdDirty->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK))) {
111+
pbuf_free(tx_buff[k64f_enet->tx_consume_index % ENET_TX_RING_LEN]);
114112
if (g_handle.txBdDirty->control & ENET_BUFFDESCRIPTOR_TX_WRAP_MASK)
115113
g_handle.txBdDirty = g_handle.txBdBase;
116114
else
117115
g_handle.txBdDirty++;
118116

119-
i = (i + 1) % ENET_TX_RING_LEN;
117+
k64f_enet->tx_consume_index += 1;
118+
osSemaphoreRelease(k64f_enet->xTXDCountSem.id);
120119
}
121120

122-
k64f_enet->tx_consume_index = i;
123121
/* Restore access */
124122
sys_mutex_unlock(&k64f_enet->TXLockMutex);
125123
}
@@ -526,15 +524,14 @@ static err_t k64f_low_level_output(struct netif *netif, struct pbuf *p)
526524

527525
/* Wait until a descriptor is available for the transfer. */
528526
/* THIS WILL BLOCK UNTIL THERE ARE A DESCRIPTOR AVAILABLE */
529-
while (g_handle.txBdCurrent->control & ENET_BUFFDESCRIPTOR_TX_READY_MASK)
530-
osSemaphoreWait(k64f_enet->xTXDCountSem.id, osWaitForever);
527+
osSemaphoreWait(k64f_enet->xTXDCountSem.id, osWaitForever);
531528

532529
/* Get exclusive access */
533530
sys_mutex_lock(&k64f_enet->TXLockMutex);
534531

535532
/* Save the buffer so that it can be freed when transmit is done */
536-
tx_buff[k64f_enet->tx_produce_index] = temp_pbuf;
537-
k64f_enet->tx_produce_index = (k64f_enet->tx_produce_index + 1) % ENET_TX_RING_LEN;
533+
tx_buff[k64f_enet->tx_produce_index % ENET_TX_RING_LEN] = temp_pbuf;
534+
k64f_enet->tx_produce_index += 1;
538535

539536
/* Setup transfers */
540537
g_handle.txBdCurrent->buffer = psend;

0 commit comments

Comments
 (0)