
This is required at least on ARM, where cache maintenance operates on whole cache lines.
When sending, rather than simply invalidating the entire descriptor, invalidate as little as possible while still respecting ARCH_DMA_MINALIGN, as requested by Alexey.
Signed-off-by: Ian Campbell <ijc@hellion.org.uk>
Cc: Alexey Brodkin <abrodkin@synopsys.com>
---
v2:
- collapsed "net/designware: align cache invalidation on rx" and
  "net/designware: invalidate entire descriptor in dw_eth_send" into one.
- roundup sizeof(txrx_status) to ARCH_DMA_MINALIGN instead of just
  invalidating the entire descriptor.
---
 drivers/net/designware.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)
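As an aside, and not part of the patch itself, the rounding described above can be checked with a small standalone sketch. The roundup() definition below mirrors the usual U-Boot helper and ARCH_DMA_MINALIGN is assumed to be 64, a common ARM cache line size; both values are assumptions for illustration only.

#include <stdio.h>

/* Assumed values for illustration; in a real build both come from U-Boot headers. */
#define ARCH_DMA_MINALIGN 64
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned long status_size = 4;	/* stands in for sizeof(desc_p->txrx_status) */

	/* roundup(4, 64) == 64: exactly one cache line is invalidated,
	 * never a partial line. */
	printf("invalidate %lu bytes\n",
	       (unsigned long)roundup(status_size, ARCH_DMA_MINALIGN));
	return 0;
}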
diff --git a/drivers/net/designware.c b/drivers/net/designware.c
index 41ab3ac..fa816bf 100644
--- a/drivers/net/designware.c
+++ b/drivers/net/designware.c
@@ -280,10 +280,18 @@ static int dw_eth_send(struct eth_device *dev, void *packet, int length)
 	u32 desc_num = priv->tx_currdescnum;
 	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
 
-	/* Invalidate only "status" field for the following check */
-	invalidate_dcache_range((unsigned long)&desc_p->txrx_status,
-				(unsigned long)&desc_p->txrx_status +
-				sizeof(desc_p->txrx_status));
+	/*
+	 * Strictly we only need to invalidate the "txrx_status" field
+	 * for the following check, but on some platforms we cannot
+	 * invalidate only 4 bytes, so roundup to
+	 * ARCH_DMA_MINALIGN. This is safe because the individual
+	 * descriptors in the array are each aligned to
+	 * ARCH_DMA_MINALIGN.
+	 */
+	invalidate_dcache_range(
+		(unsigned long)desc_p,
+		(unsigned long)desc_p +
+		roundup(sizeof(desc_p->txrx_status), ARCH_DMA_MINALIGN));
 
 	/* Check if the descriptor is owned by CPU */
 	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
@@ -351,7 +359,7 @@ static int dw_eth_recv(struct eth_device *dev)
 		/* Invalidate received data */
 		invalidate_dcache_range((unsigned long)desc_p->dmamac_addr,
 					(unsigned long)desc_p->dmamac_addr +
-					length);
+					roundup(length, ARCH_DMA_MINALIGN));
 
 		NetReceive(desc_p->dmamac_addr, length);
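The receive-path hunk follows the same idea: once the length is rounded up, both ends of the invalidated region land on cache line boundaries, provided the buffer itself starts on an ARCH_DMA_MINALIGN boundary. Below is a hedged sketch of that pattern; invalidate_rx_buffer() is a hypothetical name, not part of the driver, and it assumes the usual U-Boot roundup() and invalidate_dcache_range() declarations are in scope.

/*
 * Illustrative only: invalidates a received frame of 'length' bytes with
 * cache-line-aligned bounds, assuming 'buf' is already aligned to
 * ARCH_DMA_MINALIGN.
 */
static inline void invalidate_rx_buffer(void *buf, int length)
{
	unsigned long start = (unsigned long)buf;
	unsigned long end = start + roundup(length, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}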