
Flush caches when pushing an element to the ring and invalidate caches when popping an element from the ring in Exposed Ring mode. Otherwise, DMA transfers do not work correctly in R5 SPL (with caches enabled), where the core is not in the coherency domain.
Signed-off-by: Vignesh Raghavendra <vigneshr@ti.com>
Reviewed-by: Grygorii Strashko <grygorii.strashko@ti.com>
---
 drivers/soc/ti/k3-navss-ringacc.c | 11 +++++++++++
 1 file changed, 11 insertions(+)
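
As a reference for reviewers, below is a minimal stand-alone sketch (not part of the patch) of the cache-maintenance pattern the diff applies: flush the whole ring memory after the CPU writes an element and before ringing the doorbell, and invalidate it before the CPU reads an element that DMA may have written, with the end address rounded up to ARCH_DMA_MINALIGN. The struct and function names in the sketch are simplified placeholders, not the real k3_nav_ring API.

/* Illustrative sketch only; simplified from the driver code in the diff. */
#include <common.h>
#include <cpu_func.h>		/* flush_dcache_range(), invalidate_dcache_range() */
#include <asm/cache.h>		/* ARCH_DMA_MINALIGN */
#include <linux/kernel.h>	/* ALIGN() */

struct demo_ring {		/* hypothetical stand-in for struct k3_nav_ring */
	void *ring_mem_virt;	/* CPU-visible ring memory */
	u32 size;		/* number of elements in the ring */
	u32 elm_size;		/* element size is (4 << elm_size) bytes */
};

static void demo_ring_sync_for_device(struct demo_ring *ring)
{
	unsigned long start = (unsigned long)ring->ring_mem_virt;
	unsigned long end = ALIGN(start + ring->size * (4 << ring->elm_size),
				  ARCH_DMA_MINALIGN);

	/* CPU wrote an element: push dirty lines out so DMA sees them. */
	flush_dcache_range(start, end);
}

static void demo_ring_sync_for_cpu(struct demo_ring *ring)
{
	unsigned long start = (unsigned long)ring->ring_mem_virt;
	unsigned long end = ALIGN(start + ring->size * (4 << ring->elm_size),
				  ARCH_DMA_MINALIGN);

	/* DMA wrote an element: drop stale lines before the CPU reads it. */
	invalidate_dcache_range(start, end);
}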
diff --git a/drivers/soc/ti/k3-navss-ringacc.c b/drivers/soc/ti/k3-navss-ringacc.c
index 64ebc0ba0030..f06ea29c986c 100644
--- a/drivers/soc/ti/k3-navss-ringacc.c
+++ b/drivers/soc/ti/k3-navss-ringacc.c
@@ -6,6 +6,7 @@
  */
 
 #include <common.h>
+#include <cpu_func.h>
 #include <asm/io.h>
 #include <malloc.h>
 #include <asm/dma-mapping.h>
@@ -807,6 +808,11 @@ static int k3_nav_ringacc_ring_push_mem(struct k3_nav_ring *ring, void *elem)
 
 	memcpy(elem_ptr, elem, (4 << ring->elm_size));
 
+	flush_dcache_range((unsigned long)ring->ring_mem_virt,
+			   ALIGN((unsigned long)ring->ring_mem_virt +
+				 ring->size * (4 << ring->elm_size),
+				 ARCH_DMA_MINALIGN));
+
 	ring->windex = (ring->windex + 1) % ring->size;
 	ring->free--;
 	ringacc_writel(1, &ring->rt->db);
@@ -823,6 +829,11 @@ static int k3_nav_ringacc_ring_pop_mem(struct k3_nav_ring *ring, void *elem)
 
 	elem_ptr = k3_nav_ringacc_get_elm_addr(ring, ring->rindex);
 
+	invalidate_dcache_range((unsigned long)ring->ring_mem_virt,
+				ALIGN((unsigned long)ring->ring_mem_virt +
+				      ring->size * (4 << ring->elm_size),
+				      ARCH_DMA_MINALIGN));
+
 	memcpy(elem, elem_ptr, (4 << ring->elm_size));
 
 	ring->rindex = (ring->rindex + 1) % ring->size;