
The current calculation omits the flush/invalidate of the last cacheline
when the base address is not aligned to ARCH_DMA_MINALIGN, because only
the size is rounded up while the range end is derived from the
rounded-down base address.
This causes command failures and write corruption on Qualcomm platforms.
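
For illustration only (not part of the patch), here is a minimal standalone
sketch of the range arithmetic, assuming a 64-byte ARCH_DMA_MINALIGN, an
example buffer at 0x1010 of 0x40 bytes, and a local stand-in for U-Boot's
ALIGN() macro:

	/*
	 * Illustrative sketch: the addresses, sizes and the ALIGN()
	 * definition below are example assumptions, not driver code.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define ARCH_DMA_MINALIGN	64
	#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

	int main(void)
	{
		uintptr_t addr = 0x1010;	/* not cacheline aligned */
		unsigned long size = 0x40;	/* buffer ends at 0x1050 */

		/* Old calculation: rounds the size, not the end address. */
		uintptr_t aaddr = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
		unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN);
		printf("old: [%#lx, %#lx) - misses the cacheline at 0x1040\n",
		       (unsigned long)aaddr, (unsigned long)(aaddr + asize));

		/* New calculation: rounds the end address up instead. */
		uintptr_t start_addr = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
		uintptr_t end_addr = ALIGN(addr + size, ARCH_DMA_MINALIGN);
		printf("new: [%#lx, %#lx) - covers the whole buffer\n",
		       (unsigned long)start_addr, (unsigned long)end_addr);

		return 0;
	}

With the old rounding the maintained range stops at 0x1040 even though the
buffer extends to 0x1050, so the last cacheline is never flushed or
invalidated; rounding the end address up instead extends the range to
0x1080 and covers the whole buffer.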
Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
---
 drivers/ufs/ufs.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/ufs/ufs.c b/drivers/ufs/ufs.c
index bc86903245c..db487d1ff9a 100644
--- a/drivers/ufs/ufs.c
+++ b/drivers/ufs/ufs.c
@@ -704,11 +704,11 @@ static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
  */
 static void ufshcd_cache_flush_and_invalidate(void *addr, unsigned long size)
 {
-	uintptr_t aaddr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
-	unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN);
+	uintptr_t start_addr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1);
+	uintptr_t end_addr = ALIGN((uintptr_t)addr + size, ARCH_DMA_MINALIGN);
 
-	flush_dcache_range(aaddr, aaddr + asize);
-	invalidate_dcache_range(aaddr, aaddr + asize);
+	flush_dcache_range(start_addr, end_addr);
+	invalidate_dcache_range(start_addr, end_addr);
 }
 
 /**
@@ -1467,13 +1467,13 @@ static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
 	}
 
 	if (pccb->dma_dir == DMA_TO_DEVICE) {	/* Write to device */
-		flush_dcache_range(aaddr, aaddr +
-				   ALIGN(datalen, ARCH_DMA_MINALIGN));
+		flush_dcache_range(aaddr,
+				   ALIGN((uintptr_t)pccb->pdata + datalen, ARCH_DMA_MINALIGN));
 	}
 
 	/* In any case, invalidate cache to avoid stale data in it. */
-	invalidate_dcache_range(aaddr, aaddr +
-				ALIGN(datalen, ARCH_DMA_MINALIGN));
+	invalidate_dcache_range(aaddr,
+				ALIGN((uintptr_t)pccb->pdata + datalen, ARCH_DMA_MINALIGN));
 
 	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
 	buf = pccb->pdata;