
Hi Miquel,
just a quick comment below (something I stumbled upon while testing).
On 11.07.2018 17:25, Miquel Raynal wrote:
From: Boris Brezillon boris.brezillon@bootlin.com
Some controllers are exposing high-level interfaces to access various kinds of SPI memories. Unfortunately they do not fit in the current spi_controller model and usually have drivers placed in drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI memories in general.
This is an attempt at defining a SPI memory interface which works for all kinds of SPI memories (NORs, NANDs, SRAMs).
Signed-off-by: Boris Brezillon boris.brezillon@bootlin.com Signed-off-by: Miquel Raynal miquel.raynal@bootlin.com
drivers/spi/Kconfig | 7 + drivers/spi/Makefile | 1 + drivers/spi/spi-mem.c | 500 ++++++++++++++++++++++++++++++++++++++++++++++++++ include/spi-mem.h | 258 ++++++++++++++++++++++++++ include/spi.h | 11 ++ 5 files changed, 777 insertions(+) create mode 100644 drivers/spi/spi-mem.c create mode 100644 include/spi-mem.h
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 235a8c7d73..0ee371b2d9 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -15,6 +15,13 @@ config DM_SPI
if DM_SPI
+config SPI_MEM
- bool "SPI memory extension"
- help
Enable this option if you want to enable the SPI memory extension.
This extension is meant to simplify interaction with SPI memories
by providing a high-level interface to send memory-like commands.
- config ALTERA_SPI bool "Altera SPI driver" help
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 4b6000fd9a..982529a0e6 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -10,6 +10,7 @@ ifdef CONFIG_DM_SPI obj-y += spi-uclass.o obj-$(CONFIG_SANDBOX) += spi-emul-uclass.o obj-$(CONFIG_SOFT_SPI) += soft_spi.o +obj-$(CONFIG_SPI_MEM) += spi-mem.o else obj-y += spi.o obj-$(CONFIG_SOFT_SPI) += soft_spi_legacy.o diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c new file mode 100644 index 0000000000..1aabe56819 --- /dev/null +++ b/drivers/spi/spi-mem.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: GPL-2.0+ +/*
- Copyright (C) 2018 Exceet Electronics GmbH
- Copyright (C) 2018 Bootlin
- Author: Boris Brezillon boris.brezillon@bootlin.com
- */
+#ifndef __UBOOT__ +#include <linux/dmaengine.h> +#include <linux/pm_runtime.h> +#include "internals.h" +#else +#include <spi.h> +#include <spi-mem.h> +#endif
+#ifndef __UBOOT__ +/**
- spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
memory operation
- @ctlr: the SPI controller requesting this dma_map()
- @op: the memory operation containing the buffer to map
- @sgt: a pointer to a non-initialized sg_table that will be filled by this
function
- Some controllers might want to do DMA on the data buffer embedded in @op.
- This helper prepares everything for you and provides a ready-to-use
- sg_table. This function is not intended to be called from spi drivers.
- Only SPI controller drivers should use it.
- Note that the caller must ensure the memory region pointed by
- op->data.buf.{in,out} is DMA-able before calling this function.
- Return: 0 in case of success, a negative error code otherwise.
- */
+int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sgt)
+{
- struct device *dmadev;
- if (!op->data.nbytes)
return -EINVAL;
- if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
dmadev = ctlr->dma_tx->device->dev;
- else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
dmadev = ctlr->dma_rx->device->dev;
- else
dmadev = ctlr->dev.parent;
- if (!dmadev)
return -EINVAL;
- return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
op->data.dir == SPI_MEM_DATA_IN ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
+} +EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
+/**
- spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
memory operation
- @ctlr: the SPI controller requesting this dma_unmap()
- @op: the memory operation containing the buffer to unmap
- @sgt: a pointer to an sg_table previously initialized by
spi_controller_dma_map_mem_op_data()
- Some controllers might want to do DMA on the data buffer embedded in @op.
- This helper prepares things so that the CPU can access the
- op->data.buf.{in,out} buffer again.
- This function is not intended to be called from SPI drivers. Only SPI
- controller drivers should use it.
- This function should be called after the DMA operation has finished and is
- only valid if the previous spi_controller_dma_map_mem_op_data() call
- returned 0.
- Return: 0 in case of success, a negative error code otherwise.
- */
+void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sgt)
+{
- struct device *dmadev;
- if (!op->data.nbytes)
return;
- if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
dmadev = ctlr->dma_tx->device->dev;
- else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
dmadev = ctlr->dma_rx->device->dev;
- else
dmadev = ctlr->dev.parent;
- spi_unmap_buf(ctlr, dmadev, sgt,
op->data.dir == SPI_MEM_DATA_IN ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
+} +EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data); +#endif /* __UBOOT__ */
+static int spi_check_buswidth_req(struct spi_slave *slave, u8 buswidth, bool tx) +{
- u32 mode = slave->mode;
- switch (buswidth) {
- case 1:
return 0;
- case 2:
if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
(!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
return 0;
break;
- case 4:
if ((tx && (mode & SPI_TX_QUAD)) ||
(!tx && (mode & SPI_RX_QUAD)))
return 0;
break;
- default:
break;
- }
- return -ENOTSUPP;
+}
+bool spi_mem_default_supports_op(struct spi_slave *slave,
const struct spi_mem_op *op)
+{
- if (spi_check_buswidth_req(slave, op->cmd.buswidth, true))
return false;
- if (op->addr.nbytes &&
spi_check_buswidth_req(slave, op->addr.buswidth, true))
return false;
- if (op->dummy.nbytes &&
spi_check_buswidth_req(slave, op->dummy.buswidth, true))
return false;
- if (op->data.nbytes &&
spi_check_buswidth_req(slave, op->data.buswidth,
op->data.dir == SPI_MEM_DATA_OUT))
return false;
- return true;
+} +EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
+/**
- spi_mem_supports_op() - Check if a memory device and the controller it is
connected to support a specific memory operation
- @slave: the SPI device
- @op: the memory operation to check
- Some controllers are only supporting Single or Dual IOs, others might only
- support specific opcodes, or it can even be that the controller and device
- both support Quad IOs but the hardware prevents you from using it because
- only 2 IO lines are connected.
- This function checks whether a specific operation is supported.
- Return: true if @op is supported, false otherwise.
- */
+bool spi_mem_supports_op(struct spi_slave *slave,
const struct spi_mem_op *op)
+{
- struct udevice *bus = slave->dev->parent;
- struct dm_spi_ops *ops = spi_get_ops(bus);
- if (ops->mem_ops && ops->mem_ops->supports_op)
return ops->mem_ops->supports_op(slave, op);
- return spi_mem_default_supports_op(slave, op);
+} +EXPORT_SYMBOL_GPL(spi_mem_supports_op);
+/**
- spi_mem_exec_op() - Execute a memory operation
- @slave: the SPI device
- @op: the memory operation to execute
- Executes a memory operation.
- This function first checks that @op is supported and then tries to execute
- it.
- Return: 0 in case of success, a negative error code otherwise.
- */
+int spi_mem_exec_op(struct spi_slave *slave, const struct spi_mem_op *op) +{
- bool rx_data = op->data.nbytes && (op->data.dir == SPI_MEM_DATA_IN);
- bool tx_data = op->data.nbytes && (op->data.dir == SPI_MEM_DATA_OUT);
- struct udevice *bus = slave->dev->parent;
- struct dm_spi_ops *ops = spi_get_ops(bus);
- unsigned int xfer_len, pos = 0;
- u8 *tx_buf, *rx_buf = NULL;
- int ret;
- int i;
- if (!spi_mem_supports_op(slave, op))
return -ENOTSUPP;
- if (ops->mem_ops) {
+#ifndef __UBOOT__
/*
* Flush the message queue before executing our SPI memory
* operation to prevent preemption of regular SPI transfers.
*/
spi_flush_queue(ctlr);
if (ctlr->auto_runtime_pm) {
ret = pm_runtime_get_sync(ctlr->dev.parent);
if (ret < 0) {
dev_err(&ctlr->dev,
"Failed to power device: %d\n",
ret);
return ret;
}
}
mutex_lock(&ctlr->bus_lock_mutex);
mutex_lock(&ctlr->io_mutex);
+#endif
ret = ops->mem_ops->exec_op(slave, op);
+#ifndef __UBOOT__
mutex_unlock(&ctlr->io_mutex);
mutex_unlock(&ctlr->bus_lock_mutex);
if (ctlr->auto_runtime_pm)
pm_runtime_put(ctlr->dev.parent);
+#endif
/*
* Some controllers only optimize specific paths (typically the
* read path) and expect the core to use the regular SPI
* interface in other cases.
*/
if (!ret || ret != -ENOTSUPP)
return ret;
- }
+#ifndef __UBOOT__
- tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
op->dummy.nbytes;
- /*
* Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
* we're guaranteed that this buffer is DMA-able, as required by the
* SPI layer.
*/
- tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
- if (!tmpbuf)
return -ENOMEM;
- spi_message_init(&msg);
- tmpbuf[0] = op->cmd.opcode;
- xfers[xferpos].tx_buf = tmpbuf;
- xfers[xferpos].len = sizeof(op->cmd.opcode);
- xfers[xferpos].tx_nbits = op->cmd.buswidth;
- spi_message_add_tail(&xfers[xferpos], &msg);
- xferpos++;
- totalxferlen++;
- if (op->addr.nbytes) {
int i;
for (i = 0; i < op->addr.nbytes; i++)
tmpbuf[i + 1] = op->addr.val >>
(8 * (op->addr.nbytes - i - 1));
xfers[xferpos].tx_buf = tmpbuf + 1;
xfers[xferpos].len = op->addr.nbytes;
xfers[xferpos].tx_nbits = op->addr.buswidth;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->addr.nbytes;
- }
- if (op->dummy.nbytes) {
memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
xfers[xferpos].len = op->dummy.nbytes;
xfers[xferpos].tx_nbits = op->dummy.buswidth;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->dummy.nbytes;
- }
- if (op->data.nbytes) {
if (op->data.dir == SPI_MEM_DATA_IN) {
xfers[xferpos].rx_buf = op->data.buf.in;
xfers[xferpos].rx_nbits = op->data.buswidth;
} else {
xfers[xferpos].tx_buf = op->data.buf.out;
xfers[xferpos].tx_nbits = op->data.buswidth;
}
xfers[xferpos].len = op->data.nbytes;
spi_message_add_tail(&xfers[xferpos], &msg);
xferpos++;
totalxferlen += op->data.nbytes;
- }
- ret = spi_sync(slave, &msg);
- kfree(tmpbuf);
- if (ret)
return ret;
- if (msg.actual_length != totalxferlen)
return -EIO;
+#else
- /* U-Boot does not support parallel SPI data lanes */
- if ((op->cmd.buswidth != 1) ||
(op->addr.nbytes && op->addr.buswidth != 1) ||
(op->dummy.nbytes && op->dummy.buswidth != 1) ||
(op->data.nbytes && op->data.buswidth != 1)) {
printf("Dual/Quad raw SPI transfers not supported\n");
return -ENOTSUPP;
- }
- xfer_len = sizeof(op->cmd.opcode) + op->addr.nbytes +
op->dummy.nbytes + op->data.nbytes;
- /*
* Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
* we're guaranteed that this buffer is DMA-able, as required by the
* SPI layer.
*/
- tx_buf = kzalloc(xfer_len, GFP_KERNEL);
- if (!tx_buf)
return -ENOMEM;
- if (rx_data) {
rx_buf = kzalloc(xfer_len, GFP_KERNEL);
if (!rx_buf)
return -ENOMEM;
- }
- ret = spi_claim_bus(slave);
- if (ret < 0)
return ret;
- tx_buf[pos++] = op->cmd.opcode;
- if (op->addr.nbytes) {
for (i = 0; i < op->addr.nbytes; i++)
tx_buf[pos + i] = op->addr.val >>
(8 * (op->addr.nbytes - i - 1));
pos += op->addr.nbytes;
- }
- if (op->dummy.nbytes) {
memset(tx_buf + pos, 0xff, op->dummy.nbytes);
pos += op->dummy.nbytes;
- }
- if (tx_data)
memcpy(tx_buf + pos, op->data.buf.out, op->data.nbytes);
- ret = spi_xfer(slave, xfer_len * 8, tx_buf, rx_buf,
SPI_XFER_BEGIN | SPI_XFER_END);
- spi_release_bus(slave);
- for (i = 0; i < pos; i++)
debug("%02x ", tx_buf[i]);
- debug("| [%dB %s] ",
tx_data || rx_data ? op->data.nbytes : 0,
tx_data || rx_data ? (tx_data ? "out" : "in") : "-");
- for (; i < xfer_len; i++)
		debug("%02x ", rx_buf[i]);
This crashes for write (TX-only) operations: rx_buf is only allocated when the
operation reads data (rx_data), so here the trailing debug loop dereferences a
NULL rx_buf when tx_data is set. It should print from the buffer that was
actually used, e.g.:
		debug("%02x ", tx_data ? tx_buf[i] : rx_buf[i]);
I won't have much more time today. Will get back to this in ~2 weeks.
Thanks, Stefan