From 4fa05774622bafdeea0d2af02e16f1c3db777382 Mon Sep 17 00:00:00 2001 From: Chandana Kishori Chiluveru Date: Tue, 29 Aug 2023 23:35:11 -0700 Subject: [PATCH] q2spi-msm-geni: Add q2spi interface drivers support Add q2spi interface drivers support. Change-Id: I8f3fb11ce654da8fb83fa0b46dd645b2449fb16e Signed-off-by: Chandana Kishori Chiluveru --- drivers/dma/qcom/msm_gpi.c | 147 +- drivers/dma/qcom/msm_gpi_mmio.h | 27 +- drivers/spi/Kconfig | 12 + drivers/spi/Makefile | 2 + drivers/spi/q2spi-gsi.c | 632 ++++++ drivers/spi/q2spi-gsi.h | 77 + drivers/spi/q2spi-msm-geni.c | 3031 +++++++++++++++++++++++++++ drivers/spi/q2spi-msm.h | 556 +++++ drivers/spi/q2spi-slave-reg.h | 36 + drivers/spi/q2spi-trace.h | 35 + include/linux/msm_gpi.h | 33 + include/linux/qcom-geni-se-common.h | 2 + include/linux/soc/qcom/geni-se.h | 3 +- 13 files changed, 4578 insertions(+), 15 deletions(-) create mode 100644 drivers/spi/q2spi-gsi.c create mode 100644 drivers/spi/q2spi-gsi.h create mode 100644 drivers/spi/q2spi-msm-geni.c create mode 100644 drivers/spi/q2spi-msm.h create mode 100644 drivers/spi/q2spi-slave-reg.h create mode 100644 drivers/spi/q2spi-trace.h diff --git a/drivers/dma/qcom/msm_gpi.c b/drivers/dma/qcom/msm_gpi.c index 59d921652f37..216af87e22fb 100644 --- a/drivers/dma/qcom/msm_gpi.c +++ b/drivers/dma/qcom/msm_gpi.c @@ -161,6 +161,20 @@ struct __packed xfer_compl_event { u8 chid; }; +struct __packed qup_q2spi_status { + u32 ptr_l; + u32 ptr_h : 8; + u32 resvd_0 : 8; + u32 value : 8; + u32 resvd_1 : 8; + u32 length : 20; + u32 resvd_2 : 4; + u8 code : 8; + u16 status : 16; + u8 type : 8; + u8 ch_id : 8; +}; + struct __packed immediate_data_event { u8 data_bytes[8]; u8 length : 4; @@ -186,18 +200,13 @@ struct __packed gpi_ere { u32 dword[4]; }; -enum GPI_EV_TYPE { - XFER_COMPLETE_EV_TYPE = 0x22, - IMMEDIATE_DATA_EV_TYPE = 0x30, - QUP_NOTIF_EV_TYPE = 0x31, - STALE_EV_TYPE = 0xFF, -}; - union __packed gpi_event { struct __packed xfer_compl_event xfer_compl_event; struct __packed 
immediate_data_event immediate_data_event; struct __packed qup_notif_event qup_notif_event; struct __packed gpi_ere gpi_ere; + struct __packed qup_q2spi_status q2spi_status; + struct __packed qup_q2spi_cr_header_event q2spi_cr_header_event; }; enum gpii_irq_settings { @@ -298,6 +307,7 @@ enum se_protocol { SE_PROTOCOL_SPI = 1, SE_PROTOCOL_UART = 2, SE_PROTOCOL_I2C = 3, + SE_PROTOCOL_Q2SPI = 0xE, SE_MAX_PROTOCOL }; @@ -538,6 +548,7 @@ struct gpii_chan { struct virt_dma_chan vc; u32 chid; u32 seid; + u8 init_config:1; enum se_protocol protocol; enum EV_PRIORITY priority; /* comes from clients DT node */ struct gpii *gpii; @@ -1700,12 +1711,118 @@ static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan, tx_cb_param->length = compl_event->length; tx_cb_param->completion_code = compl_event->code; tx_cb_param->status = compl_event->status; + tx_cb_param->tce_type = compl_event->type; + GPII_INFO(gpii, gpii_chan->chid, "tx_cb_param:%p\n", tx_cb_param); + vd->tx.callback(tx_cb_param); + } +gpi_free_desc: + gpi_free_chan_desc(gpii_chan); +} + +/* process Q2SPI_STATUS TCE notification event */ +static void +gpi_process_qup_q2spi_status(struct gpii_chan *gpii_chan, + struct qup_q2spi_status *q2spi_status_event) +{ + struct gpii *gpii = gpii_chan->gpii; + struct gpi_ring *ch_ring = gpii_chan->ch_ring; + void *ev_rp = to_virtual(ch_ring, q2spi_status_event->ptr_l); + struct virt_dma_desc *vd; + struct msm_gpi_dma_async_tx_cb_param *tx_cb_param; + struct gpi_desc *gpi_desc; + unsigned long flags; + + /* only process events on active channel */ + if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) { + GPII_ERR(gpii, gpii_chan->chid, "skipping processing event because ch @ %s state\n", + TO_GPI_PM_STR(gpii_chan->pm_state)); + gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT, __LINE__); + return; + } + + spin_lock_irqsave(&gpii_chan->vc.lock, flags); + vd = vchan_next_desc(&gpii_chan->vc); + if (!vd) { + struct gpi_ere *gpi_ere; + + 
spin_unlock_irqrestore(&gpii_chan->vc.lock, flags); + GPII_ERR(gpii, gpii_chan->chid, + "Event without a pending descriptor!\n"); + gpi_ere = (struct gpi_ere *)q2spi_status_event; + GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n", + gpi_ere->dword[0], gpi_ere->dword[1], + gpi_ere->dword[2], gpi_ere->dword[3]); + gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH, __LINE__); + return; + } + gpi_desc = to_gpi_desc(vd); + spin_unlock_irqrestore(&gpii_chan->vc.lock, flags); + + /* + * RP pointed by Event is to last TRE processed, + * we need to update ring rp to ev_rp + 1 + */ + ev_rp += ch_ring->el_size; + if (ev_rp >= (ch_ring->base + ch_ring->len)) + ev_rp = ch_ring->base; + ch_ring->rp = ev_rp; + /* update must be visible to other cores */ + smp_wmb(); + + if (q2spi_status_event->code == MSM_GPI_TCE_EOB) { + if (gpii->protocol != SE_PROTOCOL_Q2SPI) + goto gpi_free_desc; + } + + tx_cb_param = vd->tx.callback_param; + if (vd->tx.callback && tx_cb_param) { + GPII_VERB(gpii, gpii_chan->chid, + "cb_length:%u code:0x%x type:0x%x status:0x%x q2spi_status:0x%x\n", + q2spi_status_event->length, q2spi_status_event->code, + q2spi_status_event->type, q2spi_status_event->status, + q2spi_status_event->value); + tx_cb_param->length = q2spi_status_event->length; + tx_cb_param->completion_code = q2spi_status_event->code; + tx_cb_param->tce_type = q2spi_status_event->type; + tx_cb_param->status = q2spi_status_event->status; + tx_cb_param->q2spi_status = q2spi_status_event->value; vd->tx.callback(tx_cb_param); } gpi_free_desc: gpi_free_chan_desc(gpii_chan); +} +/* process Q2SPI CR Header TCE notification event */ +static void +gpi_process_xfer_q2spi_cr_header(struct gpii_chan *gpii_chan, + struct qup_q2spi_cr_header_event *q2spi_cr_header_event) +{ + struct gpi_client_info *client_info = &gpii_chan->client_info; + struct gpii *gpii_ptr = NULL; + struct msm_gpi_cb msm_gpi_cb; + + gpii_ptr = gpii_chan->gpii; + GPII_VERB(gpii_ptr, gpii_chan->chid, + "code:0x%x 
type:0x%x hdr_0:0x%x hrd_1:0x%x hrd_2:0x%x hdr3:0x%x\n", + q2spi_cr_header_event->code, q2spi_cr_header_event->type, + q2spi_cr_header_event->cr_hdr_0, q2spi_cr_header_event->cr_hdr_1, + q2spi_cr_header_event->cr_hdr_2, q2spi_cr_header_event->cr_hdr_3); + GPII_VERB(gpii_ptr, gpii_chan->chid, + "cr_byte_0:0x%x cr_byte_1:0x%x cr_byte_2:0x%x cr_byte_3h:0x%x\n", + q2spi_cr_header_event->cr_ed_byte_0, q2spi_cr_header_event->cr_ed_byte_1, + q2spi_cr_header_event->cr_ed_byte_2, q2spi_cr_header_event->cr_ed_byte_3); + GPII_VERB(gpii_ptr, gpii_chan->chid, "code:0x%x\n", q2spi_cr_header_event->code); + GPII_VERB(gpii_ptr, gpii_chan->chid, + "cr_byte_0_len:0x%x cr_byte_0_err:0x%x type:0x%x ch_id:0x%x\n", + q2spi_cr_header_event->byte0_len, q2spi_cr_header_event->byte0_err, + q2spi_cr_header_event->type, q2spi_cr_header_event->ch_id); + msm_gpi_cb.cb_event = MSM_GPI_QUP_CR_HEADER; + msm_gpi_cb.q2spi_cr_header_event = *q2spi_cr_header_event; + GPII_VERB(gpii_chan->gpii, gpii_chan->chid, "sending CB event:%s\n", + TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event)); + client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb, + client_info->cb_param); } /* process all events */ @@ -1763,6 +1880,15 @@ static void gpi_process_events(struct gpii *gpii) gpi_process_qup_notif_event(gpii_chan, &gpi_event->qup_notif_event); break; + case QUP_TCE_TYPE_Q2SPI_STATUS: + gpii_chan = &gpii->gpii_chan[chid]; + gpi_process_qup_q2spi_status(gpii_chan, &gpi_event->q2spi_status); + break; + case QUP_TCE_TYPE_Q2SPI_CR_HEADER: + gpii_chan = &gpii->gpii_chan[chid]; + gpi_process_xfer_q2spi_cr_header(gpii_chan, + &gpi_event->q2spi_cr_header_event); + break; default: GPII_VERB(gpii, GPI_DBG_COMMON, "not supported event type:0x%x\n", @@ -1951,6 +2077,7 @@ static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd) GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id, gpii_chan->chid), GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid, + gpii_chan->init_config, gpii_chan->protocol, gpii_chan->seid), }, @@ -2988,13 
+3115,15 @@ static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args, /* get ring size, protocol, se_id, and priority */ gpii_chan->seid = seid; gpii_chan->protocol = args->args[2]; + if (gpii_chan->protocol == SE_PROTOCOL_Q2SPI) + gpii_chan->init_config = 1; gpii_chan->req_tres = args->args[3]; gpii_chan->priority = args->args[4] & GPI_EV_PRIORITY_BMSK; GPI_LOG(gpi_dev, - "client req gpii:%u chid:%u #_tre:%u prio:%u proto:%u SE:%d\n", + "client req gpii:%u chid:%u #_tre:%u prio:%u proto:%u SE:%d init_config:%d\n", gpii, chid, gpii_chan->req_tres, gpii_chan->priority, - gpii_chan->protocol, gpii_chan->seid); + gpii_chan->protocol, gpii_chan->seid, gpii_chan->init_config); return dma_get_slave_channel(&gpii_chan->vc.chan); } diff --git a/drivers/dma/qcom/msm_gpi_mmio.h b/drivers/dma/qcom/msm_gpi_mmio.h index e73041c58ca8..456bdac15f58 100644 --- a/drivers/dma/qcom/msm_gpi_mmio.h +++ b/drivers/dma/qcom/msm_gpi_mmio.h @@ -1,11 +1,21 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ /* Register offsets from gpi-top */ #define GPI_GPII_n_CH_k_CNTXT_0_OFFS(n, k) \ - (0x20000 + (0x4000 * (n)) + (0x80 * (k))) + (0x20000 + (0x4000 * (n)) + (0x80 * (k))) +#define GPI_GPII_n_CH_k_CNTXT_2_OFFS(n, k) \ + (0x20008 + (0x4000 * (n)) + (0x80 * (k))) +#define GPI_GPII_n_CH_k_CNTXT_4_OFFS(n, k) \ + (0x20010 + (0x4000 * (n)) + (0x80 * (k))) +#define GPI_GPII_n_CH_k_CNTXT_6_OFFS(n, k) \ + (0x20018 + (0x4000 * (n)) + (0x80 * (k))) +#define GPI_GPII_n_CH_k_RE_FETCH_READ_PTR(n, k) \ + (0x20054 + (0x4000 * (n)) + (0x80 * (k))) + #define GPI_GPII_n_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK (0xFF000000) #define GPI_GPII_n_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT (24) #define GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK (0xF00000) @@ -46,7 +56,14 @@ /* EV Context Array */ #define GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(n, k) \ - (0x21000 + (0x4000 * (n)) + (0x80 * (k))) + (0x21000 + (0x4000 * (n)) + (0x80 * (k))) +#define GPI_GPII_n_EV_CH_k_CNTXT_2_OFFS(n, k) \ + (0x21008 + (0x4000 * (n)) + (0x80 * (k))) +#define GPI_GPII_n_EV_CH_k_CNTXT_4_OFFS(n, k) \ + (0x21010 + (0x4000 * (n)) + (0x80 * (k))) +#define GPI_GPII_n_EV_CH_k_CNTXT_6_OFFS(n, k) \ + (0x21018 + (0x4000 * (n)) + (0x80 * (k))) + #define GPI_GPII_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK (0xFF000000) #define GPI_GPII_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT (24) #define GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK (0xF00000) @@ -207,8 +224,8 @@ enum CNTXT_OFFS { /* Scratch registeres */ #define GPI_GPII_n_CH_k_SCRATCH_0_OFFS(n, k) \ (0x20060 + (0x4000 * (n)) + (0x80 * (k))) -#define GPI_GPII_n_CH_K_SCRATCH_0(pair, proto, seid) \ - ((pair << 16) | (proto << 4) | seid) +#define GPI_GPII_n_CH_K_SCRATCH_0(pair, int_config, proto, seid) \ + (((pair) << 16) | ((int_config) << 15) | ((proto) << 4) | (seid)) #define GPI_GPII_n_CH_k_SCRATCH_1_OFFS(n, k) \ (0x20064 + (0x4000 * (n)) + (0x80 * (k))) #define GPI_GPII_n_CH_k_SCRATCH_2_OFFS(n, k) \ @@ -228,4 +245,4 @@ enum CNTXT_OFFS { #define GPI_DEBUG_QSB_LOG_1 (0x5068) #define GPI_DEBUG_QSB_LOG_2 (0x506C) #define 
GPI_DEBUG_QSB_LOG_LAST_MISC_ID(n) (0x5070 + (0x4*n)) - +#define GPI_DEBUG_BUSY_REG (0x5010) diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 0e3c9412b1a6..41a0895df60e 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -823,6 +823,18 @@ config SPI_QCOM_GENI This driver can also be built as a module. If so, the module will be called spi-geni-qcom. +config Q2SPI_MSM_GENI + tristate "Qualcomm Technologies Inc.'s GENI based Q2SPI controller" + depends on QCOM_GENI_SE + help + This driver supports GENI serial engine based Q2SPI controller in + master mode on the Qualcomm Technologies Inc.'s SoCs. If you say + yes to this option, support will be included for the built-in Q2SPI + interface on the Qualcomm Technologies Inc.'s SoCs. + + This driver can also be built as a module. If so, the module + will be called q2spi-geni. + config SPI_MSM_GENI tristate "Qualcomm Technologies Inc.'s GENI based SPI controller" depends on QCOM_GENI_SE diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index c6c3f50e6181..3949d0c08ad3 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -102,6 +102,8 @@ spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o +obj-$(CONFIG_Q2SPI_MSM_GENI) += q2spi-geni.o +q2spi-geni-y := q2spi-msm-geni.o q2spi-gsi.o obj-$(CONFIG_SPI_MSM_GENI) += spi-msm-geni.o obj-$(CONFIG_VIRTIO_SPI) += virtio-spi.o obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o diff --git a/drivers/spi/q2spi-gsi.c b/drivers/spi/q2spi-gsi.c new file mode 100644 index 000000000000..73cfa30fe899 --- /dev/null +++ b/drivers/spi/q2spi-gsi.c @@ -0,0 +1,632 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include "q2spi-msm.h" +#include "q2spi-slave-reg.h" + +static void q2spi_rx_xfer_completion_event(struct msm_gpi_dma_async_tx_cb_param *cb_param) +{ + struct q2spi_packet *q2spi_pkt = cb_param->userdata; + struct q2spi_geni *q2spi = q2spi_pkt->q2spi; + struct q2spi_dma_transfer *xfer; + u32 status = 0; + + if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY) + xfer = q2spi->db_xfer; + else + xfer = q2spi->xfer; + + if (!xfer || !xfer->rx_buf) { + pr_err("%s rx buf NULL!!!\n", __func__); + return; + } + + Q2SPI_DEBUG(q2spi, "%s cb_param:%p cb_param->len:%p cb_param->status:%d\n", + __func__, cb_param, cb_param->length, cb_param->status); + Q2SPI_DEBUG(q2spi, "%s xfer:%p rx_buf:%p rx_dma:%p rx_len:%d m_cmd_param:%d\n", + __func__, xfer, xfer->rx_buf, xfer->rx_dma, xfer->rx_len, + q2spi_pkt->m_cmd_param); + + status = cb_param->status; //check status is 0 or EOT for success + if (cb_param->length <= xfer->rx_len) { + xfer->rx_len = cb_param->length; + q2spi_dump_ipc(q2spi, q2spi->ipc, "rx_xfer_completion_event RX", + (char *)xfer->rx_buf, cb_param->length); + complete_all(&q2spi->rx_cb); + q2spi_add_req_to_rx_queue(q2spi, status, q2spi_pkt->m_cmd_param); + } else { + Q2SPI_DEBUG(q2spi, "%s Err length miss-match %d %d\n", + __func__, cb_param->length, xfer->rx_len); + } +} + +static void q2spi_tx_xfer_completion_event(struct msm_gpi_dma_async_tx_cb_param *cb_param) +{ + struct q2spi_packet *q2spi_pkt = cb_param->userdata; + struct q2spi_geni *q2spi = q2spi_pkt->q2spi; + struct q2spi_dma_transfer *xfer = q2spi->xfer; + + Q2SPI_DEBUG(q2spi, "%s xfer->tx_len:%d cb_param_length:%d\n", __func__, + xfer->tx_len, cb_param->length); + if (cb_param->length == xfer->tx_len) { + Q2SPI_DEBUG(q2spi, "%s complete_tx_cb\n", __func__); + complete_all(&q2spi->tx_cb); + } else { + dev_err(q2spi->dev, "%s length miss-match\n", __func__); + } +} + +static void q2spi_parse_q2spi_status(struct msm_gpi_dma_async_tx_cb_param *cb_param) +{ + 
struct q2spi_packet *q2spi_pkt = cb_param->userdata; + struct q2spi_geni *q2spi = q2spi_pkt->q2spi; + u32 status = 0; + + status = cb_param->q2spi_status; + Q2SPI_DEBUG(q2spi, "%s status:%d complete_tx_cb\n", __func__, status); + complete_all(&q2spi->tx_cb); + q2spi_add_req_to_rx_queue(q2spi, status, q2spi_pkt->m_cmd_param); +} + +static void q2spi_parse_cr_header(struct q2spi_geni *q2spi, struct msm_gpi_cb const *cb) +{ + Q2SPI_DEBUG(q2spi, "%s complete_tx_cb\n", __func__); + complete_all(&q2spi->tx_cb); + q2spi_doorbell(q2spi, &cb->q2spi_cr_header_event); +} + +static void q2spi_gsi_tx_callback(void *cb) +{ + struct msm_gpi_dma_async_tx_cb_param *cb_param = NULL; + struct q2spi_packet *q2spi_pkt; + struct q2spi_geni *q2spi; + + cb_param = (struct msm_gpi_dma_async_tx_cb_param *)cb; + if (!cb_param) { + pr_err("%s Err Invalid CB\n", __func__); + return; + } + q2spi_pkt = cb_param->userdata; + q2spi = q2spi_pkt->q2spi; + if (!q2spi) { + pr_err("%s Err Invalid q2spi\n", __func__); + return; + } + + if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) { + dev_err(q2spi->dev, "%s Unexpected CB status\n", __func__); + return; + } + if (cb_param->completion_code == MSM_GPI_TCE_UNEXP_ERR) { + dev_err(q2spi->dev, "%s Unexpected GSI CB completion code\n", __func__); + return; + } else if (cb_param->completion_code == MSM_GPI_TCE_EOT) { + Q2SPI_DEBUG(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__); + if (cb_param->tce_type == XFER_COMPLETE_EV_TYPE) { + Q2SPI_DEBUG(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__); + q2spi_tx_xfer_completion_event(cb_param); + } else if (cb_param->tce_type == QUP_TCE_TYPE_Q2SPI_STATUS) { + Q2SPI_DEBUG(q2spi, "%s QUP_TCE_TYPE_Q2SPI_STATUS\n", __func__); + q2spi_parse_q2spi_status(cb_param); + } + } +} + +static void q2spi_gsi_rx_callback(void *cb) +{ + struct msm_gpi_dma_async_tx_cb_param *cb_param = NULL; + struct q2spi_packet *q2spi_pkt; + struct q2spi_geni *q2spi; + + cb_param = (struct msm_gpi_dma_async_tx_cb_param *)cb; + if (!cb_param) { + 
pr_err("%s Err Invalid CB\n", __func__); + return; + } + q2spi_pkt = cb_param->userdata; + if (!q2spi_pkt) { + pr_err("%s Err Invalid packet\n", __func__); + return; + } + q2spi = q2spi_pkt->q2spi; + if (!q2spi) { + pr_err("%s Err Invalid q2spi\n", __func__); + return; + } + + if (cb_param->status == MSM_GPI_TCE_UNEXP_ERR) { + Q2SPI_ERROR(q2spi, "%s Err cb_status:%d\n", __func__, cb_param->status); + return; + } + + if (cb_param->completion_code == MSM_GPI_TCE_UNEXP_ERR) { + Q2SPI_ERROR(q2spi, "%s Err MSM_GPI_TCE_UNEXP_ERR\n", __func__); + return; + } else if (cb_param->completion_code == MSM_GPI_TCE_EOT) { + Q2SPI_DEBUG(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__); + if (cb_param->tce_type == XFER_COMPLETE_EV_TYPE) { + /* CR header */ + Q2SPI_DEBUG(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__); + q2spi_rx_xfer_completion_event(cb_param); + } + } else { + Q2SPI_DEBUG(q2spi, "%s: Err cb_param->completion_code = %d\n", + __func__, cb_param->completion_code); + } + Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); +} + +static void q2spi_geni_deallocate_chan(struct q2spi_gsi *gsi) +{ + dma_release_channel(gsi->rx_c); + dma_release_channel(gsi->tx_c); + gsi->tx_c = NULL; + gsi->rx_c = NULL; +} + +/** + * + * q2spi_geni_gsi_setup - GSI channel setup + */ +int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi) +{ + struct q2spi_gsi *gsi = NULL; + int ret = 0; + + gsi = q2spi_kzalloc(q2spi, sizeof(struct q2spi_gsi)); + if (!gsi) { + Q2SPI_ERROR(q2spi, "%s Err GSI structure memory alloc failed\n", __func__); + return -ENOMEM; + } + q2spi->gsi = gsi; + Q2SPI_DEBUG(q2spi, "%s gsi:%p\n", __func__, gsi); + if (gsi->chan_setup) { + Q2SPI_ERROR(q2spi, "%s Err GSI channel already configured\n", __func__); + return ret; + } + + gsi->tx_c = dma_request_slave_channel(q2spi->dev, "tx"); + if (IS_ERR_OR_NULL(gsi->tx_c)) { + Q2SPI_ERROR(q2spi, "%s Err Failed to get tx DMA ch %ld\n", + __func__, PTR_ERR(gsi->tx_c)); + return -EIO; + } + Q2SPI_DEBUG(q2spi, "%s gsi_tx_c:%p\n", 
__func__, gsi->tx_c); + gsi->rx_c = dma_request_slave_channel(q2spi->dev, "rx"); + if (IS_ERR_OR_NULL(gsi->rx_c)) { + Q2SPI_ERROR(q2spi, "%s Err Failed to get rx DMA ch %ld\n", + __func__, PTR_ERR(gsi->rx_c)); + dma_release_channel(gsi->tx_c); + gsi->tx_c = NULL; + return -EIO; + } + Q2SPI_DEBUG(q2spi, "%s gsi_rx_c:%p\n", __func__, gsi->rx_c); + gsi->tx_ev.init.callback = q2spi_gsi_ch_ev_cb; + gsi->tx_ev.init.cb_param = q2spi; + gsi->tx_ev.cmd = MSM_GPI_INIT; + gsi->tx_c->private = &gsi->tx_ev; + ret = dmaengine_slave_config(gsi->tx_c, NULL); + if (ret) { + Q2SPI_ERROR(q2spi, "%s tx dma slave config ret :%d\n", __func__, ret); + goto dmaengine_slave_config_fail; + } + + gsi->rx_ev.init.callback = q2spi_gsi_ch_ev_cb; + gsi->rx_ev.init.cb_param = q2spi; + gsi->rx_ev.cmd = MSM_GPI_INIT; + gsi->rx_c->private = &gsi->rx_ev; + ret = dmaengine_slave_config(gsi->rx_c, NULL); + if (ret) { + Q2SPI_ERROR(q2spi, "%s rx dma slave config ret :%d\n", __func__, ret); + goto dmaengine_slave_config_fail; + } + Q2SPI_DEBUG(q2spi, "%s q2spi:%p gsi:%p q2spi_gsi:%p\n", __func__, q2spi, gsi, q2spi->gsi); + q2spi->gsi->chan_setup = true; + return ret; + +dmaengine_slave_config_fail: + q2spi_geni_deallocate_chan(gsi); + return ret; +} + +static int get_q2spi_clk_cfg(u32 speed_hz, struct q2spi_geni *q2spi, int *clk_idx, int *clk_div) +{ + unsigned long sclk_freq; + unsigned long res_freq; + struct geni_se *se = &q2spi->se; + int ret = 0; + + Q2SPI_DEBUG(q2spi, "%s Start PID=%d\n", __func__, current->pid); + + ret = geni_se_clk_freq_match(&q2spi->se, (speed_hz * q2spi->oversampling), + clk_idx, &sclk_freq, false); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err Failed(%d) to find src clk for 0x%x\n", + __func__, ret, speed_hz); + return ret; + } + + *clk_div = DIV_ROUND_UP(sclk_freq, (q2spi->oversampling * speed_hz)); + + if (!(*clk_div)) { + Q2SPI_ERROR(q2spi, "%s Err sclk:%lu oversampling:%d speed:%u\n", + __func__, sclk_freq, q2spi->oversampling, speed_hz); + return -EINVAL; + } + + res_freq = 
(sclk_freq / (*clk_div)); + + Q2SPI_DEBUG(q2spi, "%s req %u resultant %lu sclk %lu, idx %d, div %d\n", + __func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div); + + ret = clk_set_rate(se->clk, sclk_freq); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err clk_set_rate failed %d\n", __func__, ret); + return ret; + } + Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); + return 0; +} + +/* 3.10.2.8 Q2SPI */ +static struct msm_gpi_tre *setup_cfg0_tre(struct q2spi_geni *q2spi) +{ + struct msm_gpi_tre *c0_tre = &q2spi->gsi->config0_tre; + u8 word_len = 0; + u8 cs_mode = 0; + u8 intr_pol = 0; + u8 pack = 0; + u8 cs_clk_delay = SPI_CS_CLK_DLY; + int div = 0; + int ret = 0; + int idx = 0; + int tdn = S_GP_CNT5_TDN; + int tsn = M_GP_CNT7_TSN; + int tan = M_GP_CNT4_TAN; + int ssn = S_GP_CNT7_SSN; + int cn_delay = M_GP_CNT6_CN_DELAY; + + Q2SPI_DEBUG(q2spi, "%s Start PID=%d\n", __func__, current->pid); + ret = get_q2spi_clk_cfg(q2spi->cur_speed_hz, q2spi, &idx, &div); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err setting clks:%d\n", __func__, ret); + return ERR_PTR(ret); + } + + word_len = MIN_WORD_LEN; + pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN); + cs_mode = CS_LESS_MODE; + intr_pol = INTR_HIGH_POLARITY; + Q2SPI_DEBUG(q2spi, "%s cs_mode 0x%x word %d pack %d idx %d div %d\n", + __func__, cs_mode, word_len, pack, idx, div); + /* config0 */ + c0_tre->dword[0] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD0(tsn, pack, tdn, cs_mode, + intr_pol, word_len); + c0_tre->dword[1] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD1(tan, cs_clk_delay, ssn); + c0_tre->dword[2] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD2(cn_delay, idx, div); + c0_tre->dword[3] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 0, 1); + Q2SPI_DEBUG(q2spi, "%s c0_tre->dword[0]:0x%x dword[1]:0x%x dword[2]:0x%x dword[3]:0x%x\n", + __func__, c0_tre->dword[0], c0_tre->dword[1], + c0_tre->dword[2], c0_tre->dword[3]); + q2spi->setup_config0 = true; + return c0_tre; +} + +/* 3.10.4.9 Q2SPI */ +static struct +msm_gpi_tre *setup_go_tre(int cmd, int cs, int 
rx_len, int flags, struct q2spi_geni *q2spi) +{ + struct msm_gpi_tre *go_tre = &q2spi->gsi->go_tre; + int chain = 0; + int eot = 0; + int eob = 0; + int link_rx = 0; + + if (IS_ERR_OR_NULL(go_tre)) + return go_tre; + + go_tre->dword[0] = MSM_GPI_Q2SPI_GO_TRE_DWORD0(flags, cs, cmd); + go_tre->dword[1] = MSM_GPI_Q2SPI_GO_TRE_DWORD1; + go_tre->dword[2] = MSM_GPI_Q2SPI_GO_TRE_DWORD2(rx_len); + if (cmd == Q2SPI_RX_ONLY) { + eot = 0; + eob = 0; + /* GO TRE on RX: processing needed check this */ + chain = 0; + link_rx = 1; + } else if (cmd == Q2SPI_TX_ONLY) { + eot = 0; + /* GO TRE on TX: processing needed check this */ + eob = 0; + chain = 1; + } else if (cmd == Q2SPI_TX_RX) { + eot = 0; + eob = 0; + chain = 1; + link_rx = 1; + } + go_tre->dword[3] = MSM_GPI_Q2SPI_GO_TRE_DWORD3(link_rx, 0, eot, eob, chain); + Q2SPI_DEBUG(q2spi, "%s rx len %d flags 0x%x cs %d cmd %d eot %d eob %d chain %d\n", + __func__, rx_len, flags, cs, cmd, eot, eob, chain); + + if (cmd == Q2SPI_RX_ONLY) + Q2SPI_DEBUG(q2spi, "%s Q2SPI_RX_ONLY\n", __func__); + else if (cmd == Q2SPI_TX_ONLY) + Q2SPI_DEBUG(q2spi, "%s Q2SPI_TX_ONLY\n", __func__); + else if (cmd == Q2SPI_TX_RX) + Q2SPI_DEBUG(q2spi, "%s Q2SPI_TX_RX_ONLY\n", __func__); + + Q2SPI_DEBUG(q2spi, "%s go_tre dword[0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x\n", + __func__, go_tre->dword[0], go_tre->dword[1], go_tre->dword[2], + go_tre->dword[3]); + return go_tre; +} + +/*3.10.5 DMA TRE */ +static struct +msm_gpi_tre *setup_dma_tre(struct msm_gpi_tre *tre, dma_addr_t buf, u32 len, + struct q2spi_geni *q2spi, bool is_tx) +{ + if (IS_ERR_OR_NULL(tre)) + return tre; + + tre->dword[0] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD0(buf); + tre->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(buf); + tre->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(len); + tre->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, is_tx, 0, 0); + Q2SPI_DEBUG(q2spi, "%s dma_tre->dword[0]:0x%x dword[1]:0x%x dword[2]:0x%x dword[3]:0x%x\n", + __func__, tre->dword[0], tre->dword[1], + tre->dword[2], 
tre->dword[3]); + return tre; +} + +int check_gsi_transfer_completion_rx(struct q2spi_geni *q2spi) +{ + int i = 0, ret = 0; + unsigned long timeout = 0, xfer_timeout = 0; + + xfer_timeout = XFER_TIMEOUT_OFFSET; + timeout = wait_for_completion_timeout(&q2spi->rx_cb, msecs_to_jiffies(xfer_timeout)); + if (timeout <= 0) { + Q2SPI_ERROR(q2spi, "%s Rx[%d] timeout%lu\n", __func__, i, timeout); + ret = -ETIMEDOUT; + goto err_gsi_geni_transfer; + } else { + Q2SPI_DEBUG(q2spi, "%s rx completed\n", __func__); + } +err_gsi_geni_transfer: + return ret; +} + +int check_gsi_transfer_completion(struct q2spi_geni *q2spi) +{ + int i = 0, ret = 0; + unsigned long timeout = 0, xfer_timeout = 0; + + xfer_timeout = XFER_TIMEOUT_OFFSET; + Q2SPI_DEBUG(q2spi, "%s tx_eot:%d rx_eot:%d\n", __func__, + q2spi->gsi->num_tx_eot, q2spi->gsi->num_rx_eot); + for (i = 0 ; i < q2spi->gsi->num_tx_eot; i++) { + timeout = + wait_for_completion_timeout(&q2spi->tx_cb, msecs_to_jiffies(xfer_timeout)); + if (timeout <= 0) { + Q2SPI_ERROR(q2spi, "%s Tx[%d] timeout\n", __func__, i); + ret = -ETIMEDOUT; + goto err_gsi_geni_transfer; + } else { + Q2SPI_DEBUG(q2spi, "%s tx completed\n", __func__); + } + } + + for (i = 0 ; i < q2spi->gsi->num_rx_eot; i++) { + timeout = + wait_for_completion_timeout(&q2spi->rx_cb, msecs_to_jiffies(xfer_timeout)); + if (timeout <= 0) { + Q2SPI_ERROR(q2spi, "%s Rx[%d] timeout\n", __func__, i); + ret = -ETIMEDOUT; + goto err_gsi_geni_transfer; + } else { + Q2SPI_DEBUG(q2spi, "%s rx completed\n", __func__); + } + } +err_gsi_geni_transfer: + if (q2spi->gsi->qup_gsi_err) { + Q2SPI_ERROR(q2spi, "%s Err QUP Gsi Error\n", __func__); + q2spi->gsi->qup_gsi_err = false; + q2spi->setup_config0 = false; + dmaengine_terminate_all(q2spi->gsi->tx_c); + } + return ret; +} + +int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt) +{ + struct msm_gpi_tre *c0_tre = NULL; + struct msm_gpi_tre *go_tre = NULL; + struct msm_gpi_tre *tx_tre = NULL; + struct msm_gpi_tre *rx_tre = NULL; + struct 
scatterlist *xfer_tx_sg; + struct scatterlist *xfer_rx_sg; + u8 cs = 0; + u32 tx_rx_len = 0; + int rx_nent = 0; + int tx_nent = 0; + int go_flags = 0; + unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK; + struct q2spi_geni *q2spi = q2spi_pkt->q2spi; + struct q2spi_dma_transfer *xfer; + u8 cmd; + + if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY) + xfer = q2spi->db_xfer; + else + xfer = q2spi->xfer; + cmd = xfer->cmd; + + Q2SPI_DEBUG(q2spi, "%s PID=%d xfer:%p\n", __func__, current->pid, xfer); + reinit_completion(&q2spi->tx_cb); + reinit_completion(&q2spi->rx_cb); + if (q2spi_pkt->vtype == VARIANT_1_HRF) + reinit_completion(&q2spi->doorbell_up); + + Q2SPI_DEBUG(q2spi, "%s cmd:%d q2spi_pkt:%p\n", __func__, cmd, q2spi_pkt); + q2spi->gsi->num_tx_eot = 0; + q2spi->gsi->num_rx_eot = 0; + q2spi->gsi->qup_gsi_err = false; + xfer_tx_sg = q2spi->gsi->tx_sg; + xfer_rx_sg = q2spi->gsi->rx_sg; + c0_tre = &q2spi->gsi->config0_tre; + go_tre = &q2spi->gsi->go_tre; + tx_nent++; + if (!q2spi->setup_config0) { + c0_tre = setup_cfg0_tre(q2spi); + if (IS_ERR_OR_NULL(c0_tre)) { + Q2SPI_DEBUG(q2spi, "%s Err setting c0_tre", __func__); + return -EINVAL; + } + } + + if (cmd == Q2SPI_TX_ONLY) + tx_rx_len = xfer->tx_data_len; + else + tx_rx_len = xfer->rx_data_len; + go_flags |= Q2SPI_CMD; + go_flags |= (SINGLE_SDR_MODE << Q2SPI_MODE_SHIFT) & Q2SPI_MODE; + go_tre = setup_go_tre(cmd, cs, tx_rx_len, go_flags, q2spi); + if (IS_ERR_OR_NULL(go_tre)) { + Q2SPI_DEBUG(q2spi, "%s Err setting g0_tre", __func__); + return -EINVAL; + } + if (cmd == Q2SPI_TX_ONLY) { + tx_nent += 2; + } else if (cmd == Q2SPI_RX_ONLY) { + tx_nent++; + rx_nent++; + } else if (cmd == Q2SPI_TX_RX) { + tx_nent += 2; + rx_nent++; + } + Q2SPI_DEBUG(q2spi, "%s tx_nent:%d rx_nent:%d\n", __func__, tx_nent, rx_nent); + sg_init_table(xfer_tx_sg, tx_nent); + if (rx_nent) + sg_init_table(xfer_rx_sg, rx_nent); + if (c0_tre) + sg_set_buf(xfer_tx_sg++, c0_tre, sizeof(*c0_tre)); + sg_set_buf(xfer_tx_sg++, go_tre, sizeof(*go_tre)); + 
tx_tre = &q2spi->gsi->tx_dma_tre; + tx_tre = setup_dma_tre(tx_tre, xfer->tx_dma, xfer->tx_len, q2spi, 1); + if (IS_ERR_OR_NULL(tx_tre)) { + Q2SPI_ERROR(q2spi, "%s Err setting up tx tre\n", __func__); + return -EINVAL; + } + sg_set_buf(xfer_tx_sg++, tx_tre, sizeof(*tx_tre)); + q2spi->gsi->num_tx_eot++; + + q2spi->gsi->tx_desc = dmaengine_prep_slave_sg(q2spi->gsi->tx_c, q2spi->gsi->tx_sg, tx_nent, + DMA_MEM_TO_DEV, flags); + if (IS_ERR_OR_NULL(q2spi->gsi->tx_desc)) { + Q2SPI_ERROR(q2spi, "%s Err setting up tx desc\n", __func__); + return -EIO; + } + q2spi->gsi->tx_desc->callback = q2spi_gsi_tx_callback; + q2spi->gsi->tx_desc->callback_param = &q2spi->gsi->tx_cb_param; + q2spi->gsi->tx_cb_param.userdata = q2spi_pkt; + q2spi->gsi->tx_cookie = dmaengine_submit(q2spi->gsi->tx_desc); + Q2SPI_DEBUG(q2spi, "%s Tx cb_param:%p\n", __func__, q2spi->gsi->tx_desc->callback_param); + if (dma_submit_error(q2spi->gsi->tx_cookie)) { + Q2SPI_ERROR(q2spi, "%s Err dmaengine_submit failed (%d)\n", + __func__, q2spi->gsi->tx_cookie); + dmaengine_terminate_all(q2spi->gsi->tx_c); + return -EINVAL; + } + + if (cmd & Q2SPI_RX_ONLY) { + rx_tre = &q2spi->gsi->rx_dma_tre; + rx_tre = setup_dma_tre(rx_tre, xfer->rx_dma, xfer->rx_len, q2spi, 1); + if (IS_ERR_OR_NULL(rx_tre)) { + Q2SPI_ERROR(q2spi, "%s Err setting up rx tre\n", __func__); + return -EINVAL; + } + sg_set_buf(xfer_rx_sg, rx_tre, sizeof(*rx_tre)); + q2spi->gsi->rx_desc = dmaengine_prep_slave_sg(q2spi->gsi->rx_c, q2spi->gsi->rx_sg, + rx_nent, DMA_DEV_TO_MEM, flags); + if (IS_ERR_OR_NULL(q2spi->gsi->rx_desc)) { + Q2SPI_ERROR(q2spi, "%s rx_desc fail\n", __func__); + return -EIO; + } + q2spi->gsi->rx_desc->callback = q2spi_gsi_rx_callback; + q2spi->gsi->rx_desc->callback_param = &q2spi->gsi->rx_cb_param; + q2spi->gsi->rx_cb_param.userdata = q2spi_pkt; + q2spi->gsi->num_rx_eot++; + q2spi->gsi->rx_cookie = dmaengine_submit(q2spi->gsi->rx_desc); + Q2SPI_DEBUG(q2spi, "%s Rx cb_param:%p\n", __func__, + q2spi->gsi->rx_desc->callback_param); + if 
(dma_submit_error(q2spi->gsi->rx_cookie)) { + Q2SPI_ERROR(q2spi, "%s Err dmaengine_submit failed (%d)\n", + __func__, q2spi->gsi->rx_cookie); + dmaengine_terminate_all(q2spi->gsi->rx_c); + return -EINVAL; + } + } + if (cmd & Q2SPI_RX_ONLY) { + Q2SPI_DEBUG(q2spi, "%s rx_c dma_async_issue_pending\n", __func__); + q2spi_dump_ipc(q2spi, q2spi->ipc, "GSI DMA-RX", (char *)xfer->rx_buf, tx_rx_len); + dma_async_issue_pending(q2spi->gsi->rx_c); + } + + if (cmd & Q2SPI_TX_ONLY) + q2spi_dump_ipc(q2spi, q2spi->ipc, "GSI DMA TX", (char *)xfer->tx_buf, + Q2SPI_HEADER_LEN + tx_rx_len); + + Q2SPI_DEBUG(q2spi, "%s tx_c dma_async_issue_pending\n", __func__); + dma_async_issue_pending(q2spi->gsi->tx_c); + Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); + return 0; +} + +void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void *ptr) +{ + struct q2spi_geni *q2spi = ptr; + + Q2SPI_DEBUG(q2spi, "%s event:%d\n", __func__, cb->cb_event); + switch (cb->cb_event) { + case MSM_GPI_QUP_NOTIFY: + case MSM_GPI_QUP_MAX_EVENT: + dev_err(q2spi->dev, "%s:cb_ev%d status%llu ts%llu count%llu\n", + __func__, cb->cb_event, cb->status, + cb->timestamp, cb->count); + break; + case MSM_GPI_QUP_ERROR: + case MSM_GPI_QUP_CH_ERROR: + case MSM_GPI_QUP_FW_ERROR: + case MSM_GPI_QUP_PENDING_EVENT: + case MSM_GPI_QUP_EOT_DESC_MISMATCH: + case MSM_GPI_QUP_SW_ERROR: + Q2SPI_ERROR(q2spi, "%s cb_ev %d status %llu ts %llu count %llu\n", + __func__, cb->cb_event, cb->status, + cb->timestamp, cb->count); + Q2SPI_ERROR(q2spi, "%s err_routine:%u err_type:%u err.code%u\n", + __func__, cb->error_log.routine, cb->error_log.type, + cb->error_log.error_code); + q2spi->gsi->qup_gsi_err = true; + break; + case MSM_GPI_QUP_CR_HEADER: + Q2SPI_DEBUG(q2spi, "%s GSI doorbell event\n", __func__); + q2spi_parse_cr_header(q2spi, cb); + break; + default: + break; + } + + if (cb->cb_event == MSM_GPI_QUP_FW_ERROR) { + q2spi_geni_se_dump_regs(q2spi); + Q2SPI_ERROR(q2spi, "%s dump GSI regs\n", __func__); + 
} +} diff --git a/drivers/spi/q2spi-gsi.h b/drivers/spi/q2spi-gsi.h new file mode 100644 index 000000000000..af6fae23292a --- /dev/null +++ b/drivers/spi/q2spi-gsi.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __SPI_Q2SPI_GPI_H_ +#define __SPI_Q2SPI_GPI_H_ + +/* Q2SPI Config0 TRE */ +#define MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD0(tsn, pack, tdn, cs_mode, intr_pol, word_size) \ + (((tsn) << 27) | ((pack) << 24) | \ + ((tdn) << 14) | ((cs_mode) << 6) | ((intr_pol) << 5) | (word_size)) +#define MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD1(tan, cs_clk_del, ssn) \ + ((tan) | ((cs_clk_del) << 8) | ((ssn) << 16)) +#define MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD2(cn_delay, clk_src, clk_div) (((cn_delay) << 20) | \ + ((clk_src) << 16) | (clk_div)) +#define MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD3(link_rx, bei, ieot, ieob, ch) \ + ((0x2 << 20) | (0x2 << 16) | ((link_rx) << 11) | ((bei) << 10) | \ + ((ieot) << 9) | ((ieob) << 8) | (ch)) + +/* Q2SPI Go TRE */ +#define MSM_GPI_Q2SPI_GO_TRE_DWORD0(flags, cs, cmd) (((flags) << 17) | \ + ((cs) << 8) | (cmd)) +#define MSM_GPI_Q2SPI_GO_TRE_DWORD1 (0) +#define MSM_GPI_Q2SPI_GO_TRE_DWORD2(rx_len) (rx_len) +#define MSM_GPI_Q2SPI_GO_TRE_DWORD3(link_rx, bei, ieot, ieob, ch) ((0x2 << 20) | \ + (0x0 << 16) | ((link_rx) << 11) | ((bei) << 10) | ((ieot) << 9) | \ + ((ieob) << 8) | (ch)) + +/** + * struct q2spi_gsi - structure to store gsi information for q2spi driver + * + * @tx_c: TX DMA channel + * @rx_c: RX DMA channel + * @config0_tre: stores config0 tre info + * @go_tre: stores go tre info + * @tx_dma_tre: stores DMA TX tre info + * @rx_dma_tre: stores DMA RX tre info + * @tx_ev: control structure to config gpi dma engine via dmaengine_slave_config() for tx. + * @rx_ev: control structure to config gpi dma engine via dmaengine_slave_config() for rx. 
+ * @tx_desc: async transaction descriptor for tx + * @rx_desc: async transaction descriptor for rx + * @tx_cb_param: gpi specific callback parameters to pass between gpi client and gpi engine for TX. + * @rx_cb_param: gpi specific callback parameters to pass between gpi client and gpi engine for RX. + * @chan_setup: flag to mark channel setup completion. + * @tx_sg: sg table for TX transfers + * @rx_sg: sg table for RX transfers + * tx_cookie: Represents dma tx cookie + * rx_cookie: Represents dma rx cookie + * num_tx_eot: Represents number of TX End of Transfers + * num_rx_eot: Represents number of RX End of Transfers + * qup_gsi_err: flag to represent gsi error if any + */ +struct q2spi_gsi { + struct dma_chan *tx_c; + struct dma_chan *rx_c; + struct msm_gpi_tre config0_tre; + struct msm_gpi_tre go_tre; + struct msm_gpi_tre tx_dma_tre; + struct msm_gpi_tre rx_dma_tre; + struct msm_gpi_ctrl tx_ev; + struct msm_gpi_ctrl rx_ev; + struct dma_async_tx_descriptor *tx_desc; + struct dma_async_tx_descriptor *rx_desc; + struct msm_gpi_dma_async_tx_cb_param tx_cb_param; + struct msm_gpi_dma_async_tx_cb_param rx_cb_param; + bool chan_setup; + struct scatterlist tx_sg[3]; + struct scatterlist rx_sg[3]; + dma_cookie_t tx_cookie; + dma_cookie_t rx_cookie; + int num_tx_eot; + int num_rx_eot; + bool qup_gsi_err; +}; + +#endif /* __SPI_Q2SPI_GPI_H_ */ diff --git a/drivers/spi/q2spi-msm-geni.c b/drivers/spi/q2spi-msm-geni.c new file mode 100644 index 000000000000..6409a1f20e7c --- /dev/null +++ b/drivers/spi/q2spi-msm-geni.c @@ -0,0 +1,3031 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "q2spi-msm.h" +#include "q2spi-slave-reg.h" + +#define PINCTRL_DEFAULT "default" +#define PINCTRL_ACTIVE "active" +#define PINCTRL_SLEEP "sleep" + +#define CREATE_TRACE_POINTS +#include "q2spi-trace.h" + +static int q2spi_slave_init(struct q2spi_geni *q2spi); +static int q2spi_gsi_submit(struct q2spi_packet *q2spi_pkt); + +/* FTRACE Logging */ +void q2spi_trace_log(struct device *dev, const char *fmt, ...) +{ + struct va_format vaf = { + .fmt = fmt, + }; + + va_list args; + + va_start(args, fmt); + vaf.va = &args; + trace_q2spi_log_info(dev_name(dev), &vaf); + va_end(args); +} + +/** + * q2spi_kzalloc - allocate kernel memory + * @q2spi: Pointer to main q2spi_geni structure + * @size: Size of the memory to allocate + * + * Allocate device memory. Memory allocated with this function + * is automatically freed on driver detach. + * + * Return: Pointer to allocated memory on success, NULL on failure. + */ +void *q2spi_kzalloc(struct q2spi_geni *q2spi, int size) +{ + void *ptr = kzalloc(size, GFP_ATOMIC); + + if (ptr) { + q2spi_alloc_count++; + Q2SPI_DEBUG(q2spi, "Allocated %d 0x%p\n", q2spi_alloc_count, ptr); + } + return ptr; +} + +/** + * q2spi_kfree - Free allocated kernel memory + * @q2spi: Pointer to main q2spi_geni structure + * @size: Size of the memory to be free + * + * Free kernel device memory allocated by q2spi_kzalloc(). 
+ * + */ +void q2spi_kfree(struct q2spi_geni *q2spi, void *ptr) +{ + if (ptr) { + q2spi_alloc_count--; + kfree(ptr); + } + Q2SPI_DEBUG(q2spi, "Freeing %d 0x%p\n", q2spi_alloc_count, ptr); +} + +void __q2spi_dump_ipc(struct q2spi_geni *q2spi, char *prefix, + char *str, int total, int offset, int size) +{ + char buf[DATA_BYTES_PER_LINE * 5]; + char data[DATA_BYTES_PER_LINE * 5]; + int len = min(size, DATA_BYTES_PER_LINE); + + hex_dump_to_buffer(str, len, DATA_BYTES_PER_LINE, 1, buf, sizeof(buf), false); + scnprintf(data, sizeof(data), "%s[%d-%d of %d]: %s", prefix, offset + 1, + offset + len, total, buf); + Q2SPI_DEBUG(q2spi, "%s: %s\n", __func__, data); +} + +/** + * q2spi_dump_ipc - Log dump function for debugging + * @q2spi: Pointer to main q2spi_geni structure + * @ipc_ctx: IPC context pointer to dump logs in IPC + * @Prefix: Prefix to use in log + * @str: String to dump in log + * @Size: Size of data bytes per line + * free bulk dma mapped buffers allocated by q2spi_pre_alloc_buffers api. + * + */ +void q2spi_dump_ipc(struct q2spi_geni *q2spi, void *ipc_ctx, char *prefix, + char *str, int size) +{ + int offset = 0, total_bytes = size; + + while (size > CHUNK_SIZE) { + __q2spi_dump_ipc(q2spi, prefix, (char *)str + offset, total_bytes, + offset, CHUNK_SIZE); + offset += CHUNK_SIZE; + size -= CHUNK_SIZE; + } + __q2spi_dump_ipc(q2spi, prefix, (char *)str + offset, total_bytes, offset, size); +} + +/** + * q2spi_free_bulk_buf - free bulk buffers from pool + * @q2spi: Pointer to main q2spi_geni structure + * + * free bulk dma mapped buffers allocated by q2spi_pre_alloc_buffers api. 
+ * + * Return: 0 for success, negative number if buffer is not found + */ +static int q2spi_free_bulk_buf(struct q2spi_geni *q2spi) +{ + void *buf; + dma_addr_t dma_addr; + int i; + size_t size; + + for (i = 0; i < Q2SPI_MAX_BUF; i++) { + if (!q2spi->bulk_buf[i]) + continue; + if (q2spi->bulk_buf_used[i]) + return -1; + buf = q2spi->bulk_buf[i]; + dma_addr = q2spi->bulk_dma_buf[i]; + size = sizeof(struct q2spi_client_bulk_access_pkt); + geni_se_common_iommu_free_buf(q2spi->wrapper_dev, &dma_addr, buf, size); + } + return 0; +} + +/** + * q2spi_free_cr_buf - free cr buffers from pool + * @q2spi: Pointer to main q2spi_geni structure + * + * free cr dma mapped buffers allocated by q2spi_pre_alloc_buffers api. + * + * Return: 0 for success, negative number if buffer is not found + */ +static int q2spi_free_cr_buf(struct q2spi_geni *q2spi) +{ + void *buf; + dma_addr_t dma_addr; + int i; + size_t size; + + for (i = 0; i < Q2SPI_MAX_BUF; i++) { + if (!q2spi->cr_buf[i]) + continue; + if (q2spi->cr_buf_used[i]) + return -1; + buf = q2spi->cr_buf[i]; + dma_addr = q2spi->cr_dma_buf[i]; + size = sizeof(struct q2spi_client_dma_pkt); + geni_se_common_iommu_free_buf(q2spi->wrapper_dev, &dma_addr, buf, size); + } + return 0; +} + +/** + * q2spi_free_var5_buf - free var5 buffers from pool + * @q2spi: Pointer to main q2spi_geni structure + * + * free var5 dma mapped buffers allocated by q2spi_pre_alloc_buffers api. 
+ * + * Return: 0 for success, negative number if buffer is not found + */ +static int q2spi_free_var5_buf(struct q2spi_geni *q2spi) +{ + void *buf; + dma_addr_t dma_addr; + int i; + size_t size; + + for (i = 0; i < Q2SPI_MAX_BUF; i++) { + if (!q2spi->var5_buf[i]) + continue; + if (q2spi->var5_buf_used[i]) + return -1; + buf = q2spi->var5_buf[i]; + dma_addr = q2spi->var5_dma_buf[i]; + size = sizeof(struct q2spi_host_variant4_5_pkt); + geni_se_common_iommu_free_buf(q2spi->wrapper_dev, &dma_addr, buf, size); + } + return 0; +} + +/** + * q2spi_free_var1_buf - free var1 buffers from pool + * @q2spi: Pointer to main q2spi_geni structure + * + * free var1 dma mapped buffers allocated by q2spi_pre_alloc_buffers api. + * + * Return: 0 for success, negative number if buffer is not found + */ +static int q2spi_free_var1_buf(struct q2spi_geni *q2spi) +{ + void *buf; + dma_addr_t dma_addr; + int i; + size_t size; + + for (i = 0; i < Q2SPI_MAX_BUF; i++) { + if (!q2spi->var1_buf[i]) + continue; + if (q2spi->var1_buf_used[i]) + return -1; + buf = q2spi->var1_buf[i]; + dma_addr = q2spi->var1_dma_buf[i]; + size = sizeof(struct q2spi_host_variant1_pkt); + geni_se_common_iommu_free_buf(q2spi->wrapper_dev, &dma_addr, buf, size); + } + return 0; +} + +/** + * q2spi_free_dma_buf - free dma mapped buffers + * @q2spi: Pointer to main q2spi_geni structure + * + * free dma mapped buffers allocated by q2spi_pre_alloc_buffers api. + * + * Return: 0 for success, negative number for error condition. 
+ */ +static int q2spi_free_dma_buf(struct q2spi_geni *q2spi) +{ + int ret; + + ret = q2spi_free_bulk_buf(q2spi); + if (!ret) { + Q2SPI_ERROR(q2spi, "%s Err free bulk buf fail\n", __func__); + return ret; + } + + ret = q2spi_free_cr_buf(q2spi); + if (!ret) { + Q2SPI_ERROR(q2spi, "%s Err free cr buf fail\n", __func__); + return ret; + } + + ret = q2spi_free_var5_buf(q2spi); + if (!ret) { + Q2SPI_ERROR(q2spi, "%s Err free var5 buf fail\n", __func__); + return ret; + } + + ret = q2spi_free_var1_buf(q2spi); + if (!ret) { + Q2SPI_ERROR(q2spi, "%s Err free var1 buf fail\n", __func__); + return ret; + } + + return 0; +} + +/** + * q2spi_pre_alloc_buffers - Allocate iommu mapped buffres + * @q2spi: Pointer to main q2spi_geni structure + * + * This function allocates Q2SPI_MAX_BUF buffers of Variant_1 type + * packets and Q2SPI_MAX_BUF bufferes of Variant_5 type packets and + * Q2SPI_MAX_BUF bufferes of CR type 3. + * This function will allocate and map into QUPV3 core context bank. + * + * Return: 0 for success, negative number for error condition. 
+ */ +static int q2spi_pre_alloc_buffers(struct q2spi_geni *q2spi) +{ + int i, ret; + + for (i = 0; i < Q2SPI_MAX_BUF; i++) { + q2spi->var1_buf[i] = + geni_se_common_iommu_alloc_buf(q2spi->wrapper_dev, &q2spi->var1_dma_buf[i], + sizeof(struct q2spi_host_variant1_pkt)); + if (IS_ERR_OR_NULL(q2spi->var1_buf[i])) { + Q2SPI_ERROR(q2spi, "%s Err var1 buf alloc fail\n", __func__); + goto exit_dealloc; + } + + q2spi->var5_buf[i] = + geni_se_common_iommu_alloc_buf(q2spi->wrapper_dev, + &q2spi->var5_dma_buf[i], (SMA_BUF_SIZE + + sizeof(struct q2spi_host_variant4_5_pkt))); + if (IS_ERR_OR_NULL(q2spi->var5_buf[i])) { + Q2SPI_ERROR(q2spi, "%s Err var5 buf alloc fail\n", __func__); + goto exit_dealloc; + } + + q2spi->cr_buf[i] = + geni_se_common_iommu_alloc_buf(q2spi->wrapper_dev, &q2spi->cr_dma_buf[i], + RX_DMA_CR_BUF_SIZE); + if (IS_ERR_OR_NULL(q2spi->cr_buf[i])) { + Q2SPI_ERROR(q2spi, "%s Err cr buf alloc fail\n", __func__); + goto exit_dealloc; + } + memset(q2spi->cr_buf[i], 0xFF, RX_DMA_CR_BUF_SIZE); + + q2spi->bulk_buf[i] = + geni_se_common_iommu_alloc_buf(q2spi->wrapper_dev, &q2spi->bulk_dma_buf[i], + sizeof(struct + q2spi_client_bulk_access_pkt)); + if (IS_ERR_OR_NULL(q2spi->bulk_buf[i])) { + Q2SPI_ERROR(q2spi, "%s Err bulk buf alloc fail\n", __func__); + goto exit_dealloc; + } + Q2SPI_DEBUG(q2spi, "%s var1_buf[%d] virt:%p phy:%p\n", + __func__, i, (void *)q2spi->var1_buf[i], q2spi->var1_dma_buf[i]); + Q2SPI_DEBUG(q2spi, "%s var5_buf[%d] virt:%p phy:%p\n", + __func__, i, (void *)q2spi->var5_buf[i], q2spi->var5_dma_buf[i]); + Q2SPI_DEBUG(q2spi, "%s cr_buf[%d] virt:%p phy:%p\n", + __func__, i, (void *)q2spi->cr_buf[i], q2spi->cr_dma_buf[i]); + Q2SPI_DEBUG(q2spi, "%s bulk_buf[%d] virt:%p phy:%p\n", + __func__, i, (void *)q2spi->bulk_buf[i], q2spi->bulk_dma_buf[i]); + } + return 0; +exit_dealloc: + ret = q2spi_free_dma_buf(q2spi); + if (ret) + ret = -ENOMEM; + + return ret; +} + +/** + * q2spi_unmap_dma_buf_used - Unmap dma buffer used + * @q2spi: Pointer to main q2spi_geni 
structure + * @tx_dma: TX dma pointer + * @rx_dma: RX dma pointer + * + * This function marks buffer used to free so that we are reuse the buffers. + * + */ +static void +q2spi_unmap_dma_buf_used(struct q2spi_geni *q2spi, dma_addr_t tx_dma, dma_addr_t rx_dma) +{ + int i = 0; + + if (!tx_dma && !rx_dma) { + Q2SPI_ERROR(q2spi, "%s Err TX/RX dma buffer NULL\n", __func__); + return; + } + + Q2SPI_DEBUG(q2spi, "%s for tx_dma:%p rx_dma:%p\n", __func__, tx_dma, rx_dma); + + for (i = 0; i < Q2SPI_MAX_BUF; i++) { + Q2SPI_DEBUG(q2spi, "%s var1_dma_buf[%d]=%p var5_dma_buf[%d]=%p\n", + __func__, i, q2spi->var1_dma_buf[i], i, q2spi->var5_dma_buf[i]); + Q2SPI_DEBUG(q2spi, "%s cr_dma_buf[%d]=%p bulk_dma_buf[%d]=%p\n", + __func__, i, q2spi->cr_dma_buf[i], i, q2spi->bulk_dma_buf[i]); + if (tx_dma == q2spi->var1_dma_buf[i]) { + if (q2spi->var1_buf_used[i]) { + Q2SPI_DEBUG(q2spi, "UNMAP var1_buf[%d] virt:%p phy:%p\n", + i, q2spi->var1_buf[i], q2spi->var1_dma_buf[i]); + q2spi->var1_buf_used[i] = NULL; + } + } else if (tx_dma == q2spi->var5_dma_buf[i]) { + if (q2spi->var5_buf_used[i]) { + Q2SPI_DEBUG(q2spi, "UNMAP var5_buf[%d] virt:%p phy:%p\n", + i, q2spi->var5_buf[i], q2spi->var5_dma_buf[i]); + q2spi->var5_buf_used[i] = NULL; + } + } + if (rx_dma == q2spi->cr_dma_buf[i]) { + if (q2spi->cr_buf_used[i]) { + Q2SPI_DEBUG(q2spi, "UNMAP cr_buf[%d] virt:%p phy:%p\n", + i, q2spi->cr_buf[i], q2spi->cr_dma_buf[i]); + q2spi->cr_buf_used[i] = NULL; + } + } else if (rx_dma == q2spi->bulk_dma_buf[i]) { + if (q2spi->bulk_buf_used[i]) { + Q2SPI_DEBUG(q2spi, "UNMAP bulk_buf[%d] virt:%p phy:%p\n", + i, q2spi->bulk_buf[i], q2spi->bulk_dma_buf[i]); + q2spi->bulk_buf_used[i] = NULL; + } + } + } + Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); +} + +/** + * q2spi_get_doorbell_rx_buf - allocate RX DMA buffer to GSI + * @q2spi: Pointer to main q2spi_geni structure + * + * This function will get one RX buffer from pool of buffers + * allocated using q2spi_pre_alloc_buffers() and prepare RX 
DMA + * descriptor and map to GSI. + * This RX buffer is used to receive doorbell from GSI. + * + * Return: 0 for success, negative number for error condition. + */ +static int q2spi_get_doorbell_rx_buf(struct q2spi_geni *q2spi) +{ + struct q2spi_dma_transfer *xfer = q2spi->db_xfer; + int i, ret = 0; + + /* Pick rx buffers from pre allocated pool */ + for (i = 0; i < Q2SPI_MAX_BUF; i++) { + if (!q2spi->cr_buf_used[i]) + break; + } + if (i < Q2SPI_MAX_BUF) { + Q2SPI_DEBUG(q2spi, "%s q2spi:%p q2spi_xfer:%p\n", __func__, q2spi, q2spi->xfer); + xfer->rx_buf = q2spi->cr_buf[i]; + xfer->rx_dma = q2spi->cr_dma_buf[i]; + q2spi->cr_buf_used[i] = q2spi->cr_buf[i]; + q2spi->rx_buf = xfer->rx_buf; + Q2SPI_DEBUG(q2spi, "ALLOC %s rx_buf:%p rx_dma:%p\n", + __func__, xfer->rx_buf, xfer->rx_dma); + memset(xfer->rx_buf, 0xFF, RX_DMA_CR_BUF_SIZE); + } + if (!xfer->rx_buf || !xfer->rx_dma) { + Q2SPI_ERROR(q2spi, "%s Err RX dma alloc failed\n", __func__); + ret = -ENOMEM; + } + return ret; +} + +/** + * q2spi_alloc_rx_buf - allocate RX DMA buffers + * @q2spi: Pointer to main q2spi_geni structure + * @len: size of the memory to be allocate + * + * This function will allocate RX dma_alloc_coherant memory + * of the length specified. This RX buffer is used to + * receive rx data from slave. + * + * Return: 0 for success, negative number for error condition. 
+ */ +static int q2spi_alloc_rx_buf(struct q2spi_geni *q2spi, int len) +{ + struct q2spi_dma_transfer *xfer = q2spi->xfer; + int ret = 0; + + Q2SPI_DEBUG(q2spi, "%s len:%d\n", __func__, len); + if (!len) { + Q2SPI_ERROR(q2spi, "%s Err Zero length for alloc\n", __func__); + ret = -EINVAL; + goto fail; + } + + xfer->rx_buf = geni_se_common_iommu_alloc_buf(q2spi->wrapper_dev, &xfer->rx_dma, len); + if (IS_ERR_OR_NULL(xfer->rx_buf)) { + Q2SPI_ERROR(q2spi, "%s Err iommu alloc buf failed\n", __func__); + ret = -ENOMEM; + goto fail; + } + Q2SPI_DEBUG(q2spi, "%s rx_buf=%p rx_dma=%p\n", __func__, xfer->rx_buf, xfer->rx_dma); + memset(xfer->rx_buf, 0xFF, len); +fail: + return ret; +} + +/** + * q2spi_hrf_entry_format - prepare HRF entry for HRF flow + * @q2spi: Pointer to main q2spi_geni structure + * @q2spi_req: structure for q2spi_request + * @q2spi_hrf_req: pointer to q2spi hrf type of q2spi_request + * + * This function hrf entry as per the format defined in spec. + * + * Return: 0 for success, negative number for error condition. 
+ */ +static int q2spi_hrf_entry_format(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, + struct q2spi_request **q2spi_hrf_req_ptr) +{ + struct q2spi_request *q2spi_hrf_req = NULL; + struct q2spi_mc_hrf_entry hrf_entry; + int flow_id; + + q2spi_hrf_req = q2spi_kzalloc(q2spi, sizeof(struct q2spi_request)); + if (!q2spi_hrf_req) { + Q2SPI_ERROR(q2spi, "%s Err alloc hrf req failed\n", __func__); + return -ENOMEM; + } + q2spi_hrf_req->data_buff = q2spi_kzalloc(q2spi, sizeof(struct q2spi_mc_hrf_entry)); + if (!q2spi_hrf_req->data_buff) { + Q2SPI_ERROR(q2spi, "%s Err alloc hrf data_buff failed\n", __func__); + return -ENOMEM; + } + *q2spi_hrf_req_ptr = q2spi_hrf_req; + if (q2spi_req.cmd == HRF_WRITE) { + hrf_entry.cmd = 3; + hrf_entry.parity = 1; + } else if (q2spi_req.cmd == HRF_READ) { + hrf_entry.cmd = 4; + hrf_entry.parity = 0; + } + hrf_entry.flow = HRF_ENTRY_FLOW; + hrf_entry.type = HRF_ENTRY_TYPE; + flow_id = q2spi_alloc_xfer_tid(q2spi); + if (flow_id < 0) { + Q2SPI_ERROR(q2spi, "%s Err failed to alloc flow_id", __func__); + return -EINVAL; + } + hrf_entry.flow_id = flow_id; + Q2SPI_DEBUG(q2spi, "%s flow_id:%d len:%d", __func__, hrf_entry.flow_id, q2spi_req.data_len); + if (q2spi_req.data_len % 4) { + hrf_entry.dwlen_part1 = (q2spi_req.data_len / 4) & 0xF; + hrf_entry.dwlen_part2 = ((q2spi_req.data_len / 4) >> 4) & 0xFF; + hrf_entry.dwlen_part3 = ((q2spi_req.data_len / 4) >> 12) & 0xFF; + } else { + hrf_entry.dwlen_part1 = (q2spi_req.data_len / 4 - 1) & 0xF; + hrf_entry.dwlen_part2 = ((q2spi_req.data_len / 4 - 1) >> 4) & 0xFF; + hrf_entry.dwlen_part3 = ((q2spi_req.data_len / 4 - 1) >> 12) & 0xFF; + } + Q2SPI_DEBUG(q2spi, "%s hrf_entry dwlen part1:%d part2:%d part3:%d\n", + __func__, hrf_entry.dwlen_part1, hrf_entry.dwlen_part2, hrf_entry.dwlen_part3); + hrf_entry.arg2 = q2spi_req.end_point; + hrf_entry.arg3 = q2spi_req.proto_ind; + q2spi_hrf_req->addr = q2spi_req.addr; + q2spi_hrf_req->data_len = HRF_ENTRY_DATA_LEN; + q2spi_hrf_req->cmd = HRF_WRITE; + 
q2spi_hrf_req->flow_id = hrf_entry.flow_id; + q2spi_hrf_req->end_point = q2spi_req.end_point; + q2spi_hrf_req->proto_ind = q2spi_req.proto_ind; + memcpy(q2spi_hrf_req->data_buff, &hrf_entry, sizeof(struct q2spi_mc_hrf_entry)); + Q2SPI_DEBUG(q2spi, "%s End q2spi_req:%d q2spi_hrf_req:%p *q2spi_hrf_req:%d\n", + __func__, q2spi_req, q2spi_hrf_req, *q2spi_hrf_req); + + return 0; +} + +/** + * q2spi_map_doorbell_rx_buf - map rx dma buffer to receive doorbell + * @q2spi: Pointer to main q2spi_geni structure + * + * This function get one rx buffer using q2spi_get_doorbell_rx_buf and map to + * gsi so that SW can receive doorbell + * + * Return: 0 for success, negative number for error condition. + */ +int q2spi_map_doorbell_rx_buf(struct q2spi_geni *q2spi) +{ + struct q2spi_packet *q2spi_pkt; + int ret = 0; + + Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid); + if (q2spi->db_xfer->rx_dma) { + Q2SPI_DEBUG(q2spi, "%s Doorbell buffer already mapped\n", __func__); + return 0; + } + q2spi_pkt = q2spi_kzalloc(q2spi, sizeof(struct q2spi_packet)); + if (!q2spi_pkt) { + Q2SPI_ERROR(q2spi, "%s Err q2spi_pkt alloc fail\n", __func__); + return -ENOMEM; + } + + q2spi_pkt->m_cmd_param = Q2SPI_RX_ONLY; + memset(q2spi->db_xfer, 0, sizeof(struct q2spi_dma_transfer)); + /* RX DMA buffer allocated to map to GSI to Recive Doorbell */ + /* Alloc RX DMA buf and map to gsi so that SW can receive Doorbell */ + ret = q2spi_get_doorbell_rx_buf(q2spi); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err failed to alloc RX DMA buf", __func__); + return ret; + } + /* Map RX DMA descriptor on RX channel */ + q2spi->db_xfer->cmd = Q2SPI_RX_ONLY; + q2spi->db_xfer->rx_data_len = RX_DMA_CR_BUF_SIZE; /* 96 byte for 4 crs in doorbell */ + q2spi->db_xfer->rx_len = RX_DMA_CR_BUF_SIZE; + q2spi_pkt->q2spi = q2spi; + mutex_lock(&q2spi->gsi_lock); + ret = q2spi_setup_gsi_xfer(q2spi_pkt); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err q2spi_setup_gsi_xfer failed: %d\n", __func__, ret); + 
mutex_unlock(&q2spi->gsi_lock); + return ret; + } + mutex_unlock(&q2spi->gsi_lock); + q2spi->doorbell_setup = true; + /* Todo unmap_buff and tid */ + Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); + return ret; +} + +/** + * q2spi_alloc_host_variant - allocate memory for host variant + * @q2spi: Pointer to main q2spi_geni structure + * @len: size of the memory to be allocate + * + * This function will allocate dma_alloc_coherant memory + * of the length specified. + * + * Return: address of the buffer on success, NULL or ERR_PTR on + * failure/error. + */ +void *q2spi_alloc_host_variant(struct q2spi_geni *q2spi, int len) +{ + void *ptr = NULL; + + ptr = geni_se_common_iommu_alloc_buf(q2spi->wrapper_dev, &q2spi->dma_buf, len); + + return ptr; +} + +/** + * q2spi_doorbell - q2spi doorbell to handle CR events from q2spi slave + * @q2spi_cr_hdr_event: Pointer to q2spi_cr_hdr_event + * + * If Doorbell interrupt to Host is enabled, then Host will get doorbell interrupt upon + * any error or new CR events from q2spi slave. This function used to parse CR header event + * part of doorbell and prepare CR packet and add to CR queue list. Also map new RX + * dma buffer to receive next doorbell. + * + * Return: 0 for success, negative number for error condition. 
+ */ +void q2spi_doorbell(struct q2spi_geni *q2spi, + const struct qup_q2spi_cr_header_event *q2spi_cr_hdr_event) +{ + Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid); + memcpy(&q2spi->q2spi_cr_hdr_event, q2spi_cr_hdr_event, + sizeof(struct qup_q2spi_cr_header_event)); + queue_work(q2spi->doorbell_wq, &q2spi->q2spi_doorbell_work); + Q2SPI_DEBUG(q2spi, "%s End work queued PID=%d\n", __func__, current->pid); +} + +/** + * q2spi_prepare_cr_pkt - Prepares CR packet as part of doorbell processing + * @q2spi: Pointer to main q2spi_geni structure + * + * Return: 0 for success, negative number on failure + */ +static int q2spi_prepare_cr_pkt(struct q2spi_geni *q2spi) +{ + struct q2spi_cr_packet *q2spi_cr_pkt = NULL; + const struct qup_q2spi_cr_header_event *q2spi_cr_hdr_event = NULL; + unsigned long flags; + int ret = 0, i = 0; + + q2spi_cr_hdr_event = &q2spi->q2spi_cr_hdr_event; + q2spi_cr_pkt = q2spi_kzalloc(q2spi, sizeof(struct q2spi_cr_packet)); + if (!q2spi_cr_pkt) { + Q2SPI_ERROR(q2spi, "%s Err Invaldi q2spi_cr_pkt\n", __func__); + return -ENOMEM; + } + Q2SPI_DEBUG(q2spi, "%s q2spi_cr_pkt:%p\n", __func__, q2spi_cr_pkt); + q2spi->cr_pkt = q2spi_cr_pkt; + spin_lock_irqsave(&q2spi->cr_queue_lock, flags); + q2spi_cr_pkt->no_of_valid_crs = q2spi_cr_hdr_event->byte0_len; + Q2SPI_DEBUG(q2spi, "%s q2spi_cr_pkt hdr:%p hdr_0:0x%x no_of_crs=%d\n", + __func__, q2spi_cr_pkt->cr_hdr, q2spi_cr_hdr_event->cr_hdr_0, + q2spi_cr_pkt->no_of_valid_crs); + + if (q2spi_cr_hdr_event->byte0_err) + Q2SPI_DEBUG(q2spi, "%s Error: q2spi_cr_hdr_event->byte0_err=%d\n", + __func__, q2spi_cr_hdr_event->byte0_err); + + for (i = 0; i < q2spi_cr_hdr_event->byte0_len; i++) { + q2spi_cr_pkt->cr_hdr[i] = q2spi_kzalloc(q2spi, sizeof(struct q2spi_cr_header)); + if (!q2spi_cr_pkt->cr_hdr[i]) { + Q2SPI_ERROR(q2spi, "%s Err cr_hdr is NULL\n", __func__); + return -ENOMEM; + } else if (i == 0) { + q2spi_cr_pkt->cr_hdr[i]->cmd = (q2spi_cr_hdr_event->cr_hdr_0) & 0xF; + q2spi_cr_pkt->cr_hdr[i]->flow 
= (q2spi_cr_hdr_event->cr_hdr_0 >> 4) & 0x1; + q2spi_cr_pkt->cr_hdr[i]->type = (q2spi_cr_hdr_event->cr_hdr_0 >> 5) & 0x3; + q2spi_cr_pkt->cr_hdr[i]->parity = (q2spi_cr_hdr_event->cr_hdr_0 >> 7) & 0x1; + } else if (i == 1) { + q2spi_cr_pkt->cr_hdr[i]->cmd = (q2spi_cr_hdr_event->cr_hdr_1) & 0xF; + q2spi_cr_pkt->cr_hdr[i]->flow = (q2spi_cr_hdr_event->cr_hdr_1 >> 4) & 0x1; + q2spi_cr_pkt->cr_hdr[i]->type = (q2spi_cr_hdr_event->cr_hdr_1 >> 5) & 0x3; + q2spi_cr_pkt->cr_hdr[i]->parity = (q2spi_cr_hdr_event->cr_hdr_1 >> 7) & 0x1; + } else if (i == 2) { + q2spi_cr_pkt->cr_hdr[i]->cmd = (q2spi_cr_hdr_event->cr_hdr_2) & 0xF; + q2spi_cr_pkt->cr_hdr[i]->flow = (q2spi_cr_hdr_event->cr_hdr_2 >> 4) & 0x1; + q2spi_cr_pkt->cr_hdr[i]->type = (q2spi_cr_hdr_event->cr_hdr_2 >> 5) & 0x3; + q2spi_cr_pkt->cr_hdr[i]->parity = (q2spi_cr_hdr_event->cr_hdr_2 >> 7) & 0x1; + } else if (i == 3) { + q2spi_cr_pkt->cr_hdr[i]->cmd = (q2spi_cr_hdr_event->cr_hdr_3) & 0xF; + q2spi_cr_pkt->cr_hdr[i]->flow = (q2spi_cr_hdr_event->cr_hdr_3 >> 4) & 0x1; + q2spi_cr_pkt->cr_hdr[i]->type = (q2spi_cr_hdr_event->cr_hdr_3 >> 5) & 0x3; + q2spi_cr_pkt->cr_hdr[i]->parity = (q2spi_cr_hdr_event->cr_hdr_3 >> 7) & 0x1; + } + Q2SPI_DEBUG(q2spi, "%s CR HDR[%d] cmd/opcode:%d mc_flow:%d type:%d parity:%d\n", + __func__, i, q2spi_cr_pkt->cr_hdr[i]->cmd, + q2spi_cr_pkt->cr_hdr[i]->flow, q2spi_cr_pkt->cr_hdr[i]->type, + q2spi_cr_pkt->cr_hdr[i]->parity); + } + q2spi_cr_pkt->xfer = q2spi->xfer; + spin_unlock_irqrestore(&q2spi->cr_queue_lock, flags); + return ret; +} + +static int q2spi_open(struct inode *inode, struct file *filp) +{ + struct cdev *cdev; + char path[256]; + struct q2spi_chrdev *q2spi_cdev; + struct q2spi_geni *q2spi; + int ret = 0, rc = 0; + + rc = iminor(inode); + cdev = inode->i_cdev; + q2spi_cdev = container_of(cdev, struct q2spi_chrdev, cdev[rc]); + if (!q2spi_cdev) { + pr_err("%s Err q2spi_cdev NULL\n", __func__); + return -EINVAL; + } + + q2spi = container_of(q2spi_cdev, struct q2spi_geni, chrdev); + if 
(!q2spi) { + pr_err("%s Err q2spi NULL\n", __func__); + return -EINVAL; + } + Q2SPI_DEBUG(q2spi, "%s PID:%d, allocs=%d\n", __func__, current->pid, q2spi_alloc_count); + q2spi->init = true; + /* Q2SPI slave HPG 2.1 Initialization */ + ret = q2spi_slave_init(q2spi); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err Failed to init q2spi slave %d\n", + __func__, ret); + return ret; + } + if (!q2spi->doorbell_setup) { + ret = q2spi_map_doorbell_rx_buf(q2spi); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err failed to alloc RX DMA buf\n", __func__); + return ret; + } + } + filp->private_data = q2spi; + Q2SPI_DEBUG(q2spi, "%s End PID:%d, allocs:%d\n", __func__, current->pid, q2spi_alloc_count); + Q2SPI_DEBUG(q2spi, "%s q2spi:%p q2spi_cdev:%p rc:%d cdev[rc]:%p cdev[rc]:%p\n", + __func__, q2spi, q2spi_cdev, rc, cdev[rc], cdev[rc]); + Q2SPI_DEBUG(q2spi, "%s check path find device descriptor for file %s\n", + __func__, d_path(&filp->f_path, path, 256)); + return 0; +} + +/** + * q2spi_get_variant_buf - Get one buffer allocated from pre allocated buffers + * @q2spi: Pointer to main q2spi_geni structure + * @q2spi_pkt: pointer to q2spi packet + * @vtype: variant type in q2spi_pkt + * + * This function get one buffer allocated using q2spi_pre_alloc_buffers() based on variant type + * specified in q2spi packet. + * + * Return: 0 for success, negative number for error condition. 
+ */ +static inline void *q2spi_get_variant_buf(struct q2spi_geni *q2spi, + struct q2spi_packet *q2spi_pkt, enum var_type vtype) +{ + int i; + + if (vtype != VARIANT_1 && vtype != VARIANT_5) { + Q2SPI_ERROR(q2spi, "%s Err Invalid variant:%d!\n", __func__, vtype); + return NULL; + } + + /* Pick buffers from pre allocated pool */ + if (vtype == VARIANT_1) { + for (i = 0; i < Q2SPI_MAX_BUF; i++) { + if (!q2spi->var1_buf_used[i]) + break; + } + if (i < Q2SPI_MAX_BUF) { + q2spi->var1_buf_used[i] = q2spi->var1_buf[i]; + q2spi_pkt->var1_tx_dma = q2spi->var1_dma_buf[i]; + Q2SPI_DEBUG(q2spi, "%s ALLOC var1 i:%d vir1_buf:%p phy_dma_buf:%p\n", + __func__, i, (void *)q2spi->var1_buf[i], + q2spi->var1_dma_buf[i]); + return (void *)q2spi->var1_buf[i]; + } + } else if (vtype == VARIANT_5) { + for (i = 0; i < Q2SPI_MAX_BUF; i++) { + if (!q2spi->var5_buf_used[i]) + break; + } + if (i < Q2SPI_MAX_BUF) { + q2spi->var5_buf_used[i] = q2spi->var5_buf[i]; + q2spi_pkt->var5_tx_dma = q2spi->var5_dma_buf[i]; + Q2SPI_DEBUG(q2spi, "%s ALLOC var5 i:%d vir5_buf:%p phy_dma_buf:%p\n", + __func__, i, (void *)q2spi->var5_buf[i], + q2spi->var5_dma_buf[i]); + return (void *)q2spi->var5_buf[i]; + } + } + Q2SPI_ERROR(q2spi, "%s Err Short of buffers for variant:%d!\n", __func__, vtype); + return NULL; +} + +/** + * q2spi_alloc_xfer_tid() - Allocate a tid to q2spi transfer request + * @q2spi: Pointer to main q2spi_geni structure + * + * Return: zero on success with valid xfer->tid and error code on failures. 
+ */ +int q2spi_alloc_xfer_tid(struct q2spi_geni *q2spi) +{ + unsigned long flags; + int tid = 0; + + spin_lock_irqsave(&q2spi->txn_lock, flags); + tid = idr_alloc_cyclic(&q2spi->tid_idr, q2spi, Q2SPI_START_TID_ID, + Q2SPI_END_TID_ID, GFP_ATOMIC); + if (tid < Q2SPI_START_TID_ID || tid > Q2SPI_END_TID_ID) { + Q2SPI_ERROR(q2spi, "%s Err Invalid tid:%d\n", __func__, tid); + spin_unlock_irqrestore(&q2spi->txn_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&q2spi->txn_lock, flags); + return tid; +} + +/** + * q2spi_free_xfer_tid() - Freee tid of xfer + * @q2spi: Pointer to main q2spi_geni structure + * + */ +void q2spi_free_xfer_tid(struct q2spi_geni *q2spi, int tid) +{ + unsigned long flags; + + spin_lock_irqsave(&q2spi->txn_lock, flags); + idr_remove(&q2spi->tid_idr, tid); + spin_unlock_irqrestore(&q2spi->txn_lock, flags); +} + +static unsigned int +q2spi_get_dw_offset(struct q2spi_geni *q2spi, enum cmd_type c_type, unsigned int reg_offset) +{ + unsigned int offset = 0, remainder = 0, quotient = 0; + + offset = reg_offset / Q2SPI_OFFSET_MASK; + Q2SPI_DEBUG(q2spi, "%s type:%d offset:%d remainder:%d quotient:%d\n", + __func__, c_type, offset, remainder, quotient); + return offset; +} + +int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, + struct q2spi_packet **q2spi_pkt_ptr) +{ + struct q2spi_packet *q2spi_pkt; + struct q2spi_host_variant1_pkt *q2spi_hc_var1; + int ret; + unsigned int dw_offset = 0; + + q2spi_pkt = q2spi_kzalloc(q2spi, sizeof(struct q2spi_packet)); + if (!q2spi_pkt) { + Q2SPI_ERROR(q2spi, "%s Err Invalid q2spi_pkt\n", __func__); + return -ENOMEM; + } + memset(q2spi_pkt, 0, sizeof(struct q2spi_packet)); + *q2spi_pkt_ptr = q2spi_pkt; + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->list:%p next:%p prev:%p\n", __func__, &q2spi_pkt->list, + &q2spi_pkt->list.next, &q2spi_pkt->list.prev); + q2spi_hc_var1 = (struct q2spi_host_variant1_pkt *) + q2spi_get_variant_buf(q2spi, q2spi_pkt, VARIANT_1); + if (!q2spi_hc_var1) { + 
Q2SPI_DEBUG(q2spi, "%s Err Invalid q2spi_hc_var1\n", __func__); + return -ENOMEM; + } + Q2SPI_DEBUG(q2spi, "%s var_1:%p var_1_phy:%p q2spi_req:%p cmd:%d\n", + __func__, q2spi_hc_var1, q2spi_pkt->var1_tx_dma, q2spi_req, q2spi_req.cmd); + if (q2spi_req.cmd == LOCAL_REG_READ || q2spi_req.cmd == HRF_READ) { + q2spi_hc_var1->cmd = HC_DATA_READ; + q2spi_pkt->m_cmd_param = Q2SPI_TX_RX; + ret = q2spi_alloc_rx_buf(q2spi, q2spi_req.data_len); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err failed to alloc RX DMA buf", __func__); + return -ENOMEM; + } + } else if (q2spi_req.cmd == LOCAL_REG_WRITE || q2spi_req.cmd == HRF_WRITE) { + q2spi_hc_var1->cmd = HC_DATA_WRITE; + q2spi_pkt->m_cmd_param = Q2SPI_TX_ONLY; + q2spi_req.data_len = sizeof(q2spi_hc_var1->data_buf) <= q2spi_req.data_len ? + sizeof(q2spi_hc_var1->data_buf) : q2spi_req.data_len; + memcpy(q2spi_hc_var1->data_buf, q2spi_req.data_buff, q2spi_req.data_len); + Q2SPI_DEBUG(q2spi, "%s q2spi_req_buff:0x%x\n", + __func__, *((int *)q2spi_req.data_buff)); + q2spi_kfree(q2spi, q2spi_req.data_buff); + } + q2spi_hc_var1->flow = MC_FLOW; + q2spi_hc_var1->interrupt = CLIENT_INTERRUPT; + q2spi_hc_var1->seg_last = SEGMENT_LST; + if (q2spi_req.data_len % 4) + q2spi_hc_var1->dw_len = (q2spi_req.data_len / 4); + else + q2spi_hc_var1->dw_len = (q2spi_req.data_len / 4) - 1; + q2spi_hc_var1->access_type = LOCAL_REG_ACCESS; + q2spi_hc_var1->address_mode = CLIENT_ADDRESS; + Q2SPI_DEBUG(q2spi, "%s data_len:%d dw_len:%d req_flow_id:%d\n", + __func__, q2spi_req.data_len, q2spi_hc_var1->dw_len, q2spi_req.flow_id); + if (!q2spi_req.flow_id && !q2spi->hrf_flow) { + ret = q2spi_alloc_xfer_tid(q2spi); + if (ret < 0) { + Q2SPI_ERROR(q2spi, "%s Err failed to alloc xfer_tid\n", __func__); + return -EINVAL; + } + q2spi_hc_var1->flow_id = ret; + } else { + q2spi_hc_var1->flow_id = q2spi_req.flow_id; + } + q2spi->xfer->tid = q2spi_hc_var1->flow_id; + dw_offset = q2spi_get_dw_offset(q2spi, q2spi_req.cmd, q2spi_req.addr); + q2spi_hc_var1->reg_offset = dw_offset; 
+ q2spi_pkt->var1_pkt = q2spi_hc_var1; + q2spi_pkt->vtype = VARIANT_1; + q2spi_pkt->valid = true; + q2spi_pkt->sync = q2spi_req.sync; + + Q2SPI_DEBUG(q2spi, "%s *q2spi_pkt_ptr:%p End ret flow_id:%d\n", + __func__, *q2spi_pkt_ptr, q2spi_hc_var1->flow_id); + return q2spi_hc_var1->flow_id; +} + +int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, + struct q2spi_packet *q2spi_pkt) +{ + struct q2spi_host_variant4_5_pkt *q2spi_hc_var5; + int ret = 0, flow_id; + + if (!q2spi) { + Q2SPI_ERROR(q2spi, "%s Err q2spi NULL\n", __func__); + return -EINVAL; + } + if (!q2spi_pkt) { + Q2SPI_ERROR(q2spi, "%s Err Invalid q2spi_pkt\n", __func__); + return -EINVAL; + } + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p pkt_var_1:%p pkt_dma:%p pkt_var_5:%p\n", + __func__, q2spi_pkt, q2spi_pkt->var1_pkt, q2spi_pkt->var5_tx_dma, + q2spi_pkt->var5_pkt); + Q2SPI_DEBUG(q2spi, "%s q2spi_req:%p req_cmd:%d req_addr:%d req_len:%d req_data_buf:%p\n", + __func__, q2spi_req, q2spi_req.cmd, q2spi_req.addr, q2spi_req.data_len, + q2spi_req.data_buff); + + q2spi_hc_var5 = (struct q2spi_host_variant4_5_pkt *) + q2spi_get_variant_buf(q2spi, q2spi_pkt, VARIANT_5); + if (!q2spi_hc_var5) { + Q2SPI_ERROR(q2spi, "%s Err Invalid q2spi_hc_var5\n", __func__); + return -EINVAL; + } + Q2SPI_DEBUG(q2spi, "%s var_5:%p q2spi_req:%p cmd:%d\n", + __func__, q2spi_hc_var5, q2spi_req, q2spi_req.cmd); + Q2SPI_DEBUG(q2spi, "%s pkt_var_1:%p pkt_dma:%p pkt_var_5:%p\n", + __func__, q2spi_pkt->var1_pkt, q2spi_pkt->var5_tx_dma, q2spi_pkt->var5_pkt); + if (q2spi_req.data_len > Q2SPI_MAX_DATA_LEN) { + Q2SPI_ERROR(q2spi, "%s Err (q2spi_req.data_len > Q2SPI_MAX_DATA_LEN) %d return\n", + __func__, q2spi_req.data_len); + return -ENOMEM; + } + + if (q2spi_req.cmd == DATA_READ || q2spi_req.cmd == HRF_READ) { + q2spi_hc_var5->cmd = HC_SMA_READ; + q2spi_pkt->m_cmd_param = Q2SPI_TX_RX; + ret = q2spi_alloc_rx_buf(q2spi, q2spi_req.data_len); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err failed to alloc RX DMA buf\n", __func__); + 
return -ENOMEM; + } + } else if (q2spi_req.cmd == DATA_WRITE || q2spi_req.cmd == HRF_WRITE) { + q2spi_hc_var5->cmd = HC_SMA_WRITE; + q2spi_pkt->m_cmd_param = Q2SPI_TX_ONLY; + Q2SPI_DEBUG(q2spi, "%s req_data_buf:%p\n", __func__, q2spi_req.data_buff); + q2spi_req.data_len = sizeof(q2spi_hc_var5->data_buf) <= q2spi_req.data_len ? + sizeof(q2spi_hc_var5->data_buf) : q2spi_req.data_len; + memcpy(q2spi_hc_var5->data_buf, q2spi_req.data_buff, q2spi_req.data_len); + q2spi_dump_ipc(q2spi, q2spi->ipc, "sma format q2spi_req data_buf", + (char *)q2spi_req.data_buff, q2spi_req.data_len); + q2spi_dump_ipc(q2spi, q2spi->ipc, "sma format var5 data_buf", + (char *)q2spi_hc_var5->data_buf, q2spi_req.data_len); + q2spi_kfree(q2spi, q2spi_req.data_buff); + } + if (q2spi_req.flow_id < Q2SPI_END_TID_ID) + q2spi_hc_var5->flow = MC_FLOW; + else + q2spi_hc_var5->flow = CM_FLOW; + q2spi_hc_var5->interrupt = CLIENT_INTERRUPT; + q2spi_hc_var5->seg_last = SEGMENT_LST; + q2spi_pkt->data_length = q2spi_req.data_len; + if (q2spi_req.data_len % 4) { + q2spi_hc_var5->dw_len_part1 = (q2spi_req.data_len / 4); + q2spi_hc_var5->dw_len_part2 = (q2spi_req.data_len / 4) >> 2; + } else { + q2spi_hc_var5->dw_len_part1 = (q2spi_req.data_len / 4) - 1; + q2spi_hc_var5->dw_len_part2 = ((q2spi_req.data_len / 4) - 1) >> 2; + } + Q2SPI_DEBUG(q2spi, "dw_len_part1:%d dw_len_part2:%d\n", + q2spi_hc_var5->dw_len_part1, q2spi_hc_var5->dw_len_part2); + q2spi_hc_var5->access_type = SYSTEM_MEMORY_ACCESS; + q2spi_hc_var5->address_mode = NO_CLIENT_ADDRESS; + if (!q2spi_req.flow_id && !q2spi->hrf_flow) { + flow_id = q2spi_alloc_xfer_tid(q2spi); + if (flow_id < 0) { + Q2SPI_ERROR(q2spi, "%s Err failed to alloc tid", __func__); + return -EINVAL; + } + q2spi_hc_var5->flow_id = flow_id; + } else { + if (q2spi_req.flow_id < Q2SPI_END_TID_ID) + q2spi_hc_var5->flow_id = q2spi_pkt->hrf_flow_id; + else + q2spi_hc_var5->flow_id = q2spi_req.flow_id; + } + q2spi->xfer->tid = q2spi_hc_var5->flow_id; + q2spi_pkt->var5_pkt = q2spi_hc_var5; 
+ q2spi_pkt->vtype = VARIANT_5; + q2spi_pkt->valid = true; + q2spi_pkt->sync = q2spi_req.sync; + Q2SPI_DEBUG(q2spi, "%s flow id:%d q2spi_pkt:%p pkt_var1:%p pkt_tx_dma:%p var5_pkt:%p\n", + __func__, q2spi_hc_var5->flow_id, q2spi_pkt, + q2spi_pkt->var1_pkt, q2spi_pkt->var5_tx_dma, q2spi_pkt->var5_pkt); + q2spi_dump_ipc(q2spi, q2spi->ipc, "sma format var5(2) data_buf", + (char *)q2spi_hc_var5->data_buf, q2spi_req.data_len); + return q2spi_hc_var5->flow_id; +} + +static int q2spi_abort_command(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, + struct q2spi_packet **q2spi_pkt_ptr) +{ + struct q2spi_host_abort_pkt *q2spi_abort_req; + struct q2spi_packet *q2spi_pkt; + + if (!q2spi) { + Q2SPI_ERROR(q2spi, "%s Err Invalid q2spi\n", __func__); + return -EINVAL; + } + Q2SPI_DEBUG(q2spi, "%s q2spi_req:%p cmd:%d addr:%d flow_id:%d data_len:%d\n", + __func__, q2spi_req, q2spi_req.cmd, q2spi_req.addr, + q2spi_req.flow_id, q2spi_req.data_len); + q2spi_pkt = q2spi_kzalloc(q2spi, sizeof(struct q2spi_packet)); + if (!q2spi_pkt) { + Q2SPI_ERROR(q2spi, "%s Err Invalid q2spi_pkt\n", __func__); + return -ENOMEM; + } + + *q2spi_pkt_ptr = q2spi_pkt; + + q2spi_abort_req = q2spi_alloc_host_variant(q2spi, sizeof(struct q2spi_host_abort_pkt)); + if (!q2spi_abort_req) { + Q2SPI_ERROR(q2spi, "%s Err alloc and map failed\n", __func__); + return -EINVAL; + } + + q2spi_abort_req->cmd = HC_ABORT; + q2spi_abort_req->flow_id = q2spi_alloc_xfer_tid(q2spi); + q2spi->xfer->tid = q2spi_abort_req->flow_id; + q2spi_abort_req->code = 0; + q2spi_pkt->abort_pkt = q2spi_abort_req; + q2spi_pkt->vtype = VAR_ABORT; + q2spi_pkt->m_cmd_param = Q2SPI_TX_ONLY; + + return q2spi_abort_req->flow_id; +} + +static int q2spi_soft_reset(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, + struct q2spi_packet **q2spi_pkt_ptr) +{ + struct q2spi_host_soft_reset_pkt *q2spi_softreset_req; + struct q2spi_packet *q2spi_pkt; + + if (!q2spi) { + Q2SPI_ERROR(q2spi, "%s Err Invalid q2spi\n", __func__); + return -EINVAL; 
+ } + Q2SPI_DEBUG(q2spi, "%s q2spi_req:%p cmd:%d addr:%d flow_id:%d data_len:%d\n", + __func__, q2spi_req, q2spi_req.cmd, q2spi_req.addr, + q2spi_req.flow_id, q2spi_req.data_len); + q2spi_pkt = q2spi_kzalloc(q2spi, sizeof(struct q2spi_packet)); + if (!q2spi_pkt) { + Q2SPI_ERROR(q2spi, "%s Err Invalid q2spi_pkt\n", __func__); + return -ENOMEM; + } + + *q2spi_pkt_ptr = q2spi_pkt; + + q2spi_softreset_req = q2spi_alloc_host_variant(q2spi, + sizeof(struct q2spi_host_soft_reset_pkt)); + if (!q2spi_softreset_req) { + Q2SPI_ERROR(q2spi, "%s Err alloc and map failed\n", __func__); + return -EINVAL; + } + q2spi_softreset_req->cmd = HC_SOFT_RESET; + q2spi_softreset_req->flags = HC_SOFT_RESET_FLAGS; + q2spi_softreset_req->code = HC_SOFT_RESET_CODE; + q2spi_softreset_req->flow_id = q2spi_alloc_xfer_tid(q2spi); + q2spi->xfer->tid = q2spi_softreset_req->flow_id; + q2spi_pkt->soft_reset_pkt = q2spi_softreset_req; + q2spi_pkt->vtype = VAR_SOFT_RESET; + q2spi_pkt->m_cmd_param = Q2SPI_TX_ONLY; + + return q2spi_softreset_req->flow_id; +} + +void q2spi_notify_data_avail_for_client(struct q2spi_geni *q2spi) +{ + Q2SPI_DEBUG(q2spi, "%s wake userspace\n", __func__); + atomic_inc(&q2spi->rx_avail); + wake_up_interruptible(&q2spi->readq); + wake_up(&q2spi->read_wq); +} + +void q2spi_add_req_to_rx_queue(struct q2spi_geni *q2spi, u32 status, u32 cmd) +{ + struct q2spi_packet *q2spi_pkt; + + Q2SPI_DEBUG(q2spi, "%s status: 0x%x init:%d cmd:%d\n", __func__, status, q2spi->init, cmd); + if (q2spi->init) { + Q2SPI_DEBUG(q2spi, "%s Completed transfer PID=%d\n", __func__, current->pid); + complete_all(&q2spi->sync_wait); + return; + } + + Q2SPI_DEBUG(q2spi, "%s tx_list:%p tx_list_next:%p tx_list_prev:%p\n", + __func__, &q2spi->tx_queue_list, &q2spi->tx_queue_list.next, + &q2spi->tx_queue_list.prev); + q2spi_pkt = list_first_entry(&q2spi->tx_queue_list, struct q2spi_packet, list); + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p in_use=%d\n", __func__, q2spi_pkt, q2spi_pkt->in_use); + q2spi_pkt->in_use = false; 
+ Q2SPI_DEBUG(q2spi, "%s q2spi_pkt=%p &q2spi_pkt->list=0x%p &q2spi->tx_queue_list=0x%p\n", + __func__, q2spi_pkt, &q2spi_pkt->list, &q2spi->tx_queue_list); + q2spi_pkt->status = SUCCESS; + Q2SPI_DEBUG(q2spi, "%s tx_ist:%p tx_list_next:%p tx_list_prev:%p\n", + __func__, &q2spi->tx_queue_list, &q2spi->tx_queue_list.next, + &q2spi->tx_queue_list.prev); + if (q2spi_pkt->vtype != VARIANT_1_HRF) { + list_del(&q2spi_pkt->list); + Q2SPI_DEBUG(q2spi, "%s &q2spi_pkt:%p tx_list:%p tx_list_next:%p tx_list_prev:%p\n", + __func__, &q2spi_pkt, &q2spi->tx_queue_list, &q2spi->tx_queue_list.next, + &q2spi->tx_queue_list.prev); + Q2SPI_DEBUG(q2spi, + "%s q2spi_pkt_list:%p next:%p prev:%p rx_list:%p next:%p prev:%p\n", + __func__, &q2spi_pkt->list, &q2spi_pkt->list.next, + &q2spi_pkt->list.prev, &q2spi->rx_queue_list, + &q2spi->rx_queue_list.next, &q2spi->rx_queue_list.prev); + list_add_tail(&q2spi_pkt->list, &q2spi->rx_queue_list); + Q2SPI_DEBUG(q2spi, "%s rx_list:%p rx_list_next:%p rx_list_prev:%p\n", + __func__, &q2spi->rx_queue_list, &q2spi->rx_queue_list.next, + &q2spi->rx_queue_list.prev); + if (q2spi_pkt->sync) { + complete_all(&q2spi->sync_wait); + } else { + /* Notify poll */ + q2spi_notify_data_avail_for_client(q2spi); + } + } +} + +int q2spi_hrf_flow(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req) +{ + struct q2spi_request *q2spi_hrf_req; + struct q2spi_packet *q2spi_pkt; + int ret = 0; + + q2spi->hrf_flow = true; + + ret = q2spi_hrf_entry_format(q2spi, q2spi_req, &q2spi_hrf_req); + if (ret < 0) { + Q2SPI_ERROR(q2spi, "%s Err q2spi_hrf_entry_format failed ret:%d\n", __func__, ret); + return ret; + } + + Q2SPI_DEBUG(q2spi, "%s q2spi req:%p cmd:%d flow_id:%d data_buff:%p\n", + __func__, q2spi_req, q2spi_req.cmd, q2spi_req.flow_id, q2spi_req.data_buff); + Q2SPI_DEBUG(q2spi, "%s addr:0x%x proto:0x%x data_len:0x%x\n", + __func__, q2spi_req.addr, q2spi_req.proto_ind, q2spi_req.data_len); + + ret = q2spi_frame_lra(q2spi, *q2spi_hrf_req, &q2spi_pkt); + Q2SPI_DEBUG(q2spi, 
"%s q2spi_hrf_req:%p q2spi_pkt:%p\n", + __func__, q2spi_hrf_req, q2spi_pkt); + if (ret < 0) { + Q2SPI_ERROR(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n", __func__, ret); + return ret; + } + + q2spi_pkt->hrf_flow_id = ret; + ret = q2spi_sma_format(q2spi, q2spi_req, q2spi_pkt); + if (ret < 0) { + Q2SPI_DEBUG(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n", __func__, ret); + return ret; + } + list_add_tail(&q2spi_pkt->list, &q2spi->tx_queue_list); + q2spi_pkt->vtype = VARIANT_1_HRF; + q2spi_kfree(q2spi, q2spi_hrf_req); + Q2SPI_DEBUG(q2spi, "%s q2spi_req:%p q2spi_pkt:%p\n", __func__, q2spi_req, q2spi_pkt); + return ret; +} + +void q2spi_print_req_cmd(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req) +{ + if (q2spi_req.cmd == LOCAL_REG_READ) + Q2SPI_DEBUG(q2spi, "%s cmd:LOCAL_REG_READ\n", __func__); + else if (q2spi_req.cmd == LOCAL_REG_WRITE) + Q2SPI_DEBUG(q2spi, "%s cmd:LOCAL_REG_WRITE\n", __func__); + else if (q2spi_req.cmd == HRF_READ) + Q2SPI_DEBUG(q2spi, "%s cmd:HRF_READ\n", __func__); + else if (q2spi_req.cmd == HRF_WRITE) + Q2SPI_DEBUG(q2spi, "%s cmd:HRF_WRITE\n", __func__); + else if (q2spi_req.cmd == DATA_READ) + Q2SPI_DEBUG(q2spi, "%s cmd:DATA_READ\n", __func__); + else if (q2spi_req.cmd == DATA_WRITE) + Q2SPI_DEBUG(q2spi, "%s cmd:DATA_WRITE\n", __func__); + else + Q2SPI_DEBUG(q2spi, "%s Invalid cmd:%d\n", __func__, q2spi_req.cmd); +} + +int q2spi_add_req_to_tx_queue(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req) +{ + struct q2spi_packet *q2spi_pkt; + int ret = -EINVAL; + + q2spi_print_req_cmd(q2spi, q2spi_req); + Q2SPI_DEBUG(q2spi, "%s list_empty:%d\n", + __func__, list_empty(&q2spi->tx_queue_list)); + if (q2spi_req.cmd == LOCAL_REG_READ || q2spi_req.cmd == LOCAL_REG_WRITE) { + ret = q2spi_frame_lra(q2spi, q2spi_req, &q2spi_pkt); + if (ret < 0) { + Q2SPI_DEBUG(q2spi, "q2spi_frame_lra failed ret:%d\n", ret); + return ret; + } + list_add_tail(&q2spi_pkt->list, &q2spi->tx_queue_list); + } else if (q2spi_req.cmd == DATA_READ || 
q2spi_req.cmd == DATA_WRITE) { + q2spi_pkt = q2spi_kzalloc(q2spi, sizeof(struct q2spi_packet)); + if (!q2spi_pkt) { + Q2SPI_DEBUG(q2spi, "%s Err Invalid q2spi_pkt\n", __func__); + return -ENOMEM; + } + ret = q2spi_sma_format(q2spi, q2spi_req, q2spi_pkt); + if (ret < 0) { + Q2SPI_DEBUG(q2spi, "q2spi_frame_lra failed ret:%d\n", ret); + return ret; + } + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p in_use=%d ret:%d\n", + __func__, q2spi_pkt, q2spi_pkt->in_use, ret); + list_add_tail(&q2spi_pkt->list, &q2spi->tx_queue_list); + } else if (q2spi_req.cmd == HRF_READ || q2spi_req.cmd == HRF_WRITE) { + ret = q2spi_hrf_flow(q2spi, q2spi_req); + if (ret < 0) { + Q2SPI_DEBUG(q2spi, "q2spi_hrf_flow failed ret:%d\n", ret); + return ret; + } + } else if (q2spi_req.cmd == ABORT) { + ret = q2spi_abort_command(q2spi, q2spi_req, &q2spi_pkt); + if (ret < 0) { + Q2SPI_DEBUG(q2spi, "abort_command failed ret:%d\n", ret); + return ret; + } + list_add_tail(&q2spi_pkt->list, &q2spi->tx_queue_list); + } else if (q2spi_req.cmd == SOFT_RESET) { + ret = q2spi_soft_reset(q2spi, q2spi_req, &q2spi_pkt); + if (ret < 0) { + Q2SPI_DEBUG(q2spi, "soft_reset failed ret:%d\n", ret); + return ret; + } + list_add_tail(&q2spi_pkt->list, &q2spi->tx_queue_list); + } else { + Q2SPI_ERROR(q2spi, "%s Err cmd:%d\n", __func__, q2spi_req.cmd); + return -EINVAL; + } + + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p req_cmd:%d\n", __func__, q2spi_pkt, q2spi_req.cmd); + Q2SPI_DEBUG(q2spi, "%s ret:%d\n", __func__, ret); + return ret; +} + +bool q2spi_cmd_type_valid(struct q2spi_request q2spi_req) +{ + if (q2spi_req.cmd != LOCAL_REG_READ && + q2spi_req.cmd != LOCAL_REG_WRITE && + q2spi_req.cmd != DATA_READ && + q2spi_req.cmd != DATA_WRITE && + q2spi_req.cmd != HRF_READ && + q2spi_req.cmd != HRF_WRITE && + q2spi_req.cmd != SOFT_RESET && + q2spi_req.cmd != ABORT) { + pr_err("%s Err Invalid cmd type %d\n", __func__, q2spi_req.cmd); + return false; + } + + if (!q2spi_req.data_len) { + pr_err("%s Invalid data len %d bytes\n", __func__, 
q2spi_req.data_len); + return false; + } + return true; +} + +static int q2spi_check_var1_avail_buff(struct q2spi_geni *q2spi) +{ + unsigned int i, count = 0; + + for (i = 0; i < Q2SPI_MAX_BUF; i++) { + if (!q2spi->var1_buf_used[i]) + count++; + else + Q2SPI_DEBUG(q2spi, "%s Var1 buffer in use %p\n", + __func__, q2spi->var1_buf_used[i]); + } + return count; +} + +static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t len, loff_t *f_pos) +{ + struct q2spi_geni *q2spi; + struct q2spi_request q2spi_req; + int flow_id = 0; + unsigned long timeout = 0, xfer_timeout = 0; + void *data_buf = NULL; + + if (!filp || !buf || !len || !filp->private_data) { + pr_err("%s Err Null pointer\n", __func__); + return -EINVAL; + } + + q2spi = filp->private_data; + Q2SPI_DEBUG(q2spi, "%s Enter PID=%d free_buffers:%d\n", + __func__, current->pid, q2spi_check_var1_avail_buff(q2spi)); + if (!q2spi_check_var1_avail_buff(q2spi)) { + Q2SPI_ERROR(q2spi, "%s Err Short of var1 buffers\n", __func__); + return -EAGAIN; + } + + if (len != sizeof(struct q2spi_request)) { + Q2SPI_ERROR(q2spi, "%s Err Invalid length %d Expected %d\n", + __func__, len, sizeof(struct q2spi_request)); + return -EINVAL; + } + + if (copy_from_user(&q2spi_req, buf, sizeof(struct q2spi_request))) { + Q2SPI_DEBUG(q2spi, "%s Err copy_from_user failed\n", __func__); + return -EFAULT; + } + Q2SPI_DEBUG(q2spi, "%s userspace q2spi_req:%p\n", __func__, q2spi_req); + Q2SPI_DEBUG(q2spi, "%s cmd:%d data_len:%d addr:%d proto:%d end:%d\n", + __func__, q2spi_req.cmd, q2spi_req.data_len, q2spi_req.addr, + q2spi_req.proto_ind, q2spi_req.end_point); + Q2SPI_DEBUG(q2spi, "%s priority:%d flow_id:%d sync:%d data_buf:%x\n", __func__, + q2spi_req.priority, q2spi_req.flow_id, q2spi_req.sync, q2spi_req.data_buff); + + if (!q2spi_cmd_type_valid(q2spi_req)) + return -EINVAL; + + if (q2spi_req.cmd == HRF_WRITE) { + q2spi_req.addr = Q2SPI_HRF_PUSH_ADDRESS; + q2spi_req.sync = 1; + q2spi_req.priority = 1; + q2spi_req.data_len += 
((q2spi_req.data_len % DATA_WORD_LEN) ? + (DATA_WORD_LEN - (q2spi_req.data_len % DATA_WORD_LEN)) : 0); + } + + if (q2spi_req.cmd == LOCAL_REG_WRITE || q2spi_req.cmd == DATA_WRITE || + q2spi_req.cmd == HRF_WRITE) { + data_buf = q2spi_kzalloc(q2spi, q2spi_req.data_len); + if (!data_buf) { + Q2SPI_DEBUG(q2spi, "%s Err buffer alloc failed\n", __func__); + return -ENOMEM; + } + + if (copy_from_user(data_buf, q2spi_req.data_buff, q2spi_req.data_len)) { + Q2SPI_DEBUG(q2spi, "%s Err copy_from_user failed\n", __func__); + kfree(data_buf); + return -EFAULT; + } + + q2spi_dump_ipc(q2spi, q2spi->ipc, "q2psi_transfer", (char *)data_buf, + q2spi_req.data_len); + q2spi_req.data_buff = data_buf; + } + + mutex_lock(&q2spi->queue_lock); + reinit_completion(&q2spi->sync_wait); + flow_id = q2spi_add_req_to_tx_queue(q2spi, q2spi_req); + Q2SPI_DEBUG(q2spi, "%s flow_id:%d\n", __func__, flow_id); + if (flow_id < 0) { + mutex_unlock(&q2spi->queue_lock); + Q2SPI_DEBUG(q2spi, "%s Err Failed to add tx request ret:%d\n", __func__, flow_id); + return -ENOMEM; + } + mutex_unlock(&q2spi->queue_lock); + kthread_queue_work(q2spi->kworker, &q2spi->send_messages); + + if (q2spi_req.sync) { + xfer_timeout = msecs_to_jiffies(XFER_TIMEOUT_OFFSET); + timeout = wait_for_completion_interruptible_timeout + (&q2spi->sync_wait, xfer_timeout); + if (timeout <= 0) { + Q2SPI_DEBUG(q2spi, "%s Err timeout for sync_wait\n", __func__); + return -ETIMEDOUT; + } + Q2SPI_DEBUG(q2spi, "%s sync_wait completed\n", __func__); + + Q2SPI_DEBUG(q2spi, "%s free_buffers available:%d\n", + __func__, q2spi_check_var1_avail_buff(q2spi)); + if (q2spi_req.cmd == LOCAL_REG_READ) { + if (copy_to_user(q2spi_req.data_buff, q2spi->xfer->rx_buf, + q2spi_req.data_len)) { + Q2SPI_DEBUG(q2spi, "%s Err copy_to_user fail\n", __func__); + kfree(data_buf); + return -EFAULT; + } + } else { + Q2SPI_DEBUG(q2spi, "%s ret len:%d\n", __func__, len); + return len; + } + } + + /* + * return flow_id for async case so that userspace can match this flow_id 
+ * or the responses received asynchronously + */ + Q2SPI_DEBUG(q2spi, "%s return flow_id:%d\n", __func__, flow_id); + Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); + return flow_id; +} + +static ssize_t q2spi_response(struct file *filp, char __user *buf, size_t count, loff_t *ppos) +{ + struct q2spi_geni *q2spi; + struct q2spi_packet *q2spi_pkt = NULL; + struct q2spi_host_variant1_pkt *q2spi_hc_var1; + struct q2spi_host_variant4_5_pkt *q2spi_req; + struct q2spi_host_abort_pkt *q2spi_abort_req; + struct q2spi_host_soft_reset_pkt *q2spi_softreset_req; + struct q2spi_client_request cr_request; + struct q2spi_cr_packet *q2spi_cr_pkt = NULL; + struct q2spi_client_dma_pkt *q2spi_cr_var3; + int ret = 0, dw_len = 0, i = 0, no_of_crs = 0; + + if (!filp || !buf || !count || !filp->private_data) { + pr_err("%s Err Null pointer\n", __func__); + return -EINVAL; + } + + q2spi = filp->private_data; + + Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid); + Q2SPI_DEBUG(q2spi, "%s list_empty_rx_list:%d list_empty_cr_list:%d\n", + __func__, list_empty(&q2spi->rx_queue_list), list_empty(&q2spi->cr_queue_list)); + if (copy_from_user(&cr_request, buf, sizeof(struct q2spi_client_request)) != 0) { + Q2SPI_ERROR(q2spi, "%s copy from user failed PID=%d\n", __func__, current->pid); + return -EFAULT; + } + if (!list_empty(&q2spi->rx_queue_list)) { + q2spi_pkt = list_first_entry(&q2spi->rx_queue_list, struct q2spi_packet, list); + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p\n", __func__, q2spi_pkt); + if (q2spi_pkt->vtype == VARIANT_1) { + q2spi_hc_var1 = q2spi_pkt->var1_pkt; + if (q2spi_hc_var1->cmd == HC_DATA_READ) + memcpy(&cr_request.data_buff, q2spi_hc_var1->data_buf, + (q2spi_hc_var1->dw_len + 1) * 4); + } else if (q2spi_pkt->vtype == VARIANT_4) { + q2spi_req = q2spi_pkt->var4_pkt; + } else if (q2spi_pkt->vtype == VARIANT_5) { + q2spi_req = q2spi_pkt->var5_pkt; + } else if (q2spi_pkt->vtype == VARIANT_1_HRF) { + q2spi_req = q2spi_pkt->var5_pkt; + } else if
(q2spi_pkt->vtype == ABORT) { + q2spi_abort_req = q2spi_pkt->abort_pkt; + } else if (q2spi_pkt->vtype == SOFT_RESET) { + q2spi_softreset_req = q2spi_pkt->soft_reset_pkt; + } + cr_request.status = q2spi_pkt->status; + q2spi_pkt->valid = false; + } + + Q2SPI_DEBUG(q2spi, "%s waiting on wait_event_interruptible\n", __func__); + /* Block on read until CR available in cr_queue_list */ + ret = wait_event_interruptible(q2spi->read_wq, + (!list_empty(&q2spi->cr_queue_list) && + atomic_read(&q2spi->rx_avail))); + if (ret < 0) { + Q2SPI_DEBUG(q2spi, "%s Err wait interrupted ret:%d\n", __func__, ret); + return ret; + } + atomic_dec(&q2spi->rx_avail); + Q2SPI_DEBUG(q2spi, "%s wait unblocked ret:%d\n", __func__, ret); + if (!list_empty(&q2spi->cr_queue_list)) { + q2spi_cr_pkt = list_first_entry(&q2spi->cr_queue_list, + struct q2spi_cr_packet, list); + no_of_crs = q2spi_cr_pkt->no_of_valid_crs; + for (i = 0; i < no_of_crs; i++) { + Q2SPI_DEBUG(q2spi, "%s cr_pkt:%p no_of_crs:%d i:%d type:0x%x\n", + __func__, q2spi_cr_pkt, no_of_crs, i, q2spi_cr_pkt->type); + if (((q2spi_cr_pkt->type >> (2 * i)) & GENMASK(1, 0)) == 2) { + q2spi_cr_var3 = &q2spi_cr_pkt->var3_pkt; + Q2SPI_DEBUG(q2spi, "%s q2spi_cr_var3:%p\n", + __func__, q2spi_cr_var3); + Q2SPI_DEBUG(q2spi, "q2spi_cr_var3 len_part1:%d len_part2:%d\n", + q2spi_cr_var3->dw_len_part1, + q2spi_cr_var3->dw_len_part2); + Q2SPI_DEBUG(q2spi, + "q2spi_cr_var3 flow_id:%d arg1:0x%x arg2:0x%x arg3:0x%x\n", + q2spi_cr_var3->flow_id, q2spi_cr_var3->arg1, + q2spi_cr_var3->arg2, q2spi_cr_var3->arg3); + /* + * Doorbell case tid will be updated by client. 
+ * q2spi sends the ID to userspace + * so that it will call HC with this flow id for async case + */ + cr_request.flow_id = q2spi_cr_var3->flow_id; + cr_request.cmd = q2spi_cr_pkt->cr_hdr[i]->cmd; + dw_len = (((q2spi_cr_pkt->var3_pkt.dw_len_part3 << 12) & 0xFF) | + ((q2spi_cr_pkt->var3_pkt.dw_len_part2 << 4) & 0xFF) | + q2spi_cr_pkt->var3_pkt.dw_len_part1); + cr_request.data_len = (dw_len * 4) + 4; + cr_request.end_point = q2spi_cr_var3->arg2; + cr_request.proto_ind = q2spi_cr_var3->arg3; + Q2SPI_DEBUG(q2spi, + "%s CR cmd:%d flow_id:%d data_len:%d ep:%d proto:%d status:%d\n", + __func__, cr_request.cmd, cr_request.flow_id, + cr_request.data_len, cr_request.end_point, + cr_request.proto_ind, cr_request.status); + } else if ((q2spi_cr_pkt->type >> (2 * i) & GENMASK(1, 0)) == 1) { + Q2SPI_DEBUG(q2spi, "%s cr_request.flow_id:%d status:%d\n", + __func__, cr_request.flow_id, cr_request.status); + } else { + Q2SPI_ERROR(q2spi, "%s Err Unsupported CR Type\n", __func__); + return -EINVAL; + } + } + } + Q2SPI_DEBUG(q2spi, "q2spi_pkt:%p q2spi_cr_pkt:%p\n", q2spi_pkt, q2spi_cr_pkt); + if (!q2spi_pkt && !q2spi_cr_pkt) { + Q2SPI_ERROR(q2spi, "%s Err No q2spi_pkt or q2spi_cr_pkt\n", __func__); + return -EINVAL; + } + Q2SPI_DEBUG(q2spi, "data_len:%d ep:%d proto:%d cmd%d status%d flow_id:%d", + cr_request.data_len, cr_request.end_point, cr_request.proto_ind, + cr_request.cmd, cr_request.status, cr_request.flow_id); + q2spi_dump_ipc(q2spi, q2spi->ipc, "q2spi_response", + (char *)q2spi_cr_pkt->xfer->rx_buf, cr_request.data_len); + ret = copy_to_user(buf, &cr_request, sizeof(struct q2spi_client_request)); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err copy_to_user failed ret:%d", __func__, ret); + return -EAGAIN; + } + ret = copy_to_user(cr_request.data_buff, + (void *)q2spi_cr_pkt->xfer->rx_buf, cr_request.data_len); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err copy_to_user data_buff failed ret:%d", __func__, ret); + return -EAGAIN; + } + ret = (sizeof(struct q2spi_client_request) - ret); + +
if (q2spi_cr_pkt) + list_del(&q2spi_cr_pkt->list); + Q2SPI_DEBUG(q2spi, "%s list_empty_rx_list:%d list_empty_cr_list:%d", + __func__, list_empty(&q2spi->rx_queue_list), list_empty(&q2spi->cr_queue_list)); + Q2SPI_DEBUG(q2spi, "%s End ret:%d PID=%d", __func__, ret, current->pid); + return ret; +} + +static __poll_t q2spi_poll(struct file *filp, poll_table *wait) +{ + struct q2spi_geni *q2spi; + __poll_t mask = 0; + + if (!filp || !filp->private_data) { + pr_err("%s Err Null pointer\n", __func__); + return -EINVAL; + } + + q2spi = filp->private_data; + Q2SPI_DEBUG(q2spi, "%s PID:%d\n", __func__, current->pid); + poll_wait(filp, &q2spi->readq, wait); + Q2SPI_DEBUG(q2spi, "%s after poll_wait\n", __func__); + if (atomic_read(&q2spi->rx_avail)) { + mask = (POLLIN | POLLRDNORM); + Q2SPI_DEBUG(q2spi, "%s RX data available\n", __func__); + } + return mask; +} + +static int q2spi_release(struct inode *inode, struct file *filp) +{ + struct q2spi_geni *q2spi; + + if (!filp || !filp->private_data) { + pr_err("%s Err close return\n", __func__); + return -EINVAL; + } + q2spi = filp->private_data; + + Q2SPI_DEBUG(q2spi, "%s End PID:%d allocs:%d\n", + __func__, current->pid, q2spi_alloc_count); + return 0; +} + +static const struct file_operations q2spi_fops = { + .owner = THIS_MODULE, + .open = q2spi_open, + .write = q2spi_transfer, + .read = q2spi_response, + .poll = q2spi_poll, + .release = q2spi_release, +}; + +static int q2spi_se_clk_cfg(u32 speed_hz, struct q2spi_geni *q2spi, + int *clk_idx, int *clk_div) +{ + unsigned long sclk_freq; + unsigned long res_freq; + struct geni_se *se = &q2spi->se; + int ret = 0; + + ret = geni_se_clk_freq_match(&q2spi->se, (speed_hz * q2spi->oversampling), clk_idx, + &sclk_freq, false); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err Failed(%d) to find src clk for 0x%x\n", + __func__, ret, speed_hz); + return ret; + } + + *clk_div = DIV_ROUND_UP(sclk_freq, (q2spi->oversampling * speed_hz)); + + if (!(*clk_div)) { + Q2SPI_ERROR(q2spi, "%s Err sclk:%lu 
oversampling:%d speed:%u\n", + __func__, sclk_freq, q2spi->oversampling, speed_hz); + return -EINVAL; + } + + res_freq = (sclk_freq / (*clk_div)); + + Q2SPI_DEBUG(q2spi, "%s req speed:%u resultant:%lu sclk:%lu, idx:%d, div:%d\n", + __func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div); + + ret = clk_set_rate(se->clk, sclk_freq); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err clk_set_rate failed %d\n", __func__, ret); + return ret; + } + return 0; +} + +/** + * q2spi_set_clock - Q2SPI SE clock configuration + * @q2spi: q2spi controller handle + * @clk_hz: SE clock in hz + * + * Set the Serial clock and dividers required as per the + * desired speed. + * + * Return: 0 on success. Error code on failure. + */ +static int q2spi_set_clock(struct q2spi_geni *q2spi, unsigned long clk_hz) +{ + u32 clk_sel, m_clk_cfg, idx, div; + struct geni_se *se = &q2spi->se; + int ret; + + if (clk_hz == q2spi->cur_speed_hz) + return 0; + + ret = q2spi_se_clk_cfg(clk_hz, q2spi, &idx, &div); + if (ret) { + Q2SPI_ERROR(q2spi, "Err setting clk to %lu: %d\n", clk_hz, ret); + return ret; + } + + /* + * Q2SPI core clock gets configured with the requested frequency + * or the frequency closer to the requested frequency. + * For that reason requested frequency is stored in the + * cur_speed_hz and referred in the consecutive transfer instead + * of calling clk_get_rate() API.
+ */ + q2spi->cur_speed_hz = clk_hz; + + clk_sel = idx & CLK_SEL_MSK; + m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN; + writel(clk_sel, se->base + SE_GENI_CLK_SEL); + writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG); + + Q2SPI_DEBUG(q2spi, "%s spee_hz:%u clk_sel:0x%x m_clk_cfg:0x%x div:%d\n", + __func__, q2spi->cur_speed_hz, clk_sel, m_clk_cfg, div); + return ret; +} + +void q2spi_geni_se_dump_regs(struct q2spi_geni *q2spi) +{ + Q2SPI_ERROR(q2spi, "GENI_STATUS: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_STATUS)); + Q2SPI_ERROR(q2spi, "SPI_TRANS_CFG: 0x%x\n", geni_read_reg(q2spi->base, SE_SPI_TRANS_CFG)); + Q2SPI_ERROR(q2spi, "SE_GENI_IOS: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_IOS)); + Q2SPI_ERROR(q2spi, "SE_GENI_M_CMD0: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_M_CMD0)); + Q2SPI_ERROR(q2spi, "GENI_M_CMD_CTRL_REG: 0x%x\n", + geni_read_reg(q2spi->base, SE_GENI_M_CMD_CTRL_REG)); + Q2SPI_ERROR(q2spi, "GENI_M_IRQ_STATUS: 0x%x\n", + geni_read_reg(q2spi->base, SE_GENI_M_IRQ_STATUS)); + Q2SPI_ERROR(q2spi, "GENI_M_IRQ_EN: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_M_IRQ_EN)); + Q2SPI_ERROR(q2spi, "GENI_TX_FIFO_STATUS: 0x%x\n", + geni_read_reg(q2spi->base, SE_GENI_TX_FIFO_STATUS)); + Q2SPI_ERROR(q2spi, "GENI_RX_FIFO_STATUS: 0x%x\n", + geni_read_reg(q2spi->base, SE_GENI_RX_FIFO_STATUS)); + Q2SPI_ERROR(q2spi, "DMA_TX_PTR_L: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_PTR_L)); + Q2SPI_ERROR(q2spi, "DMA_TX_PTR_H: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_PTR_H)); + Q2SPI_ERROR(q2spi, "DMA_TX_ATTR: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_ATTR)); + Q2SPI_ERROR(q2spi, "DMA_TX_LEN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_LEN)); + Q2SPI_ERROR(q2spi, "DMA_TX_IRQ_STAT: 0x%x\n", + geni_read_reg(q2spi->base, SE_DMA_TX_IRQ_STAT)); + Q2SPI_ERROR(q2spi, "DMA_TX_LEN_IN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_LEN_IN)); + Q2SPI_ERROR(q2spi, "DMA_RX_PTR_L: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_PTR_L)); + Q2SPI_ERROR(q2spi, "DMA_RX_PTR_H: 0x%x\n", 
geni_read_reg(q2spi->base, SE_DMA_RX_PTR_H)); + Q2SPI_ERROR(q2spi, "DMA_RX_ATTR: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_ATTR)); + Q2SPI_ERROR(q2spi, "DMA_RX_LEN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_LEN)); + Q2SPI_ERROR(q2spi, "DMA_RX_IRQ_STAT: 0x%x\n", + geni_read_reg(q2spi->base, SE_DMA_RX_IRQ_STAT)); + Q2SPI_ERROR(q2spi, "DMA_RX_LEN_IN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_LEN_IN)); + Q2SPI_ERROR(q2spi, "DMA_DEBUG_REG0: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_DEBUG_REG0)); +} + +static irqreturn_t q2spi_geni_irq(int irq, void *data) +{ + struct q2spi_geni *q2spi = data; + unsigned int m_irq_status; + unsigned int s_irq_status; + unsigned int dma_tx_status; + unsigned int dma_rx_status; + + m_irq_status = geni_read_reg(q2spi->base, SE_GENI_M_IRQ_STATUS); + s_irq_status = geni_read_reg(q2spi->base, SE_GENI_S_IRQ_STATUS); + dma_tx_status = geni_read_reg(q2spi->base, SE_DMA_TX_IRQ_STAT); + dma_rx_status = geni_read_reg(q2spi->base, SE_DMA_RX_IRQ_STAT); + Q2SPI_DEBUG(q2spi, "%s sirq 0x%x mirq:0x%x dma_tx:0x%x dma_rx:0x%x\n", + __func__, s_irq_status, m_irq_status, dma_tx_status, dma_rx_status); + geni_write_reg(m_irq_status, q2spi->base, SE_GENI_M_IRQ_CLEAR); + geni_write_reg(s_irq_status, q2spi->base, SE_GENI_S_IRQ_CLEAR); + geni_write_reg(dma_tx_status, q2spi->base, SE_DMA_TX_IRQ_CLR); + geni_write_reg(dma_rx_status, q2spi->base, SE_DMA_RX_IRQ_CLR); + + return IRQ_HANDLED; +} + +static int q2spi_gsi_submit(struct q2spi_packet *q2spi_pkt) +{ + struct q2spi_geni *q2spi = q2spi_pkt->q2spi; + struct q2spi_dma_transfer *xfer = q2spi->xfer; + int ret = 0; + + Q2SPI_DEBUG(q2spi, "%s q2spi:%p xfer:%p\n", __func__, q2spi, xfer); + ret = q2spi_setup_gsi_xfer(q2spi_pkt); /* Todo check it */ + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err q2spi_setup_gsi_xfer failed: %d\n", __func__, ret); + q2spi_geni_se_dump_regs(q2spi); + gpi_dump_for_geni(q2spi->gsi->tx_c); + goto free_tid; + } + Q2SPI_DEBUG(q2spi, "%s waiting check_gsi_transfer_completion\n", 
__func__); + ret = check_gsi_transfer_completion(q2spi); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err completion timeout: %d\n", __func__, ret); + q2spi_geni_se_dump_regs(q2spi); + dev_err(q2spi->dev, "%s Err dump gsi regs\n", __func__); + gpi_dump_for_geni(q2spi->gsi->tx_c); + goto free_tid; + } + +free_tid: + Q2SPI_DEBUG(q2spi, "%s flow_id:%d tx_dma:%p rx_dma:%p tid:%d\n", + __func__, q2spi->xfer->tid, xfer->tx_dma, xfer->rx_dma, q2spi->xfer->tid); + q2spi_unmap_dma_buf_used(q2spi, xfer->tx_dma, xfer->rx_dma); + q2spi_free_xfer_tid(q2spi, q2spi->xfer->tid); + return ret; +} + +static int q2spi_prep_var1_request(struct q2spi_geni *q2spi, struct q2spi_packet *q2spi_pkt) +{ + struct q2spi_host_variant1_pkt *q2spi_hc_var1; + struct q2spi_dma_transfer *var1_xfer = NULL; + + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->var1_pkt:%p &q2spi_pkt->var1_pkt:%p\n", + __func__, q2spi_pkt->var1_pkt, &q2spi_pkt->var1_pkt); + var1_xfer = q2spi_kzalloc(q2spi, sizeof(struct q2spi_dma_transfer)); + if (!var1_xfer) { + Q2SPI_ERROR(q2spi, "%s Err var1_xfer alloc failed\n", __func__); + return -ENOMEM; + } + var1_xfer->cmd = q2spi_pkt->m_cmd_param; + q2spi_hc_var1 = q2spi_pkt->var1_pkt; + var1_xfer->tx_buf = q2spi_pkt->var1_pkt; + var1_xfer->tx_dma = q2spi_pkt->var1_tx_dma; + var1_xfer->tx_data_len = (q2spi_pkt->var1_pkt->dw_len * 4) + 4; + var1_xfer->tx_len = Q2SPI_HEADER_LEN + var1_xfer->tx_data_len; + Q2SPI_DEBUG(q2spi, "%s var1_xfer->tx_len:%d var1_xfer->tx_data_len:%d\n", + __func__, var1_xfer->tx_len, var1_xfer->tx_data_len); + var1_xfer->tid = q2spi_pkt->var1_pkt->flow_id; + if (q2spi_pkt->m_cmd_param == Q2SPI_TX_RX) { + var1_xfer->tx_len = Q2SPI_HEADER_LEN; + Q2SPI_DEBUG(q2spi, "%s var1_xfer->tx_len:%d var1_xfer->tx_data_len:%d\n", + __func__, var1_xfer->tx_len, var1_xfer->tx_data_len); + var1_xfer->rx_buf = q2spi->xfer->rx_buf; + var1_xfer->rx_dma = q2spi->xfer->rx_dma; + var1_xfer->rx_data_len = (q2spi_pkt->var1_pkt->dw_len * 4) + 4; + var1_xfer->rx_len = var1_xfer->rx_data_len; + 
Q2SPI_DEBUG(q2spi, "%s var1_xfer->rx_len:%d var1_xfer->rx_data_len:%d\n", + __func__, var1_xfer->rx_len, var1_xfer->rx_data_len); + } + + Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p\n", __func__, + var1_xfer->tx_buf, var1_xfer->tx_dma, var1_xfer->rx_buf, var1_xfer->rx_dma); + q2spi_dump_ipc(q2spi, q2spi->ipc, "Preparing var1 tx_buf DMA TX", + (char *)var1_xfer->tx_buf, var1_xfer->tx_len); + q2spi->xfer = var1_xfer; + Q2SPI_DEBUG(q2spi, "%s xfer:%p\n", __func__, q2spi->xfer); + return 0; +} + +static int q2spi_prep_var5_request(struct q2spi_geni *q2spi, struct q2spi_packet *q2spi_pkt) +{ + struct q2spi_host_variant4_5_pkt *q2spi_hc_var5; + struct q2spi_dma_transfer *var5_xfer = NULL; + + var5_xfer = q2spi_kzalloc(q2spi, sizeof(struct q2spi_dma_transfer)); + if (!var5_xfer) { + Q2SPI_ERROR(q2spi, "%s Err var5_xfer alloc failed\n", __func__); + return -ENOMEM; + } + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->var5_pkt:%p var5_tx_dma:%p\n", + __func__, q2spi_pkt->var5_pkt, q2spi_pkt->var5_tx_dma); + q2spi_hc_var5 = q2spi_pkt->var5_pkt; + var5_xfer->cmd = q2spi_pkt->m_cmd_param; + var5_xfer->tx_buf = q2spi_pkt->var5_pkt; + var5_xfer->tx_dma = q2spi_pkt->var5_tx_dma; + var5_xfer->tid = q2spi_pkt->var5_pkt->flow_id; + var5_xfer->tx_data_len = q2spi_pkt->data_length; + var5_xfer->tx_len = Q2SPI_HEADER_LEN + var5_xfer->tx_data_len; + Q2SPI_DEBUG(q2spi, "%s var5_xfer->tx_len:%d var5_xfer->tx_data_len:%d\n", + __func__, var5_xfer->tx_len, var5_xfer->tx_data_len); + var5_xfer->tid = q2spi_pkt->var5_pkt->flow_id; + if (q2spi_pkt->m_cmd_param == Q2SPI_TX_RX) { + var5_xfer->tx_buf = q2spi_pkt->var5_pkt; + var5_xfer->rx_buf = q2spi->xfer->rx_buf; + var5_xfer->rx_dma = q2spi->xfer->rx_dma; + var5_xfer->tx_len = Q2SPI_HEADER_LEN; + var5_xfer->rx_len = + ((q2spi_pkt->var5_pkt->dw_len_part1 | + q2spi_pkt->var5_pkt->dw_len_part2 << 2) * 4) + 4; + var5_xfer->rx_data_len = q2spi_pkt->data_length; + Q2SPI_DEBUG(q2spi, "%s var5_pkt:%p cmd:%d flow_id:0x%x len_part1:%d 
len_part2:%d\n", + __func__, q2spi_pkt->var5_pkt, q2spi_pkt->var5_pkt->cmd, + q2spi_pkt->var5_pkt->flow_id, q2spi_pkt->var5_pkt->dw_len_part1, + q2spi_pkt->var5_pkt->dw_len_part2); + Q2SPI_DEBUG(q2spi, "%s var5_pkt data_buf:0x%x var5_xfer->rx_len:%d\n", + __func__, q2spi_pkt->var5_pkt->data_buf, var5_xfer->rx_len); + } + Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p\n", + __func__, var5_xfer->tx_buf, + var5_xfer->tx_dma, var5_xfer->rx_buf, var5_xfer->rx_dma); + q2spi_dump_ipc(q2spi, q2spi->ipc, "Preparing var5 tx_buf DMA TX", + (char *)var5_xfer->tx_buf, Q2SPI_HEADER_LEN); + if (q2spi_pkt->m_cmd_param == Q2SPI_TX_ONLY) { + q2spi_dump_ipc(q2spi, q2spi->ipc, "Preparing var5 data_buf DMA TX", + (void *)q2spi_pkt->var5_pkt->data_buf, var5_xfer->tx_data_len); + } + q2spi->xfer = var5_xfer; + return 0; +} + +static int q2spi_prep_hrf_request(struct q2spi_geni *q2spi, struct q2spi_packet *q2spi_pkt) +{ + struct q2spi_host_variant1_pkt *q2spi_hc_var1; + struct q2spi_dma_transfer *var1_xfer = NULL; + + var1_xfer = q2spi_kzalloc(q2spi, sizeof(struct q2spi_dma_transfer)); + if (!var1_xfer) { + Q2SPI_ERROR(q2spi, "%s Err var1_xfer alloc failed\n", __func__); + return -ENOMEM; + } + + q2spi_hc_var1 = q2spi_pkt->var1_pkt; + var1_xfer->cmd = Q2SPI_TX_ONLY; + var1_xfer->tx_buf = q2spi_pkt->var1_pkt; + var1_xfer->tx_dma = q2spi_pkt->var1_tx_dma; + var1_xfer->tx_data_len = 16; + var1_xfer->tx_len = Q2SPI_HEADER_LEN + var1_xfer->tx_data_len; + var1_xfer->tid = q2spi_pkt->var1_pkt->flow_id; + var1_xfer->rx_buf = q2spi->rx_buf; + var1_xfer->rx_len = RX_DMA_CR_BUF_SIZE; + Q2SPI_DEBUG(q2spi, "%s var1_pkt:%p var1_pkt_phy:%p cmd:%d addr:0x%x flow_id:0x%x\n", + __func__, q2spi_pkt->var1_pkt, q2spi_pkt->var1_tx_dma, q2spi_pkt->var1_pkt->cmd, + q2spi_pkt->var1_pkt->reg_offset, q2spi_pkt->var1_pkt->flow_id); + Q2SPI_DEBUG(q2spi, "%s var1_pkt: len:%d data_buf %p\n", + __func__, q2spi_pkt->var1_pkt->dw_len, q2spi_pkt->var1_pkt->data_buf); + Q2SPI_DEBUG(q2spi, "%s tx_buf:%p 
tx_dma:%p rx_buf:%p rx_dma:%p\n", + __func__, var1_xfer->tx_buf, var1_xfer->tx_dma, + var1_xfer->rx_buf, var1_xfer->rx_dma); + q2spi_dump_ipc(q2spi, q2spi->ipc, "Preparing var1_HRF DMA TX", + (char *)var1_xfer->tx_buf, var1_xfer->tx_len); + q2spi->xfer = var1_xfer; + return 0; +} + +static int +q2spi_process_hrf_flow_after_lra(struct q2spi_geni *q2spi, struct q2spi_packet *q2spi_pkt) +{ + struct q2spi_cr_packet *q2spi_cr_pkt; + unsigned long timeout = 0, xfer_timeout = 0; + int ret = -1; + + Q2SPI_DEBUG(q2spi, "%s VAR1 wait for doorbell\n", __func__); + /* Make sure we get the doorbell before continuing for HRF flow */ + xfer_timeout = msecs_to_jiffies(XFER_TIMEOUT_OFFSET); + timeout = wait_for_completion_interruptible_timeout(&q2spi->doorbell_up, xfer_timeout); + if (timeout <= 0) { + Q2SPI_ERROR(q2spi, "%s Err timeout for doorbell_wait\n", __func__); + return ret; + } + + if (!list_empty(&q2spi->hc_cr_queue_list)) { + q2spi_cr_pkt = list_first_entry(&q2spi->hc_cr_queue_list, + struct q2spi_cr_packet, list); + Q2SPI_DEBUG(q2spi, "%s list_del q2spi_cr_pkt:%p\n", __func__, q2spi_cr_pkt); + list_del(&q2spi_cr_pkt->list); + } else { + Q2SPI_DEBUG(q2spi, "%s CR queue_list is empty\n", __func__); + return ret; + } + + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p hrf_id:%d cr_id:%d\n", __func__, + q2spi_pkt, q2spi_pkt->hrf_flow_id, q2spi_cr_pkt->var3_pkt.flow_id); + if (q2spi_pkt->hrf_flow_id == q2spi_cr_pkt->var3_pkt.flow_id) { + q2spi_pkt->vtype = VARIANT_5; + ret = q2spi_prep_var5_request(q2spi, q2spi_pkt); + if (ret) + return ret; + } + mutex_lock(&q2spi->gsi_lock); + ret = q2spi_gsi_submit(q2spi_pkt); + if (ret) { + Q2SPI_ERROR(q2spi, "q2spi_gsi_submit failed: %d\n", ret); + mutex_unlock(&q2spi->gsi_lock); + return ret; + } + mutex_unlock(&q2spi->gsi_lock); + return ret; +} + +/** + * __q2spi_send_messages - function which processes q2spi message queue + * @q2spi_geni: controller to process queue + * + * This function checks if there is any message in the queue that + * 
needs processing and if so call out to the driver to initialize hardware + * and transfer each message. + * + */ +static int __q2spi_send_messages(struct q2spi_geni *q2spi) +{ + struct q2spi_packet *q2spi_pkt; + int ret = 0; + + /* Check if the queue is idle */ + if (list_empty(&q2spi->tx_queue_list)) { + Q2SPI_DEBUG(q2spi, "%s Tx queue list is empty\n", __func__); + return 0; + } + + /* Check if we need take a lock and frame the Q2SPI packet */ + /* if the list is not empty call q2spi_gsi_transfer msg to submit the transfer to GSI */ + Q2SPI_DEBUG(q2spi, "%s list_empty:%d\n", __func__, list_empty(&q2spi->tx_queue_list)); + list_for_each_entry(q2spi_pkt, &q2spi->tx_queue_list, list) { + if (list_empty(&q2spi->tx_queue_list)) + break; + if (q2spi_pkt->in_use) { + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt %p in use\n", __func__, q2spi_pkt); + continue; + } + q2spi_pkt->in_use = true; + Q2SPI_DEBUG(q2spi, "%s send q2spi_pkt %p\n", __func__, q2spi_pkt); + if (q2spi_pkt->vtype == VARIANT_1) + ret = q2spi_prep_var1_request(q2spi, q2spi_pkt); + else if (q2spi_pkt->vtype == VARIANT_5) + ret = q2spi_prep_var5_request(q2spi, q2spi_pkt); + else if (q2spi_pkt->vtype == VARIANT_1_HRF) + ret = q2spi_prep_hrf_request(q2spi, q2spi_pkt); + + if (ret) + return ret; + Q2SPI_DEBUG(q2spi, "%s q2spi:%p xfer:%p\n", __func__, q2spi, q2spi->xfer); + q2spi_pkt->q2spi = q2spi; + mutex_lock(&q2spi->gsi_lock); + ret = q2spi_gsi_submit(q2spi_pkt); + if (ret) { + Q2SPI_ERROR(q2spi, "q2spi_gsi_submit failed: %d\n", ret); + mutex_unlock(&q2spi->gsi_lock); + return ret; + } + mutex_unlock(&q2spi->gsi_lock); + + if (q2spi_pkt->vtype == VARIANT_1_HRF) { + ret = q2spi_process_hrf_flow_after_lra(q2spi, q2spi_pkt); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err hrf_flow sma write fail ret %d\n", + __func__, ret); + return ret; + } + } + } + return 0; +} + +/** + * q2spi_send_messages - kthread work function which processes q2spi message queue + * @work: pointer to kthread work struct contained in the controller 
struct + * + */ +static void q2spi_send_messages(struct kthread_work *work) +{ + struct q2spi_geni *q2spi = container_of(work, struct q2spi_geni, send_messages); + int ret = 0; + + ret = __q2spi_send_messages(q2spi); + if (ret) + Q2SPI_DEBUG(q2spi, "%s Err send message failure ret=%d\n", __func__, ret); +} + +/** + * q2spi_proto_init - Q2SPI protocol specific initialization + * @q2spi: pointer to q2spi_geni driver data + * + * This function adds Q2SPi protocol specific configuration for + * cs less mode. + * + * Return: 0 on success. Error code on failure. + */ +static int q2spi_proto_init(struct q2spi_geni *q2spi) +{ + u32 q2spi_tx_cfg = geni_read_reg(q2spi->base, SE_SPI_TRANS_CFG); + u32 io3_sel = geni_read_reg(q2spi->base, GENI_CFG_REG80); + u32 pre_post_dly = geni_read_reg(q2spi->base, SE_SPI_PRE_POST_CMD_DLY); + u32 word_len = geni_read_reg(q2spi->base, SE_SPI_WORD_LEN); + u32 spi_delay_reg = geni_read_reg(q2spi->base, SPI_DELAYS_COUNTERS); + u32 se_geni_cfg_95 = geni_read_reg(q2spi->base, SE_GENI_CFG_REG95); + u32 se_geni_cfg_103 = geni_read_reg(q2spi->base, SE_GENI_CFG_REG103); + u32 se_geni_cfg_104 = geni_read_reg(q2spi->base, SE_GENI_CFG_REG104); + int ret = 0; + + /* 3.2.2.10.1 Q2SPI Protocol Specific Configuration */ + /* Configure SE CLK */ + ret = q2spi_set_clock(q2spi, q2spi->max_speed_hz); + if (ret) { + Q2SPI_DEBUG(q2spi, "%s set clock failed\n", __func__); + goto exit; + } + q2spi_tx_cfg &= ~SPI_NOT_USED_CFG1; + geni_write_reg(q2spi_tx_cfg, q2spi->base, SE_SPI_TRANS_CFG); + io3_sel &= ~IO_MACRO_IO3_SEL; + geni_write_reg(io3_sel, q2spi->base, GENI_CFG_REG80); + spi_delay_reg |= (SPI_CS_CLK_DLY << M_GP_CNT5_TE2D_SHIFT) & M_GP_CNT5_TE2D; + spi_delay_reg |= (SPI_PIPE_DLY_TPM << M_GP_CNT6_CN_SHIFT) & M_GP_CNT6_CN; + spi_delay_reg |= SPI_INTER_WORDS_DLY & M_GP_CNT4_TAN; + geni_write_reg(spi_delay_reg, q2spi->base, SPI_DELAYS_COUNTERS); + se_geni_cfg_95 |= M_GP_CNT7_TSN & M_GP_CNT7; + geni_write_reg(se_geni_cfg_95, q2spi->base, SE_GENI_CFG_REG95); + 
Q2SPI_DEBUG(q2spi, "tx_cfg: 0x%x io3_sel:0x%x spi_delay: 0x%x cfg_95:0x%x\n", + geni_read_reg(q2spi->base, SE_SPI_TRANS_CFG), + geni_read_reg(q2spi->base, GENI_CFG_REG80), + geni_read_reg(q2spi->base, SPI_DELAYS_COUNTERS), + geni_read_reg(q2spi->base, SE_GENI_CFG_REG95)); + se_geni_cfg_103 |= (S_GP_CNT5_TDN << S_GP_CNT5_SHIFT) & S_GP_CNT5; + se_geni_cfg_104 |= S_GP_CNT7_SSN & S_GP_CNT7; + geni_write_reg(se_geni_cfg_103, q2spi->base, SE_GENI_CFG_REG103); + geni_write_reg(se_geni_cfg_104, q2spi->base, SE_GENI_CFG_REG104); + + word_len &= ~WORD_LEN_MSK; + word_len |= MIN_WORD_LEN & WORD_LEN_MSK; + geni_write_reg(word_len, q2spi->base, SE_SPI_WORD_LEN); + Q2SPI_DEBUG(q2spi, "cfg_103: 0x%x cfg_104:0x%x pre_post_dly;0x%x spi_word_len:0x%x\n", + geni_read_reg(q2spi->base, SE_GENI_CFG_REG103), + geni_read_reg(q2spi->base, SE_GENI_CFG_REG104), + pre_post_dly, geni_read_reg(q2spi->base, SE_SPI_WORD_LEN)); + io3_sel &= ~OTHER_IO_OE; + io3_sel |= (IO_MACRO_IO3_DATA_IN_SEL << IO_MACRO_IO3_DATA_IN_SEL_SHIFT) & + IO_MACRO_IO3_DATA_IN_SEL_MASK; + Q2SPI_DEBUG(q2spi, "io3_sel:0x%x %x TPM:0x%x %d\n", io3_sel, + (IO_MACRO_IO3_DATA_IN_SEL & IO_MACRO_IO3_DATA_IN_SEL_MASK), + SPI_PIPE_DLY_TPM, SPI_PIPE_DLY_TPM << M_GP_CNT6_CN_SHIFT); + + q2spi->gsi_mode = (geni_read_reg(q2spi->base, GENI_IF_DISABLE_RO) & + FIFO_IF_DISABLE); + if (q2spi->gsi_mode) { + q2spi->xfer_mode = GENI_GPI_DMA; + geni_se_select_mode(&q2spi->se, GENI_GPI_DMA); + q2spi_geni_gsi_setup(q2spi); + } + + Q2SPI_DEBUG(q2spi, "%s gsi_mode:%d xfer_mode:%d\n", + __func__, q2spi->gsi_mode, q2spi->xfer_mode); + return 0; +exit: + return ret; +} + +/** + * q2spi_geni_init - Qupv3 and SE initialization + * @q2spi: pointer to q2spi_geni driver data + * + * This is done once per session. Make sure this api + * is called before any actual transfer begins as it involves + * generic SW/HW and Q2SPI protocol specific intializations + * required for a q2spi transfer. + * + * Return: 0 on success. Error code on failure. 
+ */ +static int q2spi_geni_init(struct q2spi_geni *q2spi) +{ + int proto = 0; + unsigned int major; + unsigned int minor; + int ver; + int ret = 0; + + /* make sure to turn on the resources before this ex: pm_runtime_get_sync(q2spi->dev); */ + proto = geni_se_read_proto(&q2spi->se); + if (proto != GENI_SE_Q2SPI) { + Q2SPI_ERROR(q2spi, "Err Invalid proto %d\n", proto); + return -EINVAL; + } + + ver = geni_se_get_qup_hw_version(&q2spi->se); + major = GENI_SE_VERSION_MAJOR(ver); + minor = GENI_SE_VERSION_MINOR(ver); + Q2SPI_DEBUG(q2spi, "%s ver:0x%x major:%d minor:%d\n", __func__, ver, major, minor); + + if (major == 1 && minor == 0) + q2spi->oversampling = 2; + else + q2spi->oversampling = 1; + + /* Qupv3 Q2SPI protocol specific Initialization */ + q2spi_proto_init(q2spi); + + return ret; +} + +/** + * q2spi_get_icc_pinctrl - Enable ICC voting and pinctrl + * @pdev: pointer to Platform device + * @q2spi: pointer to q2spi_geni driver data + * + * This function will enable icc paths and add bandwidth voting + * and also get pinctrl state from DTSI. + * + * Return: 0 on success. Error code on failure. 
+ */ +static int q2spi_get_icc_pinctrl(struct platform_device *pdev, + struct q2spi_geni *q2spi) +{ + struct geni_se *q2spi_rsc; + int ret = 0; + + q2spi_rsc = &q2spi->se; + /* ICC get */ + ret = geni_se_common_resources_init(q2spi_rsc, + Q2SPI_CORE2X_VOTE, APPS_PROC_TO_QUP_VOTE, + (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH)); + if (ret) { + Q2SPI_DEBUG(q2spi, "Error geni_se_resources_init\n"); + goto get_icc_pinctrl_err; + } + Q2SPI_DEBUG(q2spi, "%s GENI_TO_CORE:%d CPU_TO_GENI:%d GENI_TO_DDR:%d\n", + __func__, q2spi_rsc->icc_paths[GENI_TO_CORE].avg_bw, + q2spi_rsc->icc_paths[CPU_TO_GENI].avg_bw, + q2spi_rsc->icc_paths[GENI_TO_DDR].avg_bw); + + /* call set_bw for once, then do icc_enable/disable */ + ret = geni_icc_set_bw(q2spi_rsc); + if (ret) { + Q2SPI_DEBUG(q2spi, "%s icc set bw failed ret:%d\n", __func__, ret); + goto get_icc_pinctrl_err; + } + + /* to remove the votes doing icc enable/disable */ + ret = geni_icc_enable(q2spi_rsc); + if (ret) { + Q2SPI_DEBUG(q2spi, "%s icc enable failed ret:%d\n", __func__, ret); + goto get_icc_pinctrl_err; + } + + q2spi->geni_pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR_OR_NULL(q2spi->geni_pinctrl)) { + Q2SPI_DEBUG(q2spi, "No pinctrl config specified!\n"); + ret = PTR_ERR(q2spi->geni_pinctrl); + goto get_icc_pinctrl_err; + } + + q2spi->geni_gpio_active = pinctrl_lookup_state(q2spi->geni_pinctrl, PINCTRL_DEFAULT); + if (IS_ERR_OR_NULL(q2spi->geni_gpio_active)) { + Q2SPI_DEBUG(q2spi, "No default config specified!\n"); + ret = PTR_ERR(q2spi->geni_gpio_active); + goto get_icc_pinctrl_err; + } + + q2spi->geni_gpio_sleep = pinctrl_lookup_state(q2spi->geni_pinctrl, PINCTRL_SLEEP); + if (IS_ERR_OR_NULL(q2spi->geni_gpio_sleep)) { + Q2SPI_DEBUG(q2spi, "No sleep config specified!\n"); + ret = PTR_ERR(q2spi->geni_gpio_sleep); + goto get_icc_pinctrl_err; + } + + ret = pinctrl_select_state(q2spi->geni_pinctrl, q2spi->geni_gpio_active); + if (ret) { + Q2SPI_DEBUG(q2spi, "Failed to set active configuration\n"); + goto get_icc_pinctrl_err; + 
} +get_icc_pinctrl_err: + return ret; +} + +/** + * q2spi_resource_init - q2spi Initialization + * @pdev: pointer to Platform device + * @q2spi: pointer to q2spi_geni driver data + * + * Initialize pinctrl and ICC, enable m_ahb, s_ahb and se_clk + * Initialize qupv3 core and SE. + * + * Return: 0 on success. Error code on failure. + */ +static int q2spi_resource_init(struct platform_device *pdev, struct q2spi_geni *q2spi) +{ + int ret = 0; + + /* ICC and PINCTRL initialization */ + ret = q2spi_get_icc_pinctrl(pdev, q2spi); + if (ret) { + Q2SPI_DEBUG(q2spi, "pinctrl get failed %d\n", ret); + return ret; + } + + /* Enable m_ahb, s_ahb and se clks */ + ret = geni_se_common_clks_on(q2spi->se.clk, q2spi->m_ahb_clk, q2spi->s_ahb_clk); + if (ret) { + Q2SPI_DEBUG(q2spi, "common_clk_on failed %d\n", ret); + return ret; + } + + /* QUPv3 and SE initialization */ + /* we can move this to part of transfer or runtime_resume once supported*/ + ret = q2spi_geni_init(q2spi); + if (ret) { + Q2SPI_DEBUG(q2spi, "Geni init failed %d\n", ret); + return ret; + } + return ret; +} + +/** + * q2spi_chardev_create - Allocate two character devices dinamically. + * @pdev: pointer to Platform device + * @q2spi: pointer to q2spi_geni driver data + * + * Allocates a range of char device numbers and adds a char + * device to the system and creates a device and registers + * it with sysfs. + * + * Return: 0 on success. Error code on failure. 
+ */
+
+static int q2spi_chardev_create(struct q2spi_geni *q2spi)
+{
+	int num_cdev_devs = 0;
+	int ret = 0, i;
+
+	ret = alloc_chrdev_region(&q2spi->chrdev.q2spi_dev, 0, MAX_DEV, "q2spidev");
+	if (ret < 0) {
+		Q2SPI_DEBUG(q2spi, "%s ret:%d\n", __func__, ret);
+		return ret;
+	}
+	q2spi_cdev_major = MAJOR(q2spi->chrdev.q2spi_dev);
+	q2spi->chrdev.q2spi_class = class_create(THIS_MODULE, "q2spidev");
+	if (IS_ERR(q2spi->chrdev.q2spi_class)) {
+		/* PTR_ERR() is a long: store into ret (int) first so %d matches */
+		ret = PTR_ERR(q2spi->chrdev.q2spi_class);
+		Q2SPI_DEBUG(q2spi, "%s ret:%d\n", __func__, ret);
+		goto err_class_create;
+	}
+
+	for (i = 0; i < MAX_DEV; i++) {
+		cdev_init(&q2spi->chrdev.cdev[i], &q2spi_fops);
+		q2spi->chrdev.cdev[i].owner = THIS_MODULE;
+		q2spi->chrdev.major = q2spi_cdev_major;
+		q2spi->chrdev.minor = num_cdev_devs;
+		ret = cdev_add(&q2spi->chrdev.cdev[i], MKDEV(q2spi_cdev_major, i), 1);
+		if (ret) {
+			Q2SPI_DEBUG(q2spi, "cdev_add failed ret:%d\n", ret);
+			goto err_unwind;
+		}
+
+		if (i) {
+			q2spi->chrdev.class_dev = device_create(q2spi->chrdev.q2spi_class, NULL,
+								MKDEV(q2spi_cdev_major, i),
+								NULL, "q2spibt");
+		} else {
+			q2spi->chrdev.class_dev = device_create(q2spi->chrdev.q2spi_class, NULL,
+								MKDEV(q2spi_cdev_major, i),
+								NULL, "q2spiuwb");
+		}
+
+		if (IS_ERR(q2spi->chrdev.class_dev)) {
+			ret = PTR_ERR(q2spi->chrdev.class_dev);
+			Q2SPI_DEBUG(q2spi, "failed to create device\n");
+			/* cdev[i] was added but device_create() failed: drop it here */
+			cdev_del(&q2spi->chrdev.cdev[i]);
+			goto err_unwind;
+		}
+		/* %p needs pointers: pass the addresses, not the structs by value */
+		Q2SPI_DEBUG(q2spi, "%s q2spi:%p chrdev:%p cdev:%p i:%d end\n",
+			    __func__, q2spi, &q2spi->chrdev, &q2spi->chrdev.cdev[i], i);
+		num_cdev_devs++;
+	}
+
+	return 0;
+err_unwind:
+	/* Undo only the device/cdev pairs actually created: indices 0..i-1 */
+	while (--i >= 0) {
+		device_destroy(q2spi->chrdev.q2spi_class, MKDEV(q2spi_cdev_major, i));
+		cdev_del(&q2spi->chrdev.cdev[i]);
+	}
+	class_destroy(q2spi->chrdev.q2spi_class);
+err_class_create:
+	/* Release exactly the MAX_DEV minors reserved by alloc_chrdev_region() */
+	unregister_chrdev_region(MKDEV(q2spi_cdev_major, 0), MAX_DEV);
+	return ret;
+}
+
+/**
+ * q2spi_read_reg - read a register of host accessible client register
+ * @q2spi: Pointer to main q2spi_geni structure.
+ * @reg_offset: specifies register address of the client to be read. + * + * This function used to read register of a client specified. + * It frame local register access command and submit to gsi and + * wait for gsi completion. + * + * Return: 0 for success, negative number for error condition. + */ +static int q2spi_read_reg(struct q2spi_geni *q2spi, int reg_offset) +{ + struct q2spi_packet *q2spi_pkt = NULL; + struct q2spi_dma_transfer *xfer; + struct q2spi_request q2spi_req; + unsigned long timeout = 0, xfer_timeout = 0; + int ret = 0; + + q2spi_req.cmd = LOCAL_REG_READ; + q2spi_req.addr = reg_offset; + q2spi_req.data_len = 4; /* In bytes */ + + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p &q2spi_pkt=%p\n", __func__, q2spi_pkt, &q2spi_pkt); + ret = q2spi_frame_lra(q2spi, q2spi_req, &q2spi_pkt); + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p\n", __func__, q2spi_pkt); + Q2SPI_DEBUG(q2spi, "flow_id:%d\n", ret); + if (ret < 0) { + Q2SPI_DEBUG(q2spi, "q2spi_frame_lra failed ret:%d\n", ret); + return ret; + } + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p\n", __func__, q2spi_pkt); + xfer = q2spi_kzalloc(q2spi, sizeof(struct q2spi_dma_transfer)); + if (!xfer) { + Q2SPI_DEBUG(q2spi, "%s Err alloc failed\n", __func__); + ret = -ENOMEM; + return ret; + } + xfer->tx_buf = q2spi_pkt->var1_pkt; + xfer->tx_dma = q2spi_pkt->var1_tx_dma; + xfer->rx_buf = q2spi->xfer->rx_buf; + xfer->rx_dma = q2spi->xfer->rx_dma; + xfer->cmd = q2spi_pkt->m_cmd_param; + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p cmd:%d\n", __func__, q2spi_pkt, xfer->cmd); + xfer->tx_data_len = q2spi_req.data_len; + xfer->tx_len = Q2SPI_HEADER_LEN; + xfer->rx_data_len = q2spi_req.data_len; + xfer->rx_len = xfer->rx_data_len; + xfer->tid = q2spi_pkt->var1_pkt->flow_id; + reinit_completion(&q2spi->sync_wait); + + Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p tx_len:%d rx_len:%d\n", + __func__, xfer->tx_buf, xfer->tx_dma, xfer->rx_buf, xfer->rx_dma, xfer->tx_len, + xfer->rx_len); + q2spi_dump_ipc(q2spi, q2spi->ipc, "q2spi read reg 
tx_buf DMA TX", + (char *)xfer->tx_buf, xfer->tx_len); + q2spi->xfer = xfer; + q2spi_pkt->q2spi = q2spi; + mutex_lock(&q2spi->gsi_lock); + ret = q2spi_gsi_submit(q2spi_pkt); + if (ret) { + Q2SPI_DEBUG(q2spi, "Err q2spi_gsi_submit failed: %d\n", ret); + mutex_unlock(&q2spi->gsi_lock); + return ret; + } + mutex_unlock(&q2spi->gsi_lock); + xfer_timeout = msecs_to_jiffies(XFER_TIMEOUT_OFFSET); + timeout = wait_for_completion_interruptible_timeout(&q2spi->sync_wait, xfer_timeout); + if (timeout <= 0) { + Q2SPI_ERROR(q2spi, "%s Err timeout for sync_wait\n", __func__); + return -ETIMEDOUT; + } + Q2SPI_DEBUG(q2spi, "Reg:0x%x Read Val = 0x%x\n", reg_offset, *(unsigned int *)xfer->rx_buf); + return ret; +} + +/** + * q2spi_write_reg - write a register of host accesible client register + * @q2spi: Pointer to main q2spi_geni structure. + * @reg_offset: specifies register address of the client to be write. + * @data: spefies value of the register to be write. + * + * This function used to write to a register of a client specified. + * It frame local register access command and submit to gsi and + * wait for gsi completion. + * + * Return: 0 for success, negative number for error condition. 
+ */ +static int q2spi_write_reg(struct q2spi_geni *q2spi, int reg_offset, unsigned long data) +{ + struct q2spi_packet *q2spi_pkt; + struct q2spi_dma_transfer *xfer; + struct q2spi_request q2spi_req; + unsigned long timeout = 0, xfer_timeout = 0; + int ret = 0; + + q2spi_req.cmd = LOCAL_REG_WRITE; + q2spi_req.addr = reg_offset; + q2spi_req.data_len = 4; + q2spi_req.data_buff = &data; + ret = q2spi_frame_lra(q2spi, q2spi_req, &q2spi_pkt); + if (ret < 0) { + Q2SPI_ERROR(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n", __func__, ret); + return ret; + } + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p\n", __func__, q2spi_pkt); + xfer = q2spi_kzalloc(q2spi, sizeof(struct q2spi_dma_transfer)); + if (!xfer) { + Q2SPI_ERROR(q2spi, "%s Err xfer alloc failed\n", __func__); + ret = -ENOMEM; + return ret; + } + xfer->tx_buf = q2spi_pkt->var1_pkt; + xfer->tx_dma = q2spi_pkt->var1_tx_dma; + xfer->cmd = q2spi_pkt->m_cmd_param; + xfer->tx_data_len = q2spi_req.data_len; + xfer->tx_len = Q2SPI_HEADER_LEN + xfer->tx_data_len; + xfer->tid = q2spi_pkt->var1_pkt->flow_id; + + Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p tx_len:%d rx_len:%d\n", + __func__, xfer->tx_buf, xfer->tx_dma, xfer->rx_buf, xfer->rx_dma, xfer->tx_len, + xfer->rx_len); + Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->var1_pkt:0x%x q2spi_pkt->var1_pkt_add:0x%x\n", + __func__, q2spi_pkt->var1_pkt, &q2spi_pkt->var1_pkt); + q2spi_dump_ipc(q2spi, q2spi->ipc, "q2spi_read_reg tx_buf DMA TX", + (char *)xfer->tx_buf, xfer->tx_len); + q2spi->xfer = xfer; + q2spi_pkt->q2spi = q2spi; + reinit_completion(&q2spi->sync_wait); + mutex_lock(&q2spi->gsi_lock); + ret = q2spi_gsi_submit(q2spi_pkt); + if (ret) { + Q2SPI_DEBUG(q2spi, "q2spi_gsi_submit failed: %d\n", ret); + mutex_unlock(&q2spi->gsi_lock); + return ret; + } + mutex_unlock(&q2spi->gsi_lock); + Q2SPI_DEBUG(q2spi, "wait here\n"); + xfer_timeout = msecs_to_jiffies(XFER_TIMEOUT_OFFSET); + timeout = wait_for_completion_interruptible_timeout(&q2spi->sync_wait, xfer_timeout); + if 
(timeout <= 0) { + Q2SPI_DEBUG(q2spi, "%s Err timeout for sync_wait\n", __func__); + return -ETIMEDOUT; + } + + Q2SPI_DEBUG(q2spi, "%s write to reg success ret:%s\n", __func__, ret); + Q2SPI_DEBUG(q2spi, "write success: %d\n", ret); + return ret; +} + +/** + * q2spi_slave_init - Initialization sequence + * @q2spi: Pointer to main q2spi_geni structure + * + * This function performs init sequence with q2spi slave + * send host command to check client enabled or not + * read Q2SPI_HOST_CFG.DOORBELL_EN register info from slave + * Write 1 to each bit of Q2SPI_ERROR_EN to enable error interrupt to Host using doorbell. + * + * Return: 0 for success, negative number for error condition. + */ +static int q2spi_slave_init(struct q2spi_geni *q2spi) +{ + unsigned long scratch_data = 0xAAAAAAAA; + unsigned long error_en_data = 0xFFFFFFFF; + int ret = 0, value = 0; + int retries = RETRIES; + + Q2SPI_DEBUG(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_SCRATCH0); + return 0; + /* Dummy SCRATCH register write */ + ret = q2spi_write_reg(q2spi, Q2SPI_SCRATCH0, scratch_data); + if (ret) { + Q2SPI_ERROR(q2spi, "scratch0 write failed: %d\n", ret); + return ret; + } + + /* Dummy SCRATCH register read */ + Q2SPI_DEBUG(q2spi, "%s reg: 0x%x\n", __func__, Q2SPI_SCRATCH0); + ret = q2spi_read_reg(q2spi, Q2SPI_SCRATCH0); + if (ret) { + Q2SPI_ERROR(q2spi, "Err scratch0 read failed: %d\n", ret); + return ret; + } + + /* + * Send dummy Host command until Client is enabled. + * Dummy command can be reading Q2SPI_HW_VERSION register. 
+ */ + while (retries > 0 && value <= 0) { + value = q2spi_read_reg(q2spi, Q2SPI_HW_VERSION); + Q2SPI_DEBUG(q2spi, "%s retries:%d value:%d\n", __func__, retries, value); + if (value <= 0) + Q2SPI_DEBUG(q2spi, "HW_Version read failed: %d\n", ret); + retries--; + Q2SPI_DEBUG(q2spi, "%s retries:%d value:%d\n", __func__, retries, value); + } + + Q2SPI_DEBUG(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_HOST_CFG); + ret = q2spi_read_reg(q2spi, Q2SPI_HOST_CFG); + if (ret) { + Q2SPI_ERROR(q2spi, "Err HOST CFG read failed: %d\n", ret); + return ret; + } + + Q2SPI_DEBUG(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_ERROR_EN); + ret = q2spi_write_reg(q2spi, Q2SPI_ERROR_EN, error_en_data); + if (ret) { + Q2SPI_ERROR(q2spi, "Err Error_en reg write failed: %d\n", ret); + return ret; + } + + Q2SPI_DEBUG(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_ERROR_EN); + ret = q2spi_read_reg(q2spi, Q2SPI_ERROR_EN); + if (ret) { + Q2SPI_ERROR(q2spi, "Err HOST CFG read failed: %d\n", ret); + return ret; + } + return 0; +} + +/** + * q2spi_clks_get - get SE and AHB clks + * @q2spi: Pointer to main q2spi_geni structure + * + * This function will get clock resources for SE, M-AHB and S_AHB clocks. + * + * Return: 0 for success, negative number for error condition. 
+ */
+static int q2spi_clks_get(struct q2spi_geni *q2spi)
+{
+	int ret = 0;
+
+	q2spi->se_clk = devm_clk_get(q2spi->dev, "se-clk");
+	/* devm_clk_get() reports failure via ERR_PTR; ret was still 0 here */
+	if (IS_ERR(q2spi->se_clk)) {
+		ret = PTR_ERR(q2spi->se_clk);
+		Q2SPI_ERROR(q2spi, "Err getting SE clk %d\n", ret);
+		return ret;
+	}
+	q2spi->se.clk = q2spi->se_clk;
+
+	q2spi->m_ahb_clk = devm_clk_get(q2spi->dev->parent, "m-ahb");
+	if (IS_ERR(q2spi->m_ahb_clk)) {
+		ret = PTR_ERR(q2spi->m_ahb_clk);
+		Q2SPI_ERROR(q2spi, "Err getting Main AHB clk %d\n", ret);
+		return ret;
+	}
+
+	q2spi->s_ahb_clk = devm_clk_get(q2spi->dev->parent, "s-ahb");
+	if (IS_ERR(q2spi->s_ahb_clk)) {
+		ret = PTR_ERR(q2spi->s_ahb_clk);
+		Q2SPI_ERROR(q2spi, "Err getting Secondary AHB clk %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+int q2spi_send_system_mem_access(struct q2spi_geni *q2spi)
+{
+	struct q2spi_request q2spi_req;
+	struct q2spi_cr_packet *q2spi_cr_pkt = q2spi->cr_pkt;
+	int ret;
+	unsigned int dw_len;
+
+	/*
+	 * NOTE(review): (dw_len_part3 << 12) & 0xFF is always 0, and
+	 * (dw_len_part2 << 4) & 0xFF keeps only the low nibble of part2 -
+	 * the masks look like they belong before the shifts. Confirm the CR
+	 * dw_len field packing against the Q2SPI spec before changing.
+	 */
+	dw_len = (((q2spi_cr_pkt->var3_pkt.dw_len_part3 << 12) & 0xFF) |
+		  ((q2spi_cr_pkt->var3_pkt.dw_len_part2 << 4) & 0xFF) |
+		  q2spi_cr_pkt->var3_pkt.dw_len_part1);
+	q2spi_req.data_len = (dw_len * 4) + 4;
+	Q2SPI_DEBUG(q2spi, "%s dw_len:%d data_len:%d\n", __func__, dw_len, q2spi_req.data_len);
+	q2spi_req.data_buff = q2spi_kzalloc(q2spi, q2spi_req.data_len);
+	if (!q2spi_req.data_buff) {
+		Q2SPI_DEBUG(q2spi, "%s Err request data_buff NULL\n", __func__);
+		return -ENOMEM;
+	}
+
+	q2spi_req.cmd = DATA_READ;
+	q2spi_req.addr = 0;
+	q2spi_req.end_point = 0;
+	q2spi_req.proto_ind = 0;
+	q2spi_req.priority = 0;
+	q2spi_req.flow_id = q2spi->cr_pkt->var3_pkt.flow_id;
+	q2spi_req.sync = 0;
+	mutex_lock(&q2spi->queue_lock);
+	ret = q2spi_add_req_to_tx_queue(q2spi, q2spi_req);
+	mutex_unlock(&q2spi->queue_lock);
+	kthread_queue_work(q2spi->kworker, &q2spi->send_messages);
+
+	return ret;
+}
+
+/*
+ * q2spi_handle_doorbell_work() - worker function which handles doorbell flow for q2spi
+ *
+ * @work: pointer to work_struct
+ *
+ * Return: None
+ */
+static void q2spi_handle_doorbell_work(struct work_struct
*work) +{ + struct q2spi_geni *q2spi = + container_of(work, struct q2spi_geni, q2spi_doorbell_work); + struct q2spi_cr_packet *q2spi_cr_pkt = NULL; + unsigned long flags; + int ret = 0, i = 0, no_of_crs = 0; + u8 *ptr; + bool wakeup_hrf = true; + + Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid); + Q2SPI_DEBUG(q2spi, "%s q2spi:%p q2spi_cr_pkt:%p\n", __func__, q2spi, q2spi_cr_pkt); + ret = q2spi_prepare_cr_pkt(q2spi); + if (ret) { + Q2SPI_DEBUG(q2spi, "q2spi_prepare_cr_pkt failed %d\n", ret); + return; + } + q2spi_cr_pkt = q2spi->cr_pkt; + /* wait for RX dma channel TCE 0x22 to get CR body in RX DMA buffer */ + ret = check_gsi_transfer_completion_rx(q2spi); + if (ret) { + Q2SPI_DEBUG(q2spi, "%s completion timeout: %d\n", __func__, ret); + return; + } + + Q2SPI_DEBUG(q2spi, "%s q2spi:%p q2spi_cr_pkt:%p\n", + __func__, q2spi, q2spi_cr_pkt); + + no_of_crs = q2spi_cr_pkt->no_of_valid_crs; + Q2SPI_DEBUG(q2spi, "%s q2spi:%p q2spi_db_xfer:%p db_xfer_rx_buf:%p\n", + __func__, q2spi, q2spi->db_xfer, q2spi->db_xfer->rx_buf); + + q2spi_cr_pkt->type = 0; + ptr = (u8 *)q2spi->db_xfer->rx_buf; + for (i = 0; i < no_of_crs; i++) { + if (q2spi_cr_pkt->cr_hdr[i]->cmd == BULK_ACCESS_STATUS) { + q2spi_cr_pkt->bulk_pkt.cmd = q2spi_cr_pkt->cr_hdr[i]->cmd; + q2spi_cr_pkt->bulk_pkt.flow = q2spi_cr_pkt->cr_hdr[i]->flow; + q2spi_cr_pkt->bulk_pkt.parity = q2spi_cr_pkt->cr_hdr[i]->parity; + q2spi_dump_ipc(q2spi, q2spi->ipc, "DB BULK DMA RX", + (char *)ptr, q2spi->db_xfer->rx_len); + q2spi_cr_pkt->bulk_pkt.status = ptr[0] & 0xF; + q2spi_cr_pkt->bulk_pkt.flow_id = ptr[0] >> 4; + ptr += CR_BULK_DATA_size; + Q2SPI_DEBUG(q2spi, "%s i:%d q2spi_cr_pkt->type:0x%x\n", + __func__, i, q2spi_cr_pkt->type); + q2spi_cr_pkt->type |= (1 << (2 * i)); + Q2SPI_DEBUG(q2spi, "%s i:%d q2spi_cr_pkt->type:0x%x flow_id:%d\n", + __func__, i, q2spi_cr_pkt->type, + q2spi_cr_pkt->bulk_pkt.flow_id); + } else if ((q2spi_cr_pkt->cr_hdr[i]->cmd == ADDR_LESS_WR_ACCESS) || + (q2spi_cr_pkt->cr_hdr[i]->cmd == 
ADDR_LESS_RD_ACCESS)) { + memcpy((void *)&q2spi_cr_pkt->var3_pkt, (void *)ptr, + sizeof(struct q2spi_client_dma_pkt)); + q2spi_dump_ipc(q2spi, q2spi->ipc, "DB VAR3 DMA RX", + (char *)ptr, q2spi->db_xfer->rx_len); + ptr += CR_DMA_DATA_size; + Q2SPI_DEBUG(q2spi, "%s i:%d q2spi_cr_pkt->type:0x%x\n", + __func__, i, q2spi_cr_pkt->type); + q2spi_cr_pkt->type |= (2 << (2 * i)); + Q2SPI_DEBUG(q2spi, "%s i:%d q2spi_cr_pkt->type:0x%x\n", + __func__, i, q2spi_cr_pkt->type); + Q2SPI_DEBUG(q2spi, "%s var3_pkt:%p var3_flow_id:%d\n", + __func__, q2spi_cr_pkt->var3_pkt, + q2spi_cr_pkt->var3_pkt.flow_id); + Q2SPI_DEBUG(q2spi, "%s len_part1:%d len_part2:%d\n", __func__, + q2spi_cr_pkt->var3_pkt.dw_len_part1, + q2spi_cr_pkt->var3_pkt.dw_len_part2); + } + } + + q2spi_unmap_dma_buf_used(q2spi, (dma_addr_t)NULL, q2spi->db_xfer->rx_dma); + q2spi->db_xfer->rx_dma = (dma_addr_t)NULL; + + for (i = 0; i < no_of_crs; i++) { + Q2SPI_DEBUG(q2spi, "%s i=%d CR Header CMD 0x%x\n", + __func__, i, q2spi_cr_pkt->cr_hdr[i]->cmd); + if (q2spi_cr_pkt->cr_hdr[i]->cmd == ADDR_LESS_WR_ACCESS || + q2spi_cr_pkt->cr_hdr[i]->cmd == ADDR_LESS_RD_ACCESS) { + spin_lock_irqsave(&q2spi->cr_queue_lock, flags); + if (q2spi_cr_pkt->cr_hdr[i]->flow) { + Q2SPI_DEBUG(q2spi, + "%s Add cr_pkt to cr_queue_list q2spi_cr_pkt:%p opcode:%d\n", + __func__, q2spi_cr_pkt, q2spi_cr_pkt->cr_hdr[i]->cmd); + list_add_tail(&q2spi_cr_pkt->list, &q2spi->cr_queue_list); + } else { + Q2SPI_DEBUG(q2spi, + "%s Add cr_pkt to hc_cr_queue_list q2spi_cr_pkt:%p opcode:%d\n", + __func__, q2spi_cr_pkt, q2spi_cr_pkt->cr_hdr[i]->cmd); + list_add_tail(&q2spi_cr_pkt->list, &q2spi->hc_cr_queue_list); + } + spin_unlock_irqrestore(&q2spi->cr_queue_lock, flags); + + if (q2spi_cr_pkt->cr_hdr[i]->flow) { + Q2SPI_DEBUG(q2spi, "%s len_part1:%d len_part2:%d len_part3:%d\n", + __func__, q2spi_cr_pkt->var3_pkt.dw_len_part1, + q2spi_cr_pkt->var3_pkt.dw_len_part2, + q2spi_cr_pkt->var3_pkt.dw_len_part3); + q2spi_send_system_mem_access(q2spi); + /* + * wait for RX 
dma channel TCE 0x22 to + * get CR body in RX DMA buffer + */ + ret = check_gsi_transfer_completion_rx(q2spi); + if (ret) + Q2SPI_DEBUG(q2spi, "%s completion timeout: %d\n", + __func__, ret); + } else { + if (q2spi_cr_pkt->cr_hdr[i]->cmd == + ADDR_LESS_WR_ACCESS && wakeup_hrf) { + /* wakeup HRF flow which is waiting for this CR doorbell */ + complete_all(&q2spi->doorbell_up); + Q2SPI_DEBUG(q2spi, "%s cmd: %d Got doorbell CR Host flow\n", + __func__, q2spi_cr_pkt->cr_hdr[i]->cmd); + wakeup_hrf = false; + } + } + } else if (q2spi_cr_pkt->cr_hdr[i]->cmd == BULK_ACCESS_STATUS) { + if (q2spi_cr_pkt->bulk_pkt.flow_id >= 0x8) { + Q2SPI_DEBUG(q2spi, "%s Bulk status with Client Flow ID\n", + __func__); + q2spi_notify_data_avail_for_client(q2spi); + } else { + Q2SPI_DEBUG(q2spi, "%s Bulk status with host Flow ID:%d\n", + __func__, q2spi_cr_pkt->bulk_pkt.flow_id); + complete_all(&q2spi->sync_wait); + } + } + + /* + * get one rx buffer from allocated pool and + * map to gsi to ready for next doorbell. + */ + ret = q2spi_map_doorbell_rx_buf(q2spi); + if (ret) { + Q2SPI_DEBUG(q2spi, "failed to alloc RX DMA buf"); + return; + } + } + Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); +} + +/** + * q2spi_geni_probe - Q2SPI interface driver probe function + * @pdev: Q2SPI Serial Engine to probe. + * + * Allocates basic resources for QUPv3 SE which supports q2spi + * and then register a range of char device numbers. Also + * invoke methods for Qupv3 SE and Q2SPI protocol + * specific Initialization. + * + * Return: 0 for success, negative number for error condition. 
+ */ +static int q2spi_geni_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + struct q2spi_geni *q2spi; + int ret = 0; + + pr_info("%s start PID=%d\n", __func__, current->pid); + + q2spi = devm_kzalloc(dev, sizeof(*q2spi), GFP_KERNEL); + if (!q2spi) + return -ENOMEM; + + q2spi->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Err getting IO region\n"); + return -EINVAL; + } + + q2spi->base = devm_ioremap_resource(dev, res); + if (IS_ERR(q2spi->base)) { + ret = PTR_ERR(q2spi->base); + dev_err(dev, "Err ioremap fail %d\n", ret); + return ret; + } + + q2spi->irq = platform_get_irq(pdev, 0); + if (q2spi->irq < 0) { + dev_err(dev, "Err for irq get %d\n", ret); + return q2spi->irq; + } + + irq_set_status_flags(q2spi->irq, IRQ_NOAUTOEN); + ret = devm_request_irq(dev, q2spi->irq, q2spi_geni_irq, + IRQF_TRIGGER_HIGH, dev_name(dev), q2spi); + if (ret) { + dev_err(dev, "Err Failed to request irq %d\n", ret); + return ret; + } + + q2spi->se.dev = dev; + q2spi->se.wrapper = dev_get_drvdata(dev->parent); + if (!q2spi->se.wrapper) { + dev_err(dev, "Err SE Wrapper is NULL, deferring probe\n"); + return -EPROBE_DEFER; + } + + q2spi->ipc = ipc_log_context_create(15, dev_name(dev), 0); + if (!q2spi->ipc && IS_ENABLED(CONFIG_IPC_LOGGING)) + dev_err(dev, "Error creating IPC logs\n"); + + q2spi->se.base = q2spi->base; + if (of_property_read_u32(pdev->dev.of_node, "q2spi-max-frequency", + &q2spi->max_speed_hz)) { + Q2SPI_ERROR(q2spi, "Err Max frequency not specified\n"); + ret = -EINVAL; + goto probe_err; + } + + Q2SPI_INFO(q2spi, "%s max_speed:%u\n", __func__, q2spi->max_speed_hz); + q2spi->wrapper_dev = dev->parent; + Q2SPI_INFO(q2spi, "q2spi:%p q2spi_cdev:%p w_dev:%p, dev:%p, p_dev:%p dev_name:%s", + q2spi, q2spi->chrdev, q2spi->wrapper_dev, dev, &pdev->dev, + dev_name(q2spi->dev)); + + ret = dma_set_mask_and_coherent(dev, (u64)DMA_BIT_MASK(48)); + if (ret) { + Q2SPI_INFO(q2spi, "%s 
dma_set_mask_and_coherent with DMA_BIT_MASK(48) failed", + __func__); + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + Q2SPI_ERROR(q2spi, "Err could not set DMA mask\n"); + goto probe_err; + } + } + ret = q2spi_chardev_create(q2spi); + if (ret) + goto probe_err; + + ret = q2spi_clks_get(q2spi); + if (ret) { + Q2SPI_ERROR(q2spi, "Err clks get failed\n"); + return ret; + } + + ret = q2spi_resource_init(pdev, q2spi); + if (ret) + goto probe_err; + + init_waitqueue_head(&q2spi->readq); + init_waitqueue_head(&q2spi->read_wq); + INIT_LIST_HEAD(&q2spi->tx_queue_list); + INIT_LIST_HEAD(&q2spi->rx_queue_list); + INIT_LIST_HEAD(&q2spi->cr_queue_list); + INIT_LIST_HEAD(&q2spi->hc_cr_queue_list); + mutex_init(&q2spi->gsi_lock); + spin_lock_init(&q2spi->txn_lock); + mutex_init(&q2spi->queue_lock); + spin_lock_init(&q2spi->cr_queue_lock); + + q2spi->kworker = kthread_create_worker(0, "kthread_q2spi"); + if (IS_ERR(q2spi->kworker)) { + Q2SPI_ERROR(q2spi, "Err failed to create message pump kworker\n"); + return PTR_ERR(q2spi->kworker); + } + kthread_init_work(&q2spi->send_messages, q2spi_send_messages); + init_completion(&q2spi->tx_cb); + init_completion(&q2spi->rx_cb); + init_completion(&q2spi->doorbell_up); + init_completion(&q2spi->sync_wait); + idr_init(&q2spi->tid_idr); + + /* Pre allocate buffers for transfers */ + ret = q2spi_pre_alloc_buffers(q2spi); + if (ret) { + Q2SPI_ERROR(q2spi, "Err failed to alloc buffers"); + goto probe_err; + } + q2spi->xfer = devm_kzalloc(q2spi->dev, sizeof(struct q2spi_dma_transfer), GFP_KERNEL); + if (!q2spi->xfer) { + Q2SPI_ERROR(q2spi, "Err failed to alloc xfer buffer"); + goto probe_err; + } + + q2spi->db_xfer = devm_kzalloc(q2spi->dev, sizeof(struct q2spi_dma_transfer), GFP_KERNEL); + if (!q2spi->db_xfer) { + Q2SPI_ERROR(q2spi, "Err failed to alloc db_xfer buffer"); + goto probe_err; + } + + q2spi->doorbell_wq = alloc_workqueue("%s", WQ_HIGHPRI, 1, dev_name(dev)); + if (!q2spi->doorbell_wq) { + Q2SPI_ERROR(q2spi, 
"Err failed to allocate workqueue"); + destroy_workqueue(q2spi->doorbell_wq); + return -ENOMEM; + } + INIT_WORK(&q2spi->q2spi_doorbell_work, q2spi_handle_doorbell_work); + + dev_dbg(dev, "Q2SPI GENI SE Driver probed\n"); + + platform_set_drvdata(pdev, q2spi); + q2spi->init = false; + + Q2SPI_INFO(q2spi, "%s Q2SPI GENI SE Driver probed\n", __func__); + return 0; + +probe_err: + Q2SPI_ERROR(q2spi, "%s Err Probe ret:%d\n", __func__, ret); + return ret; +} + +static int q2spi_geni_remove(struct platform_device *pdev) +{ + struct q2spi_geni *q2spi = platform_get_drvdata(pdev); + int i, ret; + + Q2SPI_DEBUG(q2spi, "%s q2spi=0x%p\n", __func__, q2spi); + if (q2spi->doorbell_wq) + destroy_workqueue(q2spi->doorbell_wq); + if (q2spi->kworker) { + kthread_destroy_worker(q2spi->kworker); + q2spi->kworker = NULL; + } + for (i = 0; i < MAX_DEV; i++) + device_destroy(q2spi->chrdev.q2spi_class, MKDEV(q2spi_cdev_major, i)); + class_unregister(q2spi->chrdev.q2spi_class); + class_destroy(q2spi->chrdev.q2spi_class); + unregister_chrdev_region(MKDEV(q2spi_cdev_major, 0), MINORMASK); + ret = q2spi_free_dma_buf(q2spi); + if (ret) { + Q2SPI_ERROR(q2spi, "%s Err Probe ret:%d\n", __func__, ret); + return ret; + } + + if (q2spi->ipc) + ipc_log_context_destroy(q2spi->ipc); + return 0; +} + +static int q2spi_geni_runtime_suspend(struct device *dev) +{ + pr_err("%s PID=%d\n", __func__, current->pid); + return 0; +} + +static int q2spi_geni_runtime_resume(struct device *dev) +{ + pr_err("%s PID=%d\n", __func__, current->pid); + return 0; +} + +static int q2spi_geni_resume(struct device *dev) +{ + pr_err("%s PID=%d\n", __func__, current->pid); + return 0; +} + +static int q2spi_geni_suspend(struct device *dev) +{ + pr_err("%s PID=%d\n", __func__, current->pid); + return 0; +} + +static const struct dev_pm_ops q2spi_geni_pm_ops = { + SET_RUNTIME_PM_OPS(q2spi_geni_runtime_suspend, + q2spi_geni_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(q2spi_geni_suspend, q2spi_geni_resume) +}; + +static const 
struct of_device_id q2spi_geni_dt_match[] = { + { .compatible = "qcom,q2spi-msm-geni" }, + {} +}; +MODULE_DEVICE_TABLE(of, q2spi_geni_dt_match); + +static struct platform_driver q2spi_geni_driver = { + .probe = q2spi_geni_probe, + .remove = q2spi_geni_remove, + .driver = { + .name = "q2spi_msm_geni", + .pm = &q2spi_geni_pm_ops, + .of_match_table = q2spi_geni_dt_match, + }, +}; + +static int __init q2spi_dev_init(void) +{ + int ret = 0; + + pr_info("%s PID=%d\n", __func__, current->pid); + + ret = platform_driver_register(&q2spi_geni_driver); + if (ret) + pr_err("register platform driver failed, ret [%d]\n", ret); + + pr_err("%s end ret:%d\n", __func__, ret); + return ret; +} + +static void __exit q2spi_dev_exit(void) +{ + pr_info("%s PID=%d\n", __func__, current->pid); + platform_driver_unregister(&q2spi_geni_driver); +} + +module_init(q2spi_dev_init); +module_exit(q2spi_dev_exit); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:q2spi_geni"); diff --git a/drivers/spi/q2spi-msm.h b/drivers/spi/q2spi-msm.h new file mode 100644 index 000000000000..5d8250b698e2 --- /dev/null +++ b/drivers/spi/q2spi-msm.h @@ -0,0 +1,556 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _SPI_Q2SPI_H_ +#define _SPI_Q2SPI_H_ + +#include +#include +#include +#include +#include +#include +//#include +#include +#include +#include +#include +#include "q2spi-gsi.h" + +#define DATA_WORD_LEN 4 +#define SMA_BUF_SIZE 4096 +#define MAX_CR_SIZE 24 /* Max CR size is 24 bytes per CR */ +#define MAX_RX_CRS 4 +#define RX_DMA_CR_BUF_SIZE (MAX_CR_SIZE * MAX_RX_CRS) +#define Q2SPI_MAX_BUF 2 +#define XFER_TIMEOUT_OFFSET 500 +#define TIMEOUT_MSECONDS 10 /* 10 milliseconds */ +#define RETRIES 1 +#define Q2SPI_MAX_DATA_LEN 4096 + +/* Host commands */ +#define HC_DB_REPORT_LEN_READ 1 +#define HC_DB_REPORT_BODY_READ 2 +#define HC_ABORT 3 +#define HC_DATA_READ 5 +#define HC_DATA_WRITE 6 +#define HC_SMA_READ 5 +#define HC_SMA_WRITE 6 +#define HC_SOFT_RESET 0xF +#define CM_FLOW 1 +#define MC_FLOW 0 +#define CLIENT_INTERRUPT 1 //need to confirm this +#define SEGMENT_LST 1 //need to confirm this +#define LOCAL_REG_ACCESS 0 +#define SYSTEM_MEMORY_ACCESS 1 +#define CLIENT_ADDRESS 1 +#define NO_CLIENT_ADDRESS 0 + +#define HC_SOFT_RESET_FLAGS 0xF +#define HC_SOFT_RESET_CODE 0x2 + +/* Client Requests */ +#define ADDR_LESS_WR_ACCESS 3 +#define ADDR_LESS_RD_ACCESS 4 +#define BULK_ACCESS_STATUS 8 + +#define Q2SPI_HEADER_LEN 7 //7 bytes header excluding checksum we use in SW +#define DMA_Q2SPI_SIZE 2048 +#define MAX_DW_LEN_1 4 // 4DWlen +#define MAX_DW_LEN_2 1024 //for 1K DWlen +#define CS_LESS_MODE 0 +#define INTR_HIGH_POLARITY 1 + +#define MAX_TX_SG (3) +#define NUM_Q2SPI_XFER (10) +#define Q2SPI_START_TID_ID (0) +#define Q2SPI_END_TID_ID (8) + +/* Q2SPI specific SE GENI registers */ +#define IO_MACRO_IO3_DATA_IN_SEL_MASK GENMASK(15, 14) +#define IO_MACRO_IO3_DATA_IN_SEL_SHIFT 14 +#define IO_MACRO_IO3_DATA_IN_SEL 1 +#define SE_SPI_TRANS_CFG 0x25c +#define CS_TOGGLE BIT(1) +#define SPI_NOT_USED_CFG1 BIT(2) +#define SE_SPI_PRE_POST_CMD_DLY 0x274 +#define SPI_DELAYS_COUNTERS 0x278 +#define M_GP_CNT4_TAN 0 +#define M_GP_CNT4_TAN_MASK GENMASK(9, 0) +#define M_GP_CNT5_TE2D 
GENMASK(19, 10) +#define M_GP_CNT5_TE2D_SHIFT 10 +#define M_GP_CNT6_CN GENMASK(29, 20) +#define M_GP_CNT6_CN_SHIFT 20 +#define SE_GENI_CFG_REG95 0x27C +#define M_GP_CNT7 GENMASK(9, 0) +#define M_GP_CNT7_TSN 0 +#define SPI_INTER_WORDS_DLY 0 +//#define SPI_CS_CLK_DLY 0x50 //80 from VI SW +#define SPI_CS_CLK_DLY 0x80 //128 from ganges SW +#define SPI_PIPE_DLY_TPM 0x320 //800 from VI SW +#define SE_GENI_CFG_REG103 0x29C +#define S_GP_CNT5 GENMASK(19, 10) +#define S_GP_CNT5_SHIFT 10 +#define S_GP_CNT5_TDN 0 +#define SE_GENI_CFG_REG104 0x2A0 +#define S_GP_CNT7 GENMASK(9, 0) +//#define S_GP_CNT7_SSN 0x50 //80 from VI SW +#define S_GP_CNT7_SSN 0x80 //128 from ganges SW +//#define M_GP_CNT6_CN_DELAY 0x3f //63 from VI SW +#define M_GP_CNT6_CN_DELAY 0x50 //trying with 80 from SW + +#define SE_SPI_WORD_LEN 0x268 +#define WORD_LEN_MSK GENMASK(9, 0) +#define MIN_WORD_LEN 4 +#define NUMBER_OF_DATA_LINES GENMASK(1, 0) +#define PARAM_14 BIT(14) +#define SE_GENI_CGC_CTRL 0x28 +#define SE_GENI_CFG_SEQ_START 0x84 +#define SE_GENI_CFG_STATUS 0x88 +#define SE_UART_TX_TRANS_CFG 0x25C +#define CFG_SEQ_DONE BIT(1) +#define SPI_CS_CLK_DL 0 +#define SPI_PRE_POST_CMD_DLY 0 + +#define SE_SPI_CPHA 0x224 +#define CPHA BIT(0) +#define SE_SPI_CPOL 0x230 +#define CPOL BIT(2) +#define SPI_LSB_TO_MSB 0 +#define SPI_MSB_TO_LSB 1 + +#define SE_SPI_TX_TRANS_LEN 0x26c +#define SE_SPI_RX_TRANS_LEN 0x270 +#define TRANS_LEN_MSK GENMASK(23, 0) + +/* HRF FLOW Info */ +#define HRF_ENTRY_OPCODE 3 +#define HRF_ENTRY_TYPE 3 +#define HRF_ENTRY_FLOW 0 +#define HRF_ENTRY_PARITY 0 +#define HRF_ENTRY_DATA_LEN 16 //HRF entry always has DW=3 + +#define LRA_SINGLE_REG_LENGTH 4 + +/* M_CMD OP codes for Q2SPI */ +#define Q2SPI_TX_ONLY (1) +#define Q2SPI_RX_ONLY (2) +#define Q2SPI_TX_RX (7) + +/* M_CMD params for Q2SPI */ +#define PRE_CMD_DELAY BIT(0) +#define TIMESTAMP_BEFORE BIT(1) +#define TIMESTAMP_AFTER BIT(3) +#define POST_CMD_DELAY BIT(4) +#define Q2SPI_MODE GENMASK(11, 8) +#define Q2SPI_MODE_SHIFT 8 +#define 
SINGLE_SDR_MODE 0 +#define Q2SPI_CMD BIT(14) + +#define CS_MODE CS_LESS_MODE +#define Q2SPI_INTR_POL INTR_HIGH_POLARITY + +#define CR_BULK_DATA_size 1 +#define CR_DMA_DATA_size 7 +// max Minor devices +#define MAX_DEV 2 +#define DEVICE_NAME_MAX_LEN 64 + +//Q2SPI specific configuration +#define QSPI_NUM_CS 2 +#define QSPI_BYTES_PER_WORD 4 + +//#define Q2SPI_LSB_FIRST _BITUL(3) /* per-word bits-on-wire */ + +#define Q2SPI_INFO(q2spi_ptr, x...) do { \ +if (q2spi_ptr) { \ + ipc_log_string(q2spi_ptr->ipc, x); \ + if (q2spi_ptr->dev) \ + q2spi_trace_log(q2spi_ptr->dev, x); \ + pr_info(x); \ +} \ +} while (0) + +#define Q2SPI_DEBUG(q2spi_ptr, x...) do { \ +if (q2spi_ptr) { \ + GENI_SE_DBG(q2spi_ptr->ipc, false, q2spi_ptr->dev, x); \ + if (q2spi_ptr->dev) \ + q2spi_trace_log(q2spi_ptr->dev, x); \ +} \ +} while (0) + +#define Q2SPI_ERROR(q2spi_ptr, x...) do { \ +if (q2spi_ptr) { \ + GENI_SE_ERR(q2spi_ptr->ipc, true, q2spi_ptr->dev, x); \ + if (q2spi_ptr->dev) \ + q2spi_trace_log(q2spi_ptr->dev, x); \ +} \ +} while (0) + +#define DATA_BYTES_PER_LINE (64) +#define CHUNK_SIZE (16) + +/* global storage for device Major number */ +static int q2spi_cdev_major; +static int q2spi_alloc_count; + +enum abort_code { + TERMINATE_CMD = 0, + ERR_DUPLICATE_ID = 1, + ERR_NOT_VALID = 2, + ERR_ACCESS_BLOCKED = 3, + ERR_DWLEN = 4, + OTHERS = 5, +}; + +struct q2spi_mc_hrf_entry { + u8 cmd:4; + u8 flow:1; + u8 type:2; + u8 parity:1; + u8 resrv_0:4; + u8 flow_id:4; + u8 resrv_1:4; + u8 dwlen_part1:4; + u8 dwlen_part2:8; + u8 dwlen_part3:8; + u8 arg1:8; + u8 arg2:8; + u8 arg3:8; + u8 reserved[8]; +}; + +/** + * struct q2spi_ch_header structure of cr header + * @flow: flow direction of cr hdr, 1: CM flow, 0: MC flow + */ +struct q2spi_cr_header { + u8 cmd:4; + u8 flow:1; + u8 type:2; + u8 parity:1; +}; + +struct q2spi_client_bulk_access_pkt { + u8 cmd:4; + u8 flow:1; + u8 rsvd:2; + u8 parity:1; + u8 status:4; + u8 flow_id:4; + u8 reserved[2]; +}; + +struct q2spi_client_dma_pkt { + u8 seg_len:4; + 
u8 flow_id:4; + u8 interrupt:1; + u8 seg_last:1; + u8 channel:2; + u8 dw_len_part1:4; + u8 dw_len_part2:8; + u8 dw_len_part3:8; + u8 arg1:8; + u8 arg2:8; + u8 arg3:8; +}; + +struct q2spi_host_variant1_pkt { + u8 cmd:4; + u8 flow:1; + u8 interrupt:1; + u8 seg_last:1; + u8 rsvd:1; + u8 dw_len:2; + u8 access_type:1; + u8 address_mode:1; + u8 flow_id:4; + u8 reg_offset; + u8 reserved[4]; + u8 data_buf[16]; + u8 status; +}; + +struct q2spi_host_variant4_5_pkt { + u8 cmd:4; + u8 flow:1; + u8 interrupt:1; + u8 seg_last:1; + u8 rsvd:1; + u8 dw_len_part1:2; + u8 access_type:1; + u8 address_mode:1; + u8 flow_id:4; + u8 dw_len_part2; + u8 rsvd_1[4]; + u8 data_buf[4096]; + u8 status; +}; + +struct q2spi_host_abort_pkt { + u8 cmd:4; + u8 rsvd:4; + u8 code:4; + u8 flow_id:4; + u8 reserved[5]; +}; + +struct q2spi_host_soft_reset_pkt { + u8 cmd:4; + u8 flags:4; + u8 code:4; + u8 flow_id:4; + u8 reserved[5]; +}; + +enum cr_var_type { + VARIANT_T_3 = 1, //T:3 DMA CR type + VARIANT_T_4 = 2, //TODO check + VARIANT_T_5 = 3, //TODO check +}; + +enum var_type { + VARIANT_1 = 1, + VARIANT_2 = 2, + VARIANT_3 = 3, + VARIANT_4 = 4, + VARIANT_5 = 5, + VARIANT_1_HRF = 6, + VAR_ABORT = 7, + VAR_SOFT_RESET = 8, +}; + +/** + * struct q2spi_chrdev - structure for character device + * q2spi_dev: q2spi device + * @cdev: cdev pointer + * @major: major number of q2spi device + * @minor: minor number of q2spi device + * @dev: basic device structure. 
+ * @dev_name: name of the device + * @class_dev: pointer to char dev class + * @q2spi_class: pointer to q2spi class + */ +struct q2spi_chrdev { + dev_t q2spi_dev; + struct cdev cdev[MAX_DEV]; + int major; + int minor; + struct device *dev; + char dev_name[DEVICE_NAME_MAX_LEN]; + struct device *class_dev; + struct class *q2spi_class; +}; + +/** + * struct q2spi_dma_transfer - q2spi transfer dmadata + * @tx_buf: TX data buffer + * @rx_buf: RX data buffer + * @tx_len: length of the Tx transfer + * @rx_len: length of the rx transfer + * @tx_dma: dma pointer for Tx transfer + * @rx_dma: dma pointer for Rx transfer + * @cmd: q2spi cmd type + * @tid: Unique Transaction ID. Used for q2spi messages. + */ +struct q2spi_dma_transfer { + void *tx_buf; + void *rx_buf; + unsigned int tx_len; + unsigned int rx_len; + unsigned int tx_data_len; + unsigned int rx_data_len; + dma_addr_t tx_dma; + dma_addr_t rx_dma; + enum cmd_type cmd; + int tid; //tid and flow_id same check ? + struct list_head queue; +}; + +/** + * struct q2spi_geni - structure to store Q2SPI GENI information + * @wrapper_dev: qupv3 wrapper device pointer + * @dev: q2spi device pointer + * @base: pointer to ioremap()'d registers + * @m_ahb_clk: master ahb clock for the controller + * @s_ahb_clk: slave ahb clock for the controller + * @se_clk: serial engine clock + * @geni_pinctrl: pin-controller's instance + * @geni_gpio_active: active state pin control + * @geni_gpio_sleep: sleep state pin control + * q2spi_chrdev: cdev structure + * @geni_se: stores info parsed from device tree + * @q2spi_dma_transfer: stores Q2SPI transfer dma information + * @q2spi_gsi: stores GSI structure information + * @xfer: reference to q2spi_dma_transfer structure + * @db_xfer: reference to q2spi_dma_transfer structure for doorbell + * @req: reference to q2spi request structure + * @c_req: reference to q2spi client request structure + * @rx_fifo_depth: RX FIFO depth + * @tx_fifo_depth: TX FIFO depth + * @tx_fifo_width: TX FIFO width + * 
@setup_config0: used to mark config0 setup completion + * @irq: IRQ of the SE + * @lock: Lock to protect xfer + * @tid_idr: tid id allocator + * @readq: waitqueue for rx data. + */ +struct q2spi_geni { + struct device *wrapper_dev; + struct device *dev; + void __iomem *base; + struct clk *m_ahb_clk; + struct clk *s_ahb_clk; + struct clk *se_clk; + struct pinctrl *geni_pinctrl; + struct pinctrl_state *geni_gpio_active; + struct pinctrl_state *geni_gpio_sleep; + struct q2spi_chrdev chrdev; + struct geni_se se; + struct q2spi_gsi *gsi; + bool qup_gsi_err; + struct q2spi_dma_transfer *xfer; + struct q2spi_dma_transfer *db_xfer; + struct q2spi_request *req; + struct q2spi_client_request *c_req; + bool setup_config0; + int irq; + struct list_head tx_queue_list; + struct list_head rx_queue_list; + struct list_head cr_queue_list; + struct list_head hc_cr_queue_list; + struct kthread_worker *kworker; + struct kthread_work send_messages; + /* lock to protect gsi operations one at a time */ + struct mutex gsi_lock; + /* lock to protect transfer id allocation and free */ + spinlock_t txn_lock; + /* lock to protect HC operations one at a time*/ + struct mutex queue_lock; + /* lock to protect CR of operations one at a time*/ + spinlock_t cr_queue_lock; + u32 max_speed_hz; + u32 cur_speed_hz; + int oversampling; + int xfer_mode; + int cur_xfer_mode; + bool gsi_mode; /* GSI Mode */ + void *q2spi_buf; + bool cmd_done; + struct completion tx_cb; + struct completion rx_cb; + atomic_t rx_avail; + struct idr tid_idr; + wait_queue_head_t readq; + void *rx_buf; + dma_addr_t rx_dma; + bool hrf_flow; + struct completion doorbell_up; + void *var1_buf[Q2SPI_MAX_BUF]; + dma_addr_t var1_dma_buf[Q2SPI_MAX_BUF]; + void *var5_buf[Q2SPI_MAX_BUF]; + dma_addr_t var5_dma_buf[Q2SPI_MAX_BUF]; + void *cr_buf[Q2SPI_MAX_BUF]; + dma_addr_t cr_dma_buf[Q2SPI_MAX_BUF]; + void *var1_buf_used[Q2SPI_MAX_BUF]; + void *var5_buf_used[Q2SPI_MAX_BUF]; + void *cr_buf_used[Q2SPI_MAX_BUF]; + void 
*bulk_buf[Q2SPI_MAX_BUF];
	dma_addr_t bulk_dma_buf[Q2SPI_MAX_BUF];
	void *bulk_buf_used[Q2SPI_MAX_BUF];
	dma_addr_t dma_buf;
	struct completion sync_wait;
	bool init; /* presumably set once HW init is complete — TODO confirm */
	void *ipc; /* IPC logging context, may be NULL */
	struct work_struct q2spi_doorbell_work;
	struct workqueue_struct *doorbell_wq;
	struct q2spi_cr_packet *cr_pkt;
	bool doorbell_setup;
	struct qup_q2spi_cr_header_event q2spi_cr_hdr_event;
	wait_queue_head_t read_wq;
};

/**
 * struct q2spi_cr_packet - structure for Q2SPI CR packet
 *
 * @cr_hdr: array of pointers to the CR headers of this packet
 * @var3_pkt: client DMA packet (4.2.2.3 Variant 4 T=3)
 * @bulk_pkt: bulk access status packet (4.2.2.5 Bulk Access Status)
 * @vtype: variant type.
 * @hrf_flow_id: flow id used for transaction.
 * @list: list for CR packets.
 * @no_of_valid_crs: number of valid CRs carried in this packet
 * @type: per-CR kind bitfield, 2 bits per CR (01 -> bulk, 02 -> var3)
 * @xfer: DMA transfer associated with this CR packet
 */
struct q2spi_cr_packet {
	struct q2spi_cr_header *cr_hdr[4];
	struct q2spi_client_dma_pkt var3_pkt; /* 4.2.2.3 Variant 4 T=3 */
	struct q2spi_client_bulk_access_pkt bulk_pkt; /* 4.2.2.5 Bulk Access Status */
	enum cr_var_type vtype;
	u8 hrf_flow_id;
	struct list_head list;
	int no_of_valid_crs;
	u8 type; /* 01 -> bulk, 02 -> var3 (01 10 10 01) */
	struct q2spi_dma_transfer *xfer;
};

/**
 * struct q2spi_packet - structure for Q2SPI packet
 *
 * @m_cmd_param: cmd corresponding to q2spi_packet
 * @var1_pkt: pointer for HC variant1_pkt structure
 * @var4_pkt: pointer for HC variant4_5_pkt structure
 * @var5_pkt: pointer for HC variant4_5_pkt structure
 * @abort_pkt: pointer for abort_pkt structure
 * @soft_reset_pkt: pointer for q2spi_soft_reset_pkt structure
 * @vtype: variant type.
 * @valid: packet valid or not.
 * @hrf_flow_id: flow id used for transaction.
 * @status: success or failure xfer status
 * @var1_tx_dma: variant_1 tx_dma buffer pointer
 * @var5_tx_dma: variant_5 tx_dma buffer pointer
 * @sync: sync or async mode of transfer
 * @q2spi: pointer for q2spi_geni structure
 * @list: list for hc packets.
 * @in_use: Represents if packet is under use
 * @data_length: Represents data length of the packet transfer
 */
struct q2spi_packet {
	unsigned int m_cmd_param;
	struct q2spi_host_variant1_pkt *var1_pkt; /* 4.4.3.1 Variant 1 */
	struct q2spi_host_variant4_5_pkt *var4_pkt; /* 4.4.3.3 Variant 4 */
	struct q2spi_host_variant4_5_pkt *var5_pkt; /* 4.4.3.3 Variant 5 */
	struct q2spi_host_abort_pkt *abort_pkt; /* 4.4.4 Abort Command */
	struct q2spi_host_soft_reset_pkt *soft_reset_pkt; /* 4.4.6.2 Soft Reset Command */
	enum var_type vtype;
	bool valid;
	u8 hrf_flow_id;
	enum xfer_status status;
	dma_addr_t var1_tx_dma;
	dma_addr_t var5_tx_dma;
	bool sync;
	struct q2spi_geni *q2spi;
	struct list_head list;
	bool in_use;
	unsigned int data_length;
};

/* Driver-internal API shared between q2spi-msm-geni.c and q2spi-gsi.c */
void q2spi_doorbell(struct q2spi_geni *q2spi, const struct qup_q2spi_cr_header_event *event);
void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void *ptr);
void q2spi_geni_se_dump_regs(struct q2spi_geni *q2spi);
void q2spi_dump_ipc(struct q2spi_geni *q2spi, void *ipc_ctx, char *prefix, char *str, int size);
void q2spi_add_req_to_rx_queue(struct q2spi_geni *q2spi, u32 status, u32 cmd);
void q2spi_trace_log(struct device *dev, const char *fmt, ...);
void dump_ipc(struct q2spi_geni *q2spi, void *ctx, char *prefix, char *str, int size);
void *q2spi_kzalloc(struct q2spi_geni *q2spi, int size);
void q2spi_kfree(struct q2spi_geni *q2spi, void *ptr);
int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt);
int q2spi_alloc_xfer_tid(struct q2spi_geni *q2spi);
int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi);
int check_gsi_transfer_completion(struct q2spi_geni *q2spi);
int check_gsi_transfer_completion_rx(struct q2spi_geni *q2spi);

#endif /* _SPI_Q2SPI_H_ */
diff --git a/drivers/spi/q2spi-slave-reg.h b/drivers/spi/q2spi-slave-reg.h
new file mode 100644
index 000000000000..396945d54c18
--- /dev/null
+++ b/drivers/spi/q2spi-slave-reg.h
@@ -0,0 +1,36 @@
+/*
SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _SPI_Q2SPI_SLAVE_H_
#define _SPI_Q2SPI_SLAVE_H_

/*
 * Q2SPI slave-side register map.
 * Offsets below are relative to Q2SPI_SLAVE_BASE and are accessed over the
 * Q2SPI link (local register access), not memory-mapped on the host.
 */
#define Q2SPI_SLAVE_BASE		0x42808000
#define Q2SPI_OFFSET_MASK		0x4
#define Q2SPI_HW_VERSION		0x00000000
#define Q2SPI_DRIVER_VERSION		0x00000004
#define Q2SPI_OP_MODE1			0x00000010
#define Q2SPI_OP_MODE2			0x00000014
#define Q2SPI_HRF_PUSH_ADDRESS		0x00000018
#define Q2SPI_CAP0			0x00000024
#define Q2SPI_CAP1			0x00000028
#define Q2SPI_SCRATCH0			0x00000030
#define Q2SPI_SCRATCH1			0x00000034
#define Q2SPI_SCRATCH2			0x00000038
#define Q2SPI_SCRATCH3			0x0000003C
#define Q2SPI_DB_STATUS			0x00000040
#define Q2SPI_ABORT_STATUS		0x00000044
#define Q2SPI_CLIENT_STATE		0x00000048
#define Q2SPI_RUNTIME_STATUS		0x0000004C
#define Q2SPI_TDB_FREE_SPACE		0x00000050
#define Q2SPI_SLAVE_ERROR		0x00000054
#define Q2SPI_HDR_ERROR			0x00000058
#define Q2SPI_ERROR_EN			0x0000005C
/* n selects one of the SMA data words (4 bytes apart) */
#define Q2SPI_SMA_DATA(n)		(0x00000070 + (0x4 * (n)))
#define Q2SPI_SMA_ADDR1			0x00000080
#define Q2SPI_SMA_ADDR2			0x00000084
#define Q2SPI_SMA_CTRL			0x00000088
#define Q2SPI_PURGE_COMPLETE		0x0000008C
#define Q2SPI_HOST_CFG			0x00000090
#endif /* _SPI_Q2SPI_SLAVE_H_ */
diff --git a/drivers/spi/q2spi-trace.h b/drivers/spi/q2spi-trace.h
new file mode 100644
index 000000000000..9158832b576f
--- /dev/null
+++ b/drivers/spi/q2spi-trace.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM q2spi_trace + +#if !defined(_TRACE_Q2SPI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_Q2SPI_TRACE_H + +#include +#include + +#define MAX_MSG_LEN 256 + +TRACE_EVENT(q2spi_log_info, + TP_PROTO(const char *name, struct va_format *vaf), + TP_ARGS(name, vaf), + TP_STRUCT__entry(__string(name, name) + __dynamic_array(char, msg, MAX_MSG_LEN)), + TP_fast_assign(__assign_str(name, name); + WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg), MAX_MSG_LEN, + vaf->fmt, *vaf->va) >= MAX_MSG_LEN);), + TP_printk("%s: %s", __get_str(name), __get_str(msg)) +); + +#endif /* _TRACE_Q2SPI_TRACE_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE q2spi-trace +#include + diff --git a/include/linux/msm_gpi.h b/include/linux/msm_gpi.h index f4ea63f77c76..24f3aa8b1367 100644 --- a/include/linux/msm_gpi.h +++ b/include/linux/msm_gpi.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef __MSM_GPI_H_ @@ -12,6 +13,15 @@ struct __packed msm_gpi_tre { u32 dword[4]; }; +enum GPI_EV_TYPE { + XFER_COMPLETE_EV_TYPE = 0x22, + IMMEDIATE_DATA_EV_TYPE = 0x30, + QUP_NOTIF_EV_TYPE = 0x31, + STALE_EV_TYPE = 0xFF, + QUP_TCE_TYPE_Q2SPI_STATUS = 0x35, + QUP_TCE_TYPE_Q2SPI_CR_HEADER = 0x36, +}; + enum msm_gpi_tre_type { MSM_GPI_TRE_INVALID = 0x00, MSM_GPI_TRE_NOP = 0x01, @@ -202,6 +212,7 @@ enum msm_gpi_cb_event { MSM_GPI_QUP_PENDING_EVENT, MSM_GPI_QUP_EOT_DESC_MISMATCH, MSM_GPI_QUP_SW_ERROR, + MSM_GPI_QUP_CR_HEADER, MSM_GPI_QUP_MAX_EVENT, }; @@ -211,12 +222,32 @@ struct msm_gpi_error_log { u32 error_code; }; +struct __packed qup_q2spi_cr_header_event { + u32 cr_hdr_0 : 8; + u32 cr_hdr_1 : 8; + u32 cr_hdr_2 : 8; + u32 cr_hdr_3 : 8; + u32 cr_ed_byte_0 : 8; + u32 cr_ed_byte_1 : 8; + u32 cr_ed_byte_2 : 8; + u32 cr_ed_byte_3 : 8; + u32 reserved0 : 24; + u8 code : 8; + u32 byte0_len : 4; + u32 reserved1 : 3; + u32 byte0_err : 1; + u32 reserved2 : 8; + u8 type : 8; + u8 ch_id : 8; +}; + struct msm_gpi_cb { enum msm_gpi_cb_event cb_event; u64 status; u64 timestamp; u64 count; struct msm_gpi_error_log error_log; + struct __packed qup_q2spi_cr_header_event q2spi_cr_header_event; }; struct dma_chan; @@ -261,6 +292,8 @@ struct msm_gpi_dma_async_tx_cb_param { u32 status; struct __packed msm_gpi_tre imed_tre; void *userdata; + enum GPI_EV_TYPE tce_type; + u32 q2spi_status:8; }; /* Client drivers of the GPI can call this function to dump the GPI registers diff --git a/include/linux/qcom-geni-se-common.h b/include/linux/qcom-geni-se-common.h index 8d0969e0bc3c..dedeb73e33db 100644 --- a/include/linux/qcom-geni-se-common.h +++ b/include/linux/qcom-geni-se-common.h @@ -44,6 +44,7 @@ if (print) { \ /* In KHz */ #define DEFAULT_SE_CLK 19200 #define SPI_CORE2X_VOTE 51000 +#define Q2SPI_CORE2X_VOTE 100000 #define I2C_CORE2X_VOTE 50000 #define I3C_CORE2X_VOTE 19200 #define APPS_PROC_TO_QUP_VOTE 140000 @@ -56,6 +57,7 @@ if (print) { \ #define SE_DMA_TX_LEN (0xC3C) #define 
SE_DMA_TX_IRQ_EN (0xC48) #define SE_DMA_TX_LEN_IN (0xC54) +#define GENI_SE_DMA_EOT_BUF (BIT(0)) #define SE_DMA_RX_PTR_L (0xD30) #define SE_DMA_RX_PTR_H (0xD34) diff --git a/include/linux/soc/qcom/geni-se.h b/include/linux/soc/qcom/geni-se.h index d2b58201d8b0..f4dab5ace4e5 100644 --- a/include/linux/soc/qcom/geni-se.h +++ b/include/linux/soc/qcom/geni-se.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. - * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef _LINUX_QCOM_GENI_SE @@ -37,6 +37,7 @@ enum geni_se_protocol_type { GENI_SE_I2C, GENI_SE_I3C, GENI_SE_SPI_SLAVE, + GENI_SE_Q2SPI = 0xE, }; struct geni_wrapper;