Merge remote-tracking branch into HEAD

* keystone/mirror-android14-6.1-2023-06:
  ANDROID: Update symbol list for mtk
  ANDROID: GKI: Update symbol list for MediaTek
  UPSTREAM: scsi: ufs: core: Remove dedicated hwq for dev command
  BACKPORT: scsi: ufs: mcq: Fix the incorrect OCS value for the device command
  FROMLIST: scsi: ufs: ufs-mediatek: Add MCQ support for MTK platform
  FROMLIST: scsi: ufs: core: Export symbols for MTK driver module
  UPSTREAM: blk-mq: check on cpu id when there is only one ctx mapping

Change-Id: Ib6483ec3e3add0c1cd52dcdb138358f488c82929
Signed-off-by: keystone-kernel-automerger <keystone-kernel-automerger@google.com>
This commit is contained in:
keystone-kernel-automerger 2023-07-20 07:21:20 +00:00
commit 140c3a0ad5
9 changed files with 299 additions and 24 deletions

View file

@ -292675,6 +292675,12 @@ function {
parameter_id: 0x18bd6530
parameter_id: 0x3f54a013
}
function {
id: 0x45846d91
return_type_id: 0xc9082b19
parameter_id: 0x078316ff
parameter_id: 0x6720d32f
}
function {
id: 0x45a8a3c4
return_type_id: 0x00c83ba6
@ -367708,6 +367714,15 @@ elf_symbol {
type_id: 0x6ecc6402
full_name: "sk_alloc"
}
elf_symbol {
id: 0x1295815b
name: "sk_capable"
is_defined: true
symbol_type: FUNCTION
crc: 0xa18d7705
type_id: 0xf1208847
full_name: "sk_capable"
}
elf_symbol {
id: 0xd7cc5056
name: "sk_common_release"
@ -374218,6 +374233,15 @@ elf_symbol {
type_id: 0x11ebebf2
full_name: "ufshcd_mcq_config_esi"
}
elf_symbol {
id: 0x71dad1b2
name: "ufshcd_mcq_config_mac"
is_defined: true
symbol_type: FUNCTION
crc: 0xb385bc56
type_id: 0x125cb480
full_name: "ufshcd_mcq_config_mac"
}
elf_symbol {
id: 0x051c0489
name: "ufshcd_mcq_enable_esi"
@ -374227,6 +374251,15 @@ elf_symbol {
type_id: 0x1178942c
full_name: "ufshcd_mcq_enable_esi"
}
elf_symbol {
id: 0x562c9a11
name: "ufshcd_mcq_make_queues_operational"
is_defined: true
symbol_type: FUNCTION
crc: 0x32eb2fea
type_id: 0x1178942c
full_name: "ufshcd_mcq_make_queues_operational"
}
elf_symbol {
id: 0x56d1da85
name: "ufshcd_mcq_poll_cqe_lock"
@ -374245,6 +374278,15 @@ elf_symbol {
type_id: 0x30f8e5d3
full_name: "ufshcd_mcq_poll_cqe_nolock"
}
elf_symbol {
id: 0xaefd3622
name: "ufshcd_mcq_read_cqis"
is_defined: true
symbol_type: FUNCTION
crc: 0x41e6e314
type_id: 0x45846d91
full_name: "ufshcd_mcq_read_cqis"
}
elf_symbol {
id: 0x3ecd46f0
name: "ufshcd_mcq_write_cqis"
@ -385458,6 +385500,7 @@ interface {
symbol_id: 0x4f6b19f1
symbol_id: 0xd4ea779d
symbol_id: 0x67a38bc7
symbol_id: 0x1295815b
symbol_id: 0xd7cc5056
symbol_id: 0x40df740b
symbol_id: 0x89a97545
@ -386182,9 +386225,12 @@ interface {
symbol_id: 0xcd6f9634
symbol_id: 0xba7435fd
symbol_id: 0xb59c5eaf
symbol_id: 0x71dad1b2
symbol_id: 0x051c0489
symbol_id: 0x562c9a11
symbol_id: 0x56d1da85
symbol_id: 0xe74af45a
symbol_id: 0xaefd3622
symbol_id: 0x3ecd46f0
symbol_id: 0xde541a7b
symbol_id: 0xe6e08555

View file

@ -98,10 +98,10 @@
blocking_notifier_call_chain
blocking_notifier_chain_register
blocking_notifier_chain_unregister
bpf_trace_run1
bpf_trace_run10
bpf_trace_run11
bpf_trace_run12
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
@ -690,8 +690,11 @@
drm_atomic_helper_commit_modeset_disables
drm_atomic_helper_commit_modeset_enables
drm_atomic_helper_commit_planes
__drm_atomic_helper_connector_destroy_state
drm_atomic_helper_connector_destroy_state
__drm_atomic_helper_connector_duplicate_state
drm_atomic_helper_connector_duplicate_state
__drm_atomic_helper_connector_reset
drm_atomic_helper_connector_reset
__drm_atomic_helper_crtc_destroy_state
__drm_atomic_helper_crtc_duplicate_state
@ -1089,6 +1092,7 @@
ida_destroy
ida_free
idr_alloc
idr_alloc_cyclic
idr_alloc_u32
idr_destroy
idr_find
@ -1180,6 +1184,7 @@
iommu_report_device_fault
iommu_unmap
ioremap_prot
io_schedule_timeout
iounmap
iov_iter_init
iov_iter_kvec
@ -1235,9 +1240,9 @@
is_vmalloc_addr
iterate_dir
iterate_fd
jiffies
jiffies_64_to_clock_t
jiffies64_to_nsecs
jiffies
jiffies_to_msecs
jiffies_to_usecs
kasan_flag_enabled
@ -1430,8 +1435,8 @@
memremap
memscan
mem_section
memset64
memset
memset64
__memset_io
memstart_addr
memunmap
@ -1563,8 +1568,8 @@
nla_find
nla_memcpy
__nla_parse
nla_put_64bit
nla_put
nla_put_64bit
nla_put_nohdr
nla_reserve
nla_strscpy
@ -1687,6 +1692,8 @@
out_of_line_wait_on_bit_timeout
overflowuid
page_endio
page_pinner_inited
__page_pinner_put_page
page_pool_alloc_pages
page_pool_create
page_pool_destroy
@ -1856,6 +1863,7 @@
pm_runtime_force_suspend
pm_runtime_get_if_active
__pm_runtime_idle
pm_runtime_irq_safe
__pm_runtime_resume
pm_runtime_set_autosuspend_delay
__pm_runtime_set_status
@ -2061,6 +2069,7 @@
remove_proc_subtree
remove_wait_queue
request_firmware
request_firmware_direct
request_firmware_nowait
__request_module
__request_percpu_irq
@ -2123,6 +2132,7 @@
rtnl_unregister
rtnl_unregister_all
runqueues
sbitmap_weight
sched_clock
sched_clock_register
sched_feat_keys
@ -2145,11 +2155,13 @@
scnprintf
scsi_autopm_get_device
scsi_autopm_put_device
scsi_block_requests
scsi_device_get
scsi_device_put
scsi_execute_cmd
__scsi_iterate_devices
scsi_print_sense_hdr
scsi_unblock_requests
sdio_claim_host
sdio_claim_irq
sdio_disable_func
@ -2283,6 +2295,7 @@
skb_try_coalesce
skb_tstamp_tx
skb_unlink
sk_capable
sk_common_release
sk_error_report
sk_filter_trim_cap
@ -2501,6 +2514,7 @@
tasklist_lock
__task_pid_nr_ns
__task_rq_lock
task_rq_lock
thermal_cooling_device_unregister
thermal_of_cooling_device_register
thermal_pressure
@ -2576,6 +2590,7 @@
__traceiter_android_rvh_tick_entry
__traceiter_android_rvh_try_to_wake_up
__traceiter_android_rvh_try_to_wake_up_success
__traceiter_android_rvh_uclamp_eff_get
__traceiter_android_rvh_update_cpu_capacity
__traceiter_android_rvh_wake_up_new_task
__traceiter_android_vh_alter_futex_plist_add
@ -2670,6 +2685,7 @@
__tracepoint_android_rvh_tick_entry
__tracepoint_android_rvh_try_to_wake_up
__tracepoint_android_rvh_try_to_wake_up_success
__tracepoint_android_rvh_uclamp_eff_get
__tracepoint_android_rvh_update_cpu_capacity
__tracepoint_android_rvh_wake_up_new_task
__tracepoint_android_vh_alter_futex_plist_add
@ -2801,6 +2817,7 @@
udp_tunnel6_xmit_skb
udp_tunnel_sock_release
udp_tunnel_xmit_skb
ufshcd_config_pwr_mode
ufshcd_delay_us
ufshcd_dme_configure_adapt
ufshcd_dme_get_attr
@ -2810,14 +2827,19 @@
ufshcd_get_pwr_dev_param
ufshcd_hba_enable
ufshcd_hba_stop
ufshcd_hold
ufshcd_init_pwr_dev_param
ufshcd_link_recovery
ufshcd_make_hba_operational
ufshcd_mcq_config_mac
ufshcd_mcq_make_queues_operational
ufshcd_mcq_read_cqis
ufshcd_pltfrm_init
ufshcd_query_attr
ufshcd_query_descriptor_retry
ufshcd_query_flag
ufshcd_read_desc_param
ufshcd_release
ufshcd_remove
ufshcd_resume_complete
ufshcd_runtime_resume

View file

@ -1188,8 +1188,9 @@ bool blk_mq_complete_request_remote(struct request *rq)
* or a polled request, always complete locally,
* it's pointless to redirect the completion.
*/
if (rq->mq_hctx->nr_ctx == 1 ||
rq->cmd_flags & REQ_POLLED)
if ((rq->mq_hctx->nr_ctx == 1 &&
rq->mq_ctx->cpu == raw_smp_processor_id()) ||
rq->cmd_flags & REQ_POLLED)
return false;
if (blk_mq_complete_need_ipi(rq)) {

View file

@ -20,12 +20,10 @@
#define MAX_QUEUE_SUP GENMASK(7, 0)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
#define UFS_MCQ_MIN_POLL_QUEUES 0
#define QUEUE_EN_OFFSET 31
#define QUEUE_ID_OFFSET 16
#define MAX_DEV_CMD_ENTRIES 2
#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
#define MCQ_QCFG_SIZE 0x40
#define MCQ_ENTRY_SIZE_IN_DWORD 8
@ -99,6 +97,7 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
/**
* ufshcd_mcq_req_to_hwq - find the hardware queue on which the
@ -115,8 +114,7 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
u32 utag = blk_mq_unique_tag(req);
u32 hwq = blk_mq_unique_tag_to_hwq(utag);
/* uhq[0] is used to serve device commands */
return &hba->uhq[hwq + UFSHCD_MCQ_IO_QUEUE_OFFSET];
return &hba->uhq[hwq];
}
/**
@ -160,8 +158,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
/* maxq is 0 based value */
hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;
tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
rw_queues;
tot_queues = read_queues + poll_queues + rw_queues;
if (hba_maxq < tot_queues) {
dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
@ -169,7 +166,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
return -EOPNOTSUPP;
}
rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES;
rem = hba_maxq;
if (rw_queues) {
hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
@ -195,7 +192,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
for (i = 0; i < HCTX_MAX_TYPES; i++)
host->nr_hw_queues += hba->nr_queues[i];
hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES;
hba->nr_hw_queues = host->nr_hw_queues;
return 0;
}
@ -249,6 +246,7 @@ u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{
return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{
@ -402,6 +400,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
MCQ_CFG_n(REG_SQATTR, i));
}
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
@ -455,8 +454,6 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
/* The very first HW queue serves device commands */
hba->dev_cmd_queue = &hba->uhq[0];
/* Give dev_cmd_queue the minimal number of entries */
hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
host->host_tagset = 1;
return 0;

View file

@ -84,7 +84,6 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp);
#define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,

View file

@ -3166,7 +3166,7 @@ retry:
* not trigger any race conditions.
*/
hba->dev_cmd.complete = NULL;
err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe);
err = ufshcd_get_tr_ocs(lrbp, NULL);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
} else {
@ -3262,7 +3262,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
goto out;
hba->dev_cmd.complete = &wait;
hba->dev_cmd.cqe = NULL;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
@ -5520,6 +5519,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
enum utp_ocs ocs;
lrbp = &hba->lrb[task_tag];
lrbp->compl_time_stamp = ktime_get();
@ -5538,7 +5538,11 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
trace_android_vh_ufs_compl_command(hba, lrbp);
hba->dev_cmd.cqe = cqe;
if (cqe) {
ocs = le32_to_cpu(cqe->status) & MASK_OCS;
lrbp->utr_descriptor_ptr->header.dword_2 =
cpu_to_le32(ocs);
}
ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
complete(hba->dev_cmd.complete);
ufshcd_clk_scaling_update_busy(hba);
@ -5592,7 +5596,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
struct ufs_hw_queue *hwq;
if (is_mcq_enabled(hba)) {
hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
hwq = &hba->uhq[queue_num];
return ufshcd_mcq_poll_cqe_lock(hba, hwq);
}
@ -5646,7 +5650,7 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
hwq_num = blk_mq_unique_tag_to_hwq(utag);
hwq = &hba->uhq[hwq_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
hwq = &hba->uhq[hwq_num];
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);

View file

@ -27,8 +27,14 @@
#include <ufs/unipro.h>
#include "ufs-mediatek.h"
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
#undef CREATE_TRACE_POINTS
#define MAX_SUPP_MAC 64
#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
{ .wmanufacturerid = UFS_ANY_VENDOR,
@ -843,6 +849,38 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
}
}
/*
 * ufs_mtk_init_mcq_irq - collect the per-queue MCQ interrupt lines from DT
 * @hba: host controller instance
 *
 * Queries one platform IRQ per hardware queue (UFSHCD_MAX_Q_NR of them) and
 * records each in host->mcq_intr_info[]. On any lookup failure, every entry
 * is invalidated and mcq_nr_intr is reset to 0, which later causes
 * ufs_mtk_mcq_config_resource() to disable MCQ entirely (all-or-nothing).
 */
static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct platform_device *pdev;
int i;
int irq;
host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
pdev = container_of(hba->dev, struct platform_device, dev);
for (i = 0; i < host->mcq_nr_intr; i++) {
/* irq index 0 is legacy irq, sq/cq irq start from index 1 */
irq = platform_get_irq(pdev, i + 1);
if (irq < 0) {
host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
dev_err(hba->dev, "get platform mcq irq fail: %d\n", i);
goto failed;
}
host->mcq_intr_info[i].hba = hba;
host->mcq_intr_info[i].irq = irq;
dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
}
return;
failed:
/* invalidate irq info */
for (i = 0; i < host->mcq_nr_intr; i++)
host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
/* mcq_nr_intr == 0 signals "MCQ interrupts unavailable" to later setup */
host->mcq_nr_intr = 0;
}
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
@ -879,6 +917,8 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Initialize host capability */
ufs_mtk_init_host_caps(hba);
ufs_mtk_init_mcq_irq(hba);
err = ufs_mtk_bind_mphy(hba);
if (err)
goto out_variant_clear;
@ -1176,7 +1216,17 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
else
return err;
err = ufshcd_make_hba_operational(hba);
if (!hba->mcq_enabled) {
err = ufshcd_make_hba_operational(hba);
} else {
ufs_mtk_config_mcq(hba, false);
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
/* Enable MCQ mode */
ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
REG_UFS_MEM_CFG);
}
if (err)
return err;
@ -1500,6 +1550,121 @@ static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
return 0;
}
/*
 * ufs_mtk_get_hba_mac - report Max Active Commands for the MTK controller
 * @hba: host controller instance (unused; the limit is platform-fixed)
 *
 * MCQ vop: returns the fixed MAC value (MAX_SUPP_MAC) for this platform.
 */
static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
{
return MAX_SUPP_MAC;
}
static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
{
struct ufshcd_mcq_opr_info_t *opr;
int i;
hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;
for (i = 0; i < OPR_MAX; i++) {
opr = &hba->mcq_opr[i];
opr->stride = REG_UFS_MCQ_STRIDE;
opr->base = hba->mmio_base + opr->offset;
}
return 0;
}
/*
 * ufs_mtk_mcq_config_resource - locate the MCQ register region
 * @hba: host controller instance
 *
 * MCQ vop: refuses MCQ operation when ufs_mtk_init_mcq_irq() could not
 * obtain the per-queue interrupts, otherwise computes mcq_base from the
 * queue-offset field of the MCQ capability register.
 *
 * Return: 0 on success, -EINVAL when the MCQ IRQs are unavailable.
 */
static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
{
	struct ufs_mtk_host *host = ufshcd_get_variant(hba);

	/* fail mcq initialization if interrupt is not filled properly */
	if (host->mcq_nr_intr == 0) {
		dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
		return -EINVAL;
	}

	hba->mcq_base = hba->mmio_base +
			MCQ_QUEUE_OFFSET(hba->mcq_capabilities);

	return 0;
}
/*
 * ufs_mtk_mcq_intr - per-queue MCQ interrupt handler
 * @irq:         the interrupt line that fired (unused)
 * @__intr_info: the ufs_mtk_mcq_intr_info registered for this queue
 *
 * Reads and write-to-clears the completion-queue interrupt status for the
 * queue, and drains completed CQEs when the tail-entry-push bit is set.
 *
 * Return: always IRQ_HANDLED.
 */
static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
{
	struct ufs_mtk_mcq_intr_info *info = __intr_info;
	struct ufs_hba *hba = info->hba;
	int qid = info->qid;
	u32 cqis = ufshcd_mcq_read_cqis(hba, qid);

	/* Acknowledge (write-to-clear) any pending status bits. */
	if (cqis)
		ufshcd_mcq_write_cqis(hba, cqis, qid);

	if (cqis & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
		ufshcd_mcq_poll_cqe_lock(hba, &hba->uhq[qid]);

	return IRQ_HANDLED;
}
/*
 * ufs_mtk_config_mcq_irq - request every per-queue MCQ interrupt
 * @hba: host controller instance
 *
 * Assigns queue id i to interrupt slot i and requests each IRQ with
 * ufs_mtk_mcq_intr as the handler. Fails fast on the first invalid or
 * unrequestable line; devm ownership means nothing needs explicit freeing.
 *
 * Return: 0 on success, -ENOPARAM for an invalid IRQ entry, or the
 * devm_request_irq() error code.
 */
static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
u32 irq, i;
int ret;
for (i = 0; i < host->mcq_nr_intr; i++) {
irq = host->mcq_intr_info[i].irq;
if (irq == MTK_MCQ_INVALID_IRQ) {
dev_err(hba->dev, "invalid irq. %d\n", i);
return -ENOPARAM;
}
host->mcq_intr_info[i].qid = i;
ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
&host->mcq_intr_info[i]);
dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");
if (ret) {
/* NOTE(review): %d prints the error code, not the irq number —
 * matches mainline, but consider logging irq as well. */
dev_err(hba->dev, "Cannot request irq %d\n", ret);
return ret;
}
}
return 0;
}
/*
 * ufs_mtk_config_mcq - one-time MCQ interrupt setup plus mode enables
 * @hba: host controller instance
 * @irq: true to also request the per-queue interrupt lines
 *
 * On the first call (mcq_set_intr not yet set) it masks the MCQ interrupt
 * enables before optionally requesting the IRQ lines, so no handler can
 * fire half-configured. Every call then (re)enables AH8 cooperation and
 * multi-interrupt mode in REG_UFS_MMIO_OPT_CTRL_0 — the enable must come
 * after handlers are in place, so keep the rmw ordering as-is.
 *
 * Return: 0 on success or the ufs_mtk_config_mcq_irq() error code.
 */
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
int ret = 0;
if (!host->mcq_set_intr) {
/* Disable irq option register */
ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);
if (irq) {
ret = ufs_mtk_config_mcq_irq(hba);
if (ret)
return ret;
}
/* Latch: the irq setup above runs only once per probe */
host->mcq_set_intr = true;
}
ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);
return 0;
}
/*
 * ufs_mtk_config_esi - MCQ vop: configure event-specific interrupts
 * @hba: host controller instance
 *
 * Thin wrapper: MTK implements ESI via the per-queue MCQ interrupts, so
 * this just runs the full MCQ config including IRQ registration.
 */
static int ufs_mtk_config_esi(struct ufs_hba *hba)
{
return ufs_mtk_config_mcq(hba, true);
}
/*
* struct ufs_hba_mtk_vops - UFS MTK specific variant operations
*
@ -1523,6 +1688,11 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.event_notify = ufs_mtk_event_notify,
.config_scaling_param = ufs_mtk_config_scaling_param,
.clk_scale_notify = ufs_mtk_clk_scale_notify,
/* mcq vops */
.get_hba_mac = ufs_mtk_get_hba_mac,
.op_runtime_config = ufs_mtk_op_runtime_config,
.mcq_config_resource = ufs_mtk_mcq_config_resource,
.config_esi = ufs_mtk_config_esi,
};
/**
@ -1569,7 +1739,7 @@ skip_reset:
out:
if (err)
dev_info(dev, "probe failed %d\n", err);
dev_err(dev, "probe failed %d\n", err);
of_node_put(reset_node);
return err;

View file

@ -10,11 +10,27 @@
#include <linux/pm_qos.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
/*
* MCQ define and struct
*/
#define UFSHCD_MAX_Q_NR 8
#define MTK_MCQ_INVALID_IRQ 0xFFFF
/* REG_UFS_MMIO_OPT_CTRL_0 160h */
#define EHS_EN BIT(0)
#define PFM_IMPV BIT(1)
#define MCQ_MULTI_INTR_EN BIT(2)
#define MCQ_CMB_INTR_EN BIT(3)
#define MCQ_AH8 BIT(4)
#define MCQ_INTR_EN_MSK (MCQ_MULTI_INTR_EN | MCQ_CMB_INTR_EN)
/*
* Vendor specific UFSHCI Registers
*/
#define REG_UFS_XOUFS_CTRL 0x140
#define REG_UFS_REFCLK_CTRL 0x144
#define REG_UFS_MMIO_OPT_CTRL_0 0x160
#define REG_UFS_EXTREG 0x2100
#define REG_UFS_MPHYCTRL 0x2200
#define REG_UFS_MTK_IP_VER 0x2240
@ -26,6 +42,13 @@
#define REG_UFS_DEBUG_SEL_B2 0x22D8
#define REG_UFS_DEBUG_SEL_B3 0x22DC
#define REG_UFS_MTK_SQD 0x2800
#define REG_UFS_MTK_SQIS 0x2814
#define REG_UFS_MTK_CQD 0x281C
#define REG_UFS_MTK_CQIS 0x2824
#define REG_UFS_MCQ_STRIDE 0x30
/*
* Ref-clk control
*
@ -136,6 +159,12 @@ struct ufs_mtk_hw_ver {
u8 major;
};
/**
 * struct ufs_mtk_mcq_intr_info - context passed to each MCQ IRQ handler
 * @hba: owning host controller instance
 * @irq: platform interrupt number, or MTK_MCQ_INVALID_IRQ when unset
 * @qid: hardware queue index served by this interrupt
 */
struct ufs_mtk_mcq_intr_info {
struct ufs_hba *hba;
u32 irq;
u8 qid;
};
struct ufs_mtk_host {
struct phy *mphy;
struct pm_qos_request pm_qos_req;
@ -155,6 +184,10 @@ struct ufs_mtk_host {
u16 ref_clk_ungating_wait_us;
u16 ref_clk_gating_wait_us;
u32 ip_ver;
bool mcq_set_intr;
int mcq_nr_intr;
struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR];
};
/*

View file

@ -1297,11 +1297,14 @@ void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);