UPSTREAM: scsi: ufs: core: Remove dedicated hwq for dev command

This commit depends on "scsi: ufs: core: mcq: Fix the incorrect OCS value
for the device command", which takes care of the OCS value of device
commands in MCQ mode.

With that fix in place, it is safe to share the first hwq between device
commands and I/O requests, so remove the dedicated hwq for device commands.
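
For illustration only (not part of this patch): a minimal user-space sketch of
the indexing change. NR_HW_QUEUES, old_io_hwq() and new_io_hwq() are made-up
names used to model the mapping from blk-mq hardware queue numbers to
hba->uhq[], not actual driver code.

	#include <stdio.h>

	#define NR_HW_QUEUES 4	/* illustrative queue count */

	/* old layout: uhq[0] reserved for dev commands, I/O queues shifted by 1 */
	static unsigned int old_io_hwq(unsigned int blk_mq_hwq)
	{
		return blk_mq_hwq + 1;	/* UFSHCD_MCQ_IO_QUEUE_OFFSET */
	}

	/* new layout: dev commands share uhq[0], so the mapping is identity */
	static unsigned int new_io_hwq(unsigned int blk_mq_hwq)
	{
		return blk_mq_hwq;
	}

	int main(void)
	{
		unsigned int q;

		printf("dev cmd queue: uhq[0] in both layouts\n");
		/* loop bound models the old I/O queue count (one queue was reserved) */
		for (q = 0; q < NR_HW_QUEUES - 1; q++)
			printf("blk-mq hwq %u -> old uhq[%u], new uhq[%u]\n",
			       q, old_io_hwq(q), new_io_hwq(q));
		return 0;
	}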

Bug: 267974767
Tested-by: Po-Wen Kao <powen.kao@mediatek.com>
Signed-off-by: Po-Wen Kao <powen.kao@mediatek.com>
Link: https://lore.kernel.org/r/20230610021553.1213-3-powen.kao@mediatek.com
Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
(cherry picked from commit ccb23dc3435a0d9dbc07c5156a530a4aae6c851a)
Change-Id: I73f1ad5bec71ac6e9efec1fd5231754be92d1b7c

diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -20,12 +20,10 @@
 #define MAX_QUEUE_SUP GENMASK(7, 0)
 #define UFS_MCQ_MIN_RW_QUEUES 2
 #define UFS_MCQ_MIN_READ_QUEUES 0
-#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
 #define UFS_MCQ_MIN_POLL_QUEUES 0
 #define QUEUE_EN_OFFSET 31
 #define QUEUE_ID_OFFSET 16
-#define MAX_DEV_CMD_ENTRIES 2
 #define MCQ_CFG_MAC_MASK GENMASK(16, 8)
 #define MCQ_QCFG_SIZE 0x40
 #define MCQ_ENTRY_SIZE_IN_DWORD 8
@@ -116,8 +114,7 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
         u32 utag = blk_mq_unique_tag(req);
         u32 hwq = blk_mq_unique_tag_to_hwq(utag);
 
-        /* uhq[0] is used to serve device commands */
-        return &hba->uhq[hwq + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+        return &hba->uhq[hwq];
 }
 
 /**
@@ -161,8 +158,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
         /* maxq is 0 based value */
         hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;
 
-        tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
-                     rw_queues;
+        tot_queues = read_queues + poll_queues + rw_queues;
 
         if (hba_maxq < tot_queues) {
                 dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
@@ -170,7 +166,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
                 return -EOPNOTSUPP;
         }
 
-        rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES;
+        rem = hba_maxq;
 
         if (rw_queues) {
                 hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
@@ -196,7 +192,7 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
         for (i = 0; i < HCTX_MAX_TYPES; i++)
                 host->nr_hw_queues += hba->nr_queues[i];
 
-        hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES;
+        hba->nr_hw_queues = host->nr_hw_queues;
         return 0;
 }
@@ -458,8 +454,6 @@ int ufshcd_mcq_init(struct ufs_hba *hba)
 
         /* The very first HW queue serves device commands */
         hba->dev_cmd_queue = &hba->uhq[0];
-        /* Give dev_cmd_queue the minimal number of entries */
-        hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
 
         host->host_tagset = 1;
         return 0;

diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -84,7 +84,6 @@ int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
 void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
                              struct ufshcd_lrb *lrbp);
-#define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
 #define SD_ASCII_STD true
 #define SD_RAW false
 int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -5596,7 +5596,7 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
         struct ufs_hw_queue *hwq;
 
         if (is_mcq_enabled(hba)) {
-                hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+                hwq = &hba->uhq[queue_num];
 
                 return ufshcd_mcq_poll_cqe_lock(hba, hwq);
         }
@@ -5650,7 +5650,7 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
 
                 utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
                 hwq_num = blk_mq_unique_tag_to_hwq(utag);
-                hwq = &hba->uhq[hwq_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+                hwq = &hba->uhq[hwq_num];
 
                 if (force_compl) {
                         ufshcd_mcq_compl_all_cqes_lock(hba, hwq);