diff --git a/arch/arm64/configs/vendor/pineapple_GKI.config b/arch/arm64/configs/vendor/pineapple_GKI.config index dc4b0ffd96b5..bc6a5a6df102 100644 --- a/arch/arm64/configs/vendor/pineapple_GKI.config +++ b/arch/arm64/configs/vendor/pineapple_GKI.config @@ -221,7 +221,6 @@ CONFIG_QTI_DDR_COOLING_DEVICE=m CONFIG_QTI_DEVFREQ_CDEV=m CONFIG_QTI_GLINK_ADC=m CONFIG_QTI_HW_KEY_MANAGER=m -CONFIG_QTI_HW_MEMLAT_SCMI_CLIENT=m CONFIG_QTI_IOMMU_SUPPORT=m CONFIG_QTI_PMIC_EUSB2_REPEATER=m CONFIG_QTI_PMIC_GLINK=m @@ -232,7 +231,6 @@ CONFIG_QTI_PMU_SCMI_CLIENT=m CONFIG_QTI_QMI_COOLING_DEVICE=m CONFIG_QTI_QMI_SENSOR_V2=m CONFIG_QTI_SCMI_C1DCVS_PROTOCOL=m -CONFIG_QTI_SCMI_MEMLAT_PROTOCOL=m CONFIG_QTI_SCMI_PMU_PROTOCOL=m CONFIG_QTI_SYS_PM_VX=m CONFIG_QTI_THERMAL_LIMITS_DCVS=m diff --git a/drivers/firmware/arm_scmi/Kconfig b/drivers/firmware/arm_scmi/Kconfig index 661f1c198d7d..02900d90851c 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -169,17 +169,6 @@ config QTI_SCMI_C1DCVS_PROTOCOL This driver defines the comands or message ID's used for this communication and also exposes the ops used by clients. -config QTI_SCMI_MEMLAT_PROTOCOL - tristate "Qualcomm Technologies, Inc. SCMI MEMLAT vendor Protocol" - depends on ARM || ARM64 || COMPILE_TEST - depends on ARM_SCMI_PROTOCOL && QCOM_CPUCP - help - System Control and Management Interface (SCMI) memlat vendor protocol. - This protocol provides interface to communicate with micro controller - which is executing the hw memlat governor. This driver defines the - commands or message ID's used for this communication and also exposes - the ops used by clients. 
- endif #ARM_SCMI_PROTOCOL config ARM_SCMI_POWER_DOMAIN diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 85a0c5f9c3ac..852ca1d71f5a 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -15,7 +15,6 @@ obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o obj-$(CONFIG_QTI_SCMI_PMU_PROTOCOL) += pmu_vendor.o obj-$(CONFIG_QTI_SCMI_C1DCVS_PROTOCOL) += c1dcvs_vendor.o -obj-$(CONFIG_QTI_SCMI_MEMLAT_PROTOCOL) += memlat_vendor.o obj-$(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) += qcom_scmi_vendor.o ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) diff --git a/drivers/firmware/arm_scmi/memlat_vendor.c b/drivers/firmware/arm_scmi/memlat_vendor.c deleted file mode 100644 index fbb83f209446..000000000000 --- a/drivers/firmware/arm_scmi/memlat_vendor.c +++ /dev/null @@ -1,390 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
- */ - -#include -#include "common.h" - -#define MAX_MAP_ENTRIES 14 - -#define SCMI_VENDOR_MSG_START (3) -#define SCMI_VENDOR_MSG_MODULE_START (16) -#define SCMI_MAX_RX_SIZE 128 -#define SCMI_MAX_GET_DATA_SIZE 124 -#define INVALID_IDX 0xFF - -enum scmi_memlat_protocol_cmd { - MEMLAT_SET_LOG_LEVEL = SCMI_VENDOR_MSG_START, - MEMLAT_FLUSH_LOGBUF, - MEMLAT_SET_MEM_GROUP = SCMI_VENDOR_MSG_MODULE_START, - MEMLAT_SET_MONITOR, - MEMLAT_SET_COMMON_EV_MAP, - MEMLAT_SET_GRP_EV_MAP, - MEMLAT_ADAPTIVE_LOW_FREQ, - MEMLAT_ADAPTIVE_HIGH_FREQ, - MEMLAT_GET_ADAPTIVE_CUR_FREQ, - MEMLAT_IPM_CEIL, - MEMLAT_FE_STALL_FLOOR, - MEMLAT_BE_STALL_FLOOR, - MEMLAT_WB_PCT, - MEMLAT_IPM_FILTER, - MEMLAT_FREQ_SCALE_PCT, - MEMLAT_FREQ_SCALE_CEIL_MHZ, - MEMLAT_FREQ_SCALE_FLOOR_MHZ, - MEMLAT_SAMPLE_MS, - MEMLAT_MON_FREQ_MAP, - MEMLAT_SET_MIN_FREQ, - MEMLAT_SET_MAX_FREQ, - MEMLAT_GET_CUR_FREQ, - MEMLAT_START_TIMER, - MEMLAT_STOP_TIMER, - MEMLAT_GET_TIMESTAMP, - MEMLAT_MAX_MSG -}; - -struct node_msg { - uint32_t cpumask; - uint32_t hw_type; - uint32_t mon_type; - uint32_t mon_idx; - char mon_name[MAX_NAME_LEN]; -}; - -struct scalar_param_msg { - uint32_t hw_type; - uint32_t mon_idx; - uint32_t val; -}; - -struct map_table { - uint32_t v1; - uint32_t v2; -}; - -struct map_param_msg { - uint32_t hw_type; - uint32_t mon_idx; - uint32_t nr_rows; - struct map_table tbl[MAX_MAP_ENTRIES]; -}; - -struct ev_map_msg { - uint32_t num_evs; - uint32_t hw_type; - uint8_t cid[MAX_EV_CNTRS]; -}; - -static int scmi_set_ev_map(const struct scmi_protocol_handle *ph, u32 hw_type, - void *buf, u32 msg_id, u32 num_evs) -{ - int ret, i = 0; - struct scmi_xfer *t; - struct ev_map_msg *msg; - uint8_t *src = buf; - - ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(*msg), sizeof(*msg), - &t); - if (ret) - return ret; - - msg = t->tx.buf; - msg->num_evs = cpu_to_le32(num_evs); - msg->hw_type = cpu_to_le32(hw_type); - - for (i = 0; i < num_evs; i++) - msg->cid[i] = src[i]; - - ret = ph->xops->do_xfer(ph, t); - 
ph->xops->xfer_put(ph, t); - - return ret; -} - -static int scmi_set_grp_map(const struct scmi_protocol_handle *ph, u32 hw_type, - void *buf, u32 num_evs) -{ - return scmi_set_ev_map(ph, hw_type, buf, MEMLAT_SET_GRP_EV_MAP, num_evs); -} - -static int scmi_set_common_map(const struct scmi_protocol_handle *ph, void *buf, u32 num_evs) -{ - return scmi_set_ev_map(ph, INVALID_IDX, buf, MEMLAT_SET_COMMON_EV_MAP, num_evs); -} - -static int scmi_set_memgrp_mon(const struct scmi_protocol_handle *ph, - u32 cpus_mpidr, u32 hw_type, u32 mon_type, - u32 mon_idx, const char *mon_name, u32 msg_id) -{ - int ret = 0; - struct scmi_xfer *t; - struct node_msg *msg; - - ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(*msg), sizeof(*msg), - &t); - if (ret) - return ret; - - msg = t->tx.buf; - msg->cpumask = cpu_to_le32(cpus_mpidr); - msg->hw_type = cpu_to_le32(hw_type); - msg->mon_type = cpu_to_le32(mon_type); - msg->mon_idx = cpu_to_le32(mon_idx); - if (mon_name) - snprintf(msg->mon_name, MAX_NAME_LEN, mon_name); - ret = ph->xops->do_xfer(ph, t); - ph->xops->xfer_put(ph, t); - - return ret; -} - -static int scmi_set_mon(const struct scmi_protocol_handle *ph, u32 cpus_mpidr, - u32 hw_type, u32 mon_type, u32 mon_idx, const char *mon_name) -{ - return scmi_set_memgrp_mon(ph, cpus_mpidr, hw_type, mon_type, - mon_idx, mon_name, MEMLAT_SET_MONITOR); -} - -static int scmi_set_mem_grp(const struct scmi_protocol_handle *ph, - u32 cpus_mpidr, u32 hw_type) -{ - return scmi_set_memgrp_mon(ph, cpus_mpidr, hw_type, 0, - 0, NULL, MEMLAT_SET_MEM_GROUP); -} - -static int scmi_freq_map(const struct scmi_protocol_handle *ph, u32 hw_type, - u32 mon_idx, u32 nr_rows, void *buf) -{ - int ret, i = 0; - struct scmi_xfer *t; - struct map_param_msg *msg; - struct map_table *tbl, *src = buf; - - if (nr_rows > MAX_MAP_ENTRIES) - return -EINVAL; - - ret = ph->xops->xfer_get_init(ph, MEMLAT_MON_FREQ_MAP, sizeof(*msg), - sizeof(*msg), &t); - if (ret) - return ret; - - msg = t->tx.buf; - msg->hw_type = 
cpu_to_le32(hw_type); - msg->mon_idx = cpu_to_le32(mon_idx); - msg->nr_rows = cpu_to_le32(nr_rows); - tbl = msg->tbl; - - for (i = 0; i < nr_rows; i++) { - tbl[i].v1 = cpu_to_le32(src[i].v1); - tbl[i].v2 = cpu_to_le32(src[i].v2); - } - ret = ph->xops->do_xfer(ph, t); - ph->xops->xfer_put(ph, t); - - return ret; -} - -static int scmi_set_tunable(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 msg_id, u32 mon_idx, u32 val) -{ - int ret = 0; - struct scmi_xfer *t; - struct scalar_param_msg *msg; - - ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(*msg), - sizeof(*msg), &t); - if (ret) - return ret; - msg = t->tx.buf; - msg->hw_type = cpu_to_le32(hw_type); - msg->mon_idx = cpu_to_le32(mon_idx); - msg->val = cpu_to_le32(val); - ret = ph->xops->do_xfer(ph, t); - ph->xops->xfer_put(ph, t); - - return ret; -} - -#define scmi_send_cmd(name, _msg_id) \ -static int scmi_##name(const struct scmi_protocol_handle *ph, \ - u32 hw_type, u32 mon_idx, u32 val) \ -{ \ - return scmi_set_tunable(ph, hw_type, _msg_id, mon_idx, val); \ -} \ - -scmi_send_cmd(ipm_ceil, MEMLAT_IPM_CEIL); -scmi_send_cmd(fe_stall_floor, MEMLAT_FE_STALL_FLOOR); -scmi_send_cmd(be_stall_floor, MEMLAT_BE_STALL_FLOOR); -scmi_send_cmd(wb_pct_thres, MEMLAT_WB_PCT); -scmi_send_cmd(wb_filter_ipm, MEMLAT_IPM_FILTER); -scmi_send_cmd(freq_scale_pct, MEMLAT_FREQ_SCALE_PCT); -scmi_send_cmd(freq_scale_ceil_mhz, MEMLAT_FREQ_SCALE_CEIL_MHZ); -scmi_send_cmd(freq_scale_floor_mhz, MEMLAT_FREQ_SCALE_FLOOR_MHZ); -scmi_send_cmd(min_freq, MEMLAT_SET_MIN_FREQ); -scmi_send_cmd(max_freq, MEMLAT_SET_MAX_FREQ); -scmi_send_cmd(adaptive_low_freq, MEMLAT_ADAPTIVE_LOW_FREQ); -scmi_send_cmd(adaptive_high_freq, MEMLAT_ADAPTIVE_HIGH_FREQ); - -static int scmi_send_start_stop(const struct scmi_protocol_handle *ph, u32 msg_id) -{ - int ret = 0; - struct scmi_xfer *t; - - ret = ph->xops->xfer_get_init(ph, msg_id, 0, 0, &t); - if (ret) - return ret; - - ret = ph->xops->do_xfer(ph, t); - ph->xops->xfer_put(ph, t); - - return ret; -} - 
-static int scmi_stop_timer(const struct scmi_protocol_handle *ph) -{ - return scmi_send_start_stop(ph, MEMLAT_STOP_TIMER); -} - -static int scmi_start_timer(const struct scmi_protocol_handle *ph) -{ - return scmi_send_start_stop(ph, MEMLAT_START_TIMER); -} - -static int scmi_flush_cpucp_log(const struct scmi_protocol_handle *ph) -{ - return scmi_send_start_stop(ph, MEMLAT_FLUSH_LOGBUF); -} - -static int scmi_set_global_var(const struct scmi_protocol_handle *ph, u32 val, u32 msg_id) -{ - int ret = 0; - struct scmi_xfer *t; - u32 *ptr; - - ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(u32), sizeof(u32), &t); - if (ret) - return ret; - ptr = (u32 *)t->tx.buf; - *ptr = cpu_to_le32(val); - ret = ph->xops->do_xfer(ph, t); - ph->xops->xfer_put(ph, t); - - return ret; -} - -static int scmi_set_log_level(const struct scmi_protocol_handle *ph, u32 val) -{ - return scmi_set_global_var(ph, val, MEMLAT_SET_LOG_LEVEL); -} - -static int scmi_set_sample_ms(const struct scmi_protocol_handle *ph, u32 val) -{ - return scmi_set_global_var(ph, val, MEMLAT_SAMPLE_MS); -} - -static int scmi_get_timestamp(const struct scmi_protocol_handle *ph, void *buf) -{ - int ret = 0; - struct scmi_xfer *t; - - ret = ph->xops->xfer_get_init(ph, MEMLAT_GET_TIMESTAMP, sizeof(u32), - SCMI_MAX_RX_SIZE, &t); - if (ret) - return ret; - - ret = ph->xops->do_xfer(ph, t); - if (t->rx.len != sizeof(u64)) - return -EMSGSIZE; - - memcpy(buf, t->rx.buf, t->rx.len); - ph->xops->xfer_put(ph, t); - return ret; -} - -static int scmi_get_freq(const struct scmi_protocol_handle *ph, uint32_t hw_type, - uint32_t mon_idx, void *buf, uint32_t msg_id) -{ - int ret = 0; - struct scmi_xfer *t; - struct scalar_param_msg *msg; - - ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(*msg), - SCMI_MAX_RX_SIZE, &t); - if (ret) - return ret; - - msg = t->tx.buf; - msg->hw_type = cpu_to_le32(hw_type); - msg->mon_idx = cpu_to_le32(mon_idx); - ret = ph->xops->do_xfer(ph, t); - if (t->rx.len != sizeof(u32)) - return -EMSGSIZE; - - 
memcpy(buf, t->rx.buf, t->rx.len); - ph->xops->xfer_put(ph, t); - return ret; -} - -static int scmi_get_cur_freq(const struct scmi_protocol_handle *ph, uint32_t hw_type, - uint32_t mon_idx, void *buf) -{ - return scmi_get_freq(ph, hw_type, mon_idx, buf, MEMLAT_GET_CUR_FREQ); -} - -static int scmi_get_adaptive_cur_freq(const struct scmi_protocol_handle *ph, uint32_t hw_type, - uint32_t mon_idx, void *buf) -{ - return scmi_get_freq(ph, hw_type, mon_idx, buf, MEMLAT_GET_ADAPTIVE_CUR_FREQ); -} - -static struct scmi_memlat_vendor_ops memlat_proto_ops = { - .set_mem_grp = scmi_set_mem_grp, - .freq_map = scmi_freq_map, - .set_mon = scmi_set_mon, - .set_common_ev_map = scmi_set_common_map, - .set_grp_ev_map = scmi_set_grp_map, - .adaptive_low_freq = scmi_adaptive_low_freq, - .adaptive_high_freq = scmi_adaptive_high_freq, - .get_adaptive_cur_freq = scmi_get_adaptive_cur_freq, - .ipm_ceil = scmi_ipm_ceil, - .fe_stall_floor = scmi_fe_stall_floor, - .be_stall_floor = scmi_be_stall_floor, - .sample_ms = scmi_set_sample_ms, - .wb_filter_ipm = scmi_wb_filter_ipm, - .wb_pct_thres = scmi_wb_pct_thres, - .freq_scale_pct = scmi_freq_scale_pct, - .freq_scale_ceil_mhz = scmi_freq_scale_ceil_mhz, - .freq_scale_floor_mhz = scmi_freq_scale_floor_mhz, - .min_freq = scmi_min_freq, - .max_freq = scmi_max_freq, - .get_cur_freq = scmi_get_cur_freq, - .start_timer = scmi_start_timer, - .stop_timer = scmi_stop_timer, - .set_log_level = scmi_set_log_level, - .flush_cpucp_log = scmi_flush_cpucp_log, - .get_timestamp = scmi_get_timestamp, -}; - -static int scmi_memlat_vendor_protocol_init(const struct scmi_protocol_handle *ph) -{ - u32 version; - - ph->xops->version_get(ph, &version); - - dev_dbg(ph->dev, "memlat version %d.%d\n", - PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); - - return 0; -} - -static const struct scmi_protocol scmi_memlat_vendor = { - .id = SCMI_PROTOCOL_MEMLAT, - .owner = THIS_MODULE, - .instance_init = &scmi_memlat_vendor_protocol_init, - .ops = 
&memlat_proto_ops, -}; -module_scmi_protocol(scmi_memlat_vendor); - -MODULE_DESCRIPTION("SCMI memlat vendor Protocol"); -MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/pmu_vendor.c b/drivers/firmware/arm_scmi/pmu_vendor.c index 0becebf89d1d..f465286c4580 100644 --- a/drivers/firmware/arm_scmi/pmu_vendor.c +++ b/drivers/firmware/arm_scmi/pmu_vendor.c @@ -5,6 +5,7 @@ */ #include +#include #include "common.h" enum scmi_c1dcvs_protocol_cmd { diff --git a/drivers/soc/qcom/dcvs/Kconfig b/drivers/soc/qcom/dcvs/Kconfig index f1dccb62cd70..59b2e05f2262 100644 --- a/drivers/soc/qcom/dcvs/Kconfig +++ b/drivers/soc/qcom/dcvs/Kconfig @@ -52,20 +52,6 @@ config QCOM_BWMON (memory) frequencies through the QCOM DCVS framework. This driver handles voting for DDR and LLCC. -config QTI_HW_MEMLAT_SCMI_CLIENT - tristate "Qualcomm Technologies Inc. SCMI client driver for HW MEMLAT" - depends on QCOM_MEMLAT && QTI_SCMI_MEMLAT_PROTOCOL - default n - help - SCMI client driver registers itself with SCMI framework for memlat - vendor protocol, and also registers with the memlat interface - driver. - - This driver delivers the memlat vendor protocol handle to interface - driver, and interface driver will use this handle to communicate with - memlat HW. - - config QTI_PMU_SCMI_CLIENT tristate "Qualcomm Technologies Inc. SCMI client driver for PMU" depends on QTI_SCMI_PMU_PROTOCOL && QCOM_PMU_LIB @@ -100,3 +86,13 @@ config QTI_C1DCVS_SCMI_CLIENT This driver delivers the cpudcvs protocol handle to interface driver, and interface driver will use this handle to communicate with cpucp. + +config QTI_C1DCVS_SCMI_V2 + tristate "Qualcomm Technologies Inc. SCMI client driver for C1 DCVS" + depends on QTI_SCMI_VENDOR_PROTOCOL + help + C1 DCVS driver to expose sysfs node to user space and + register with SCMI framework. + + This driver is used to expose sysfs interface to communicate + with cpucp for C1 DCVS based on SCMI consolidation. 
diff --git a/drivers/soc/qcom/dcvs/Makefile b/drivers/soc/qcom/dcvs/Makefile index 2fde2f63d1c7..1bae8ef509b8 100644 --- a/drivers/soc/qcom/dcvs/Makefile +++ b/drivers/soc/qcom/dcvs/Makefile @@ -8,5 +8,5 @@ obj-$(CONFIG_QCOM_MEMLAT) += memlat.o obj-$(CONFIG_QCOM_BWMON) += bwmon.o obj-$(CONFIG_QTI_PMU_SCMI_CLIENT) += pmu_scmi.o obj-$(CONFIG_QTI_C1DCVS_SCMI_CLIENT) += c1dcvs_scmi.o -obj-$(CONFIG_QTI_HW_MEMLAT_SCMI_CLIENT) += memlat_scmi.o obj-$(CONFIG_QTI_QCOM_SCMI_CLIENT) += qcom_scmi_client.o +obj-$(CONFIG_QTI_C1DCVS_SCMI_V2) += c1dcvs_scmi_v2.o diff --git a/drivers/soc/qcom/dcvs/c1dcvs_scmi_v2.c b/drivers/soc/qcom/dcvs/c1dcvs_scmi_v2.c new file mode 100644 index 000000000000..d2f570c78336 --- /dev/null +++ b/drivers/soc/qcom/dcvs/c1dcvs_scmi_v2.c @@ -0,0 +1,342 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct kobject c1dcvs_kobj; +static struct scmi_protocol_handle *ph; +static const struct qcom_scmi_vendor_ops *ops; +static unsigned int user_c1dcvs_en; +static unsigned int kernel_c1dcvs_en; +static DEFINE_MUTEX(c1dcvs_lock); +#define C1DCVS_ALGO_STR 0x433144435653 /* "C1DCVS" */ +struct scmi_device *sdev; + +struct qcom_c1dcvs_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, struct attribute *attr, + char *buf); + ssize_t (*store)(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count); +}; + +enum cpucp_profiling_param_ids { + PARAM_ENABLE_C1DCVS = 1, + PARAM_ENABLE_TRACE, + PARAM_IPC_THRESH, + PARAM_EFREQ_THRESH, + PARAM_HYSTERESIS, + PARAM_C1DCVS_OPT_MODE, +}; + +static int set_enable_c1dcvs(void *buf) +{ + return ops->start_activity(ph, buf, C1DCVS_ALGO_STR, PARAM_ENABLE_C1DCVS, sizeof(u32)); +} + +static int set_c1dcvs_opt_mode(void *buf) +{ + return ops->set_param(ph, buf, C1DCVS_ALGO_STR, 
PARAM_C1DCVS_OPT_MODE, sizeof(u32)); +} + +static int set_enable_trace(void *buf) +{ + return ops->start_activity(ph, buf, C1DCVS_ALGO_STR, PARAM_ENABLE_TRACE, sizeof(u32)); +} + +static int set_ipc_thresh(void *buf, size_t rx_size) +{ + return ops->set_param(ph, buf, C1DCVS_ALGO_STR, PARAM_IPC_THRESH, rx_size); +} + +static int set_efreq_thresh(void *buf, size_t rx_size) +{ + return ops->set_param(ph, buf, C1DCVS_ALGO_STR, PARAM_EFREQ_THRESH, rx_size); +} + +static int set_hysteresis(void *buf) +{ + return ops->set_param(ph, buf, C1DCVS_ALGO_STR, PARAM_HYSTERESIS, sizeof(u32)); +} + +static int get_enable_c1dcvs(void *buf) +{ + return ops->get_param(ph, buf, C1DCVS_ALGO_STR, PARAM_ENABLE_C1DCVS, 0, sizeof(u32)); +} + +static int get_enable_trace(void *buf) +{ + return ops->get_param(ph, buf, C1DCVS_ALGO_STR, PARAM_ENABLE_TRACE, 0, sizeof(u32)); +} + +static int get_ipc_thresh(void *buf, size_t rx_size) +{ + return ops->get_param(ph, buf, C1DCVS_ALGO_STR, PARAM_IPC_THRESH, 0, rx_size); +} + +static int get_efreq_thresh(void *buf, size_t rx_size) +{ + return ops->get_param(ph, buf, C1DCVS_ALGO_STR, PARAM_EFREQ_THRESH, 0, rx_size); +} + +static int get_hysteresis(void *buf) +{ + return ops->get_param(ph, buf, C1DCVS_ALGO_STR, PARAM_HYSTERESIS, 0, sizeof(u32)); +} +static int get_c1dcvs_opt_mode(void *buf) +{ + return ops->get_param(ph, buf, C1DCVS_ALGO_STR, PARAM_C1DCVS_OPT_MODE, 0, sizeof(u32)); +} +#define to_c1dcvs_attr(_attr) \ + container_of(_attr, struct qcom_c1dcvs_attr, attr) +#define C1DCVS_ATTR_RW(_name) \ +static struct qcom_c1dcvs_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) \ + +#define store_c1dcvs_attr(name) \ +static ssize_t store_##name(struct kobject *kobj, \ + struct attribute *attr, const char *buf,\ + size_t count) \ +{ \ + unsigned int var; \ + int ret; \ + ret = kstrtouint(buf, 10, &var); \ + if (ret < 0) \ + return ret; \ + ret = set_##name(&var); \ + return ((ret < 0) ? 
ret : count); \ +} \ + +#define show_c1dcvs_attr(name) \ +static ssize_t show_##name(struct kobject *kobj, \ + struct attribute *attr, char *buf) \ +{ \ + unsigned int var; \ + int ret; \ + \ + ret = get_##name(&var); \ + if (ret < 0) \ + return ret; \ + \ + return scnprintf(buf, PAGE_SIZE, "%lu\n", le32_to_cpu(var)); \ +} \ + +/* + * Must hold c1dcvs_lock before calling this function + */ +static int update_enable_c1dcvs(void) +{ + unsigned int enable = min(user_c1dcvs_en, kernel_c1dcvs_en); + + return set_enable_c1dcvs(&enable); +} + +static ssize_t store_enable_c1dcvs(struct kobject *kobj, + struct attribute *attr, const char *buf, + size_t count) +{ + unsigned int var; + int ret; + + ret = kstrtouint(buf, 10, &var); + if (ret < 0) + return ret; + + mutex_lock(&c1dcvs_lock); + user_c1dcvs_en = var; + ret = update_enable_c1dcvs(); + mutex_unlock(&c1dcvs_lock); + + return ((ret < 0) ? ret : count); +} + +int c1dcvs_enable(bool enable) +{ + unsigned int data = enable ? 1 : 0; + int ret; + + if (IS_ERR(ops)) + return -EPROBE_DEFER; + + mutex_lock(&c1dcvs_lock); + kernel_c1dcvs_en = data; + ret = update_enable_c1dcvs(); + mutex_unlock(&c1dcvs_lock); + + return ret; +} +EXPORT_SYMBOL(c1dcvs_enable); + +store_c1dcvs_attr(enable_trace); +show_c1dcvs_attr(enable_trace); +C1DCVS_ATTR_RW(enable_trace); +store_c1dcvs_attr(hysteresis); +show_c1dcvs_attr(hysteresis); +C1DCVS_ATTR_RW(hysteresis); +store_c1dcvs_attr(c1dcvs_opt_mode); +show_c1dcvs_attr(c1dcvs_opt_mode); +C1DCVS_ATTR_RW(c1dcvs_opt_mode); +show_c1dcvs_attr(enable_c1dcvs); +C1DCVS_ATTR_RW(enable_c1dcvs); + +#define store_c1dcvs_thresh(name) \ +static ssize_t store_##name(struct kobject *kobj, \ + struct attribute *attr, const char *buf,\ + size_t count) \ +{ \ + int ret, i = 0; \ + char *s = kstrdup(buf, GFP_KERNEL); \ + unsigned int msg[2] = {0}; \ + char *str; \ + \ + while (((str = strsep(&s, " ")) != NULL) && i < 2) { \ + ret = kstrtouint(str, 10, &msg[i]); \ + if (ret < 0) { \ + pr_err("Invalid value :%d\n", 
ret); \ + goto out; \ + } \ + i++; \ + } \ + \ + pr_info("Input threshold :%lu for cluster :%lu\n", msg[1], msg[0]);\ + ret = set_##name(msg, sizeof(msg)); \ +out: \ + kfree(s); \ + return ((ret < 0) ? ret : count); \ +} \ + +#define show_c1dcvs_thresh(name) \ +static ssize_t show_##name(struct kobject *kobj, \ + struct attribute *attr, char *buf) \ +{ \ + unsigned int *vars = NULL; \ + int i, ret, tot = 0; \ + \ + vars = kcalloc(num_possible_cpus(), sizeof(unsigned int), GFP_KERNEL);\ + if (!vars) \ + return -ENOMEM; \ + ret = get_##name(vars, num_possible_cpus() * \ + sizeof(unsigned int)); \ + if (ret < 0) { \ + kfree(vars); \ + return ret; \ + } \ + for (i = 0; i < num_possible_cpus(); i++) { \ + vars[i] = le32_to_cpu(vars[i]); \ + if (!vars[i]) \ + break; \ + tot += scnprintf(buf + tot, PAGE_SIZE - tot, "%lu\t", vars[i]);\ + } \ + tot += scnprintf(buf + tot, PAGE_SIZE - tot, "\n"); \ + \ + kfree(vars); \ + return tot; \ +} \ + +store_c1dcvs_thresh(ipc_thresh); +show_c1dcvs_thresh(ipc_thresh); +C1DCVS_ATTR_RW(ipc_thresh); +store_c1dcvs_thresh(efreq_thresh); +show_c1dcvs_thresh(efreq_thresh); +C1DCVS_ATTR_RW(efreq_thresh); + +static struct attribute *c1dcvs_settings_attrs[] = { + &enable_c1dcvs.attr, + &enable_trace.attr, + &ipc_thresh.attr, + &efreq_thresh.attr, + &hysteresis.attr, + &c1dcvs_opt_mode.attr, + NULL, +}; +ATTRIBUTE_GROUPS(c1dcvs_settings); + +static ssize_t attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct qcom_c1dcvs_attr *c1dcvs_attr = to_c1dcvs_attr(attr); + ssize_t ret = -EIO; + + if (c1dcvs_attr->show) + ret = c1dcvs_attr->show(kobj, attr, buf); + + return ret; +} + +static ssize_t attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t count) +{ + struct qcom_c1dcvs_attr *c1dcvs_attr = to_c1dcvs_attr(attr); + ssize_t ret = -EIO; + + if (c1dcvs_attr->store) + ret = c1dcvs_attr->store(kobj, attr, buf, count); + + return ret; +} + +static const struct sysfs_ops c1dcvs_sysfs_ops = { + 
.show = attr_show, + .store = attr_store, +}; +static struct kobj_type c1dcvs_settings_ktype = { + .sysfs_ops = &c1dcvs_sysfs_ops, + .default_groups = c1dcvs_settings_groups, +}; + +static int scmi_c1dcvs_probe(struct platform_device *pdev) +{ + int ret; + + sdev = get_qcom_scmi_device(); + if (IS_ERR(sdev)) { + ret = PTR_ERR(sdev); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Error getting scmi_dev ret = %d\n", ret); + return ret; + } + ops = sdev->handle->devm_protocol_get(sdev, QCOM_SCMI_VENDOR_PROTOCOL, &ph); + if (IS_ERR(ops)) + return PTR_ERR(ops); + + ret = kobject_init_and_add(&c1dcvs_kobj, &c1dcvs_settings_ktype, + &cpu_subsys.dev_root->kobj, "c1dcvs"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to init c1 dcvs kobj: %d\n", ret); + kobject_put(&c1dcvs_kobj); + } + user_c1dcvs_en = kernel_c1dcvs_en = 1; + + return 0; +} + +static const struct of_device_id c1dcvs_v2[] = { + {.compatible = "qcom,c1dcvs-v2"}, + {}, +}; + +static struct platform_driver c1dcvs_v2_driver = { + .driver = { + .name = "c1dcvs-v2", + .of_match_table = c1dcvs_v2, + }, + .probe = scmi_c1dcvs_probe, +}; + + +module_platform_driver(c1dcvs_v2_driver); +MODULE_SOFTDEP("pre: qcom_scmi_client"); +MODULE_DESCRIPTION("Qcom SCMI C1DCVS driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/dcvs/memlat.c b/drivers/soc/qcom/dcvs/memlat.c index a43b367c32e4..d82b23bcffb4 100644 --- a/drivers/soc/qcom/dcvs/memlat.c +++ b/drivers/soc/qcom/dcvs/memlat.c @@ -31,12 +31,73 @@ #include #include #include -#include #include +#include #include "trace-dcvs.h" #define MAX_MEMLAT_GRPS NUM_DCVS_HW_TYPES #define FP_NAME "memlat_fp" +#define MEMLAT_ALGO_STR 0x4D454D4C4154 /* "MEMLAT" */ +#define SCMI_VENDOR_MSG_START (3) +#define SCMI_VENDOR_MSG_MODULE_START (16) +#define INVALID_IDX 0xFF +#define MAX_NAME_LEN 20 +#define MAX_MAP_ENTRIES 12 + +enum scmi_memlat_protocol_cmd { + MEMLAT_SET_LOG_LEVEL = SCMI_VENDOR_MSG_START, + MEMLAT_FLUSH_LOGBUF, + MEMLAT_SET_MEM_GROUP = 
SCMI_VENDOR_MSG_MODULE_START, + MEMLAT_SET_MONITOR, + MEMLAT_SET_COMMON_EV_MAP, + MEMLAT_SET_GRP_EV_MAP, + MEMLAT_ADAPTIVE_LOW_FREQ, + MEMLAT_ADAPTIVE_HIGH_FREQ, + MEMLAT_GET_ADAPTIVE_CUR_FREQ, + MEMLAT_IPM_CEIL, + MEMLAT_FE_STALL_FLOOR, + MEMLAT_BE_STALL_FLOOR, + MEMLAT_WB_PCT, + MEMLAT_IPM_FILTER, + MEMLAT_FREQ_SCALE_PCT, + MEMLAT_FREQ_SCALE_CEIL_MHZ, + MEMLAT_FREQ_SCALE_FLOOR_MHZ, + MEMLAT_SAMPLE_MS, + MEMLAT_MON_FREQ_MAP, + MEMLAT_SET_MIN_FREQ, + MEMLAT_SET_MAX_FREQ, + MEMLAT_GET_CUR_FREQ, + MEMLAT_START_TIMER, + MEMLAT_STOP_TIMER, + MEMLAT_GET_TIMESTAMP, + MEMLAT_MAX_MSG +}; + +struct map_table { + uint16_t v1; + uint16_t v2; +}; + +struct map_param_msg { + uint32_t hw_type; + uint32_t mon_idx; + uint32_t nr_rows; + struct map_table tbl[MAX_MAP_ENTRIES]; +} __packed; + +struct node_msg { + uint32_t cpumask; + uint32_t hw_type; + uint32_t mon_type; + uint32_t mon_idx; + char mon_name[MAX_NAME_LEN]; +}; + +struct scalar_param_msg { + uint32_t hw_type; + uint32_t mon_idx; + uint32_t val; +}; enum common_ev_idx { INST_IDX, @@ -53,6 +114,12 @@ enum grp_ev_idx { NUM_GRP_EVS }; +struct ev_map_msg { + uint32_t num_evs; + uint32_t hw_type; + uint8_t cid[NUM_COMMON_EVS]; +}; + enum mon_type { SAMPLING_MON = BIT(0), THREADLAT_MON = BIT(1), @@ -168,7 +235,7 @@ struct memlat_dev_data { bool sampling_enabled; bool inited; /* CPUCP related struct fields */ - const struct scmi_memlat_vendor_ops *memlat_ops; + const struct qcom_scmi_vendor_ops *ops; struct scmi_protocol_handle *ph; u32 cpucp_sample_ms; u32 cpucp_log_level; @@ -207,25 +274,29 @@ static ssize_t show_##name(struct kobject *kobj, \ return scnprintf(buf, PAGE_SIZE, "%u\n", mon->name); \ } \ -#define store_attr(name, _min, _max) \ +#define store_attr(name, _min, _max, param_id) \ static ssize_t store_##name(struct kobject *kobj, \ struct attribute *attr, const char *buf, \ size_t count) \ { \ int ret; \ unsigned int val; \ + struct scalar_param_msg msg; \ struct memlat_mon *mon = to_memlat_mon(kobj); \ struct 
memlat_group *grp = mon->memlat_grp; \ - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; \ + const struct qcom_scmi_vendor_ops *ops = memlat_data->ops; \ ret = kstrtouint(buf, 10, &val); \ if (ret < 0) \ return ret; \ val = max(val, _min); \ val = min(val, _max); \ mon->name = val; \ - if (mon->type == CPUCP_MON && ops) { \ - ret = ops->name(memlat_data->ph, grp->hw_type, \ - mon->index, mon->name); \ + if ((mon->type & CPUCP_MON) && ops) { \ + msg.hw_type = grp->hw_type; \ + msg.mon_idx = mon->index; \ + msg.val = val; \ + ret = ops->set_param(memlat_data->ph, &msg, MEMLAT_ALGO_STR, \ + param_id, sizeof(msg)); \ if (ret < 0) { \ pr_err("failed to set mon tunable :%d\n", ret); \ return ret; \ @@ -242,31 +313,35 @@ static ssize_t show_##name(struct kobject *kobj, \ return scnprintf(buf, PAGE_SIZE, "%u\n", grp->name); \ } \ -#define store_grp_attr(name, _min, _max) \ -static ssize_t store_##name(struct kobject *kobj, \ - struct attribute *attr, const char *buf, \ - size_t count) \ -{ \ - int ret; \ - unsigned int val; \ - struct memlat_group *grp = to_memlat_grp(kobj); \ - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; \ - ret = kstrtouint(buf, 10, &val); \ - if (ret < 0) \ - return ret; \ - val = max(val, _min); \ - val = min(val, _max); \ - grp->name = val; \ - if (grp->cpucp_enabled && ops) { \ - ret = ops->name(memlat_data->ph, grp->hw_type, \ - 0, grp->name); \ - if (ret < 0) { \ - pr_err("failed to set grp tunable :%d\n", ret); \ - return ret; \ - } \ - } \ - return count; \ -} \ +#define store_grp_attr(name, _min, _max, param_id) \ +static ssize_t store_##name(struct kobject *kobj, \ + struct attribute *attr, const char *buf, \ + size_t count) \ +{ \ + int ret; \ + unsigned int val; \ + struct scalar_param_msg msg; \ + struct memlat_group *grp = to_memlat_grp(kobj); \ + const struct qcom_scmi_vendor_ops *ops = memlat_data->ops; \ + ret = kstrtouint(buf, 10, &val); \ + if (ret < 0) \ + return ret; \ + val = max(val, 
_min); \ + val = min(val, _max); \ + grp->name = val; \ + if (grp->cpucp_enabled && ops) { \ + msg.hw_type = grp->hw_type; \ + msg.mon_idx = 0; \ + msg.val = val; \ + ret = ops->set_param(memlat_data->ph, &msg, MEMLAT_ALGO_STR, \ + param_id, sizeof(msg)); \ + if (ret < 0) { \ + pr_err("failed to set grp tunable :%d\n", ret); \ + return ret; \ + } \ + } \ + return count; \ +} static ssize_t store_min_freq(struct kobject *kobj, struct attribute *attr, const char *buf, @@ -276,7 +351,8 @@ static ssize_t store_min_freq(struct kobject *kobj, unsigned int freq; struct memlat_mon *mon = to_memlat_mon(kobj); struct memlat_group *grp = mon->memlat_grp; - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; + struct scalar_param_msg msg; + const struct qcom_scmi_vendor_ops *ops = memlat_data->ops; ret = kstrtouint(buf, 10, &freq); if (ret < 0) @@ -285,9 +361,12 @@ static ssize_t store_min_freq(struct kobject *kobj, freq = min(freq, mon->max_freq); mon->min_freq = freq; - if (mon->type == CPUCP_MON && ops) { - ret = ops->min_freq(memlat_data->ph, grp->hw_type, - mon->index, mon->min_freq); + if ((mon->type & CPUCP_MON) && ops) { + msg.hw_type = grp->hw_type; + msg.mon_idx = mon->index; + msg.val = mon->min_freq; + ret = ops->set_param(memlat_data->ph, + &msg, MEMLAT_ALGO_STR, MEMLAT_SET_MIN_FREQ, sizeof(msg)); if (ret < 0) { pr_err("failed to set min_freq :%d\n", ret); return ret; @@ -305,18 +384,21 @@ static ssize_t store_max_freq(struct kobject *kobj, unsigned int freq; struct memlat_mon *mon = to_memlat_mon(kobj); struct memlat_group *grp = mon->memlat_grp; - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; + struct scalar_param_msg msg; + const struct qcom_scmi_vendor_ops *ops = memlat_data->ops; ret = kstrtouint(buf, 10, &freq); if (ret < 0) return ret; freq = max(freq, mon->min_freq); freq = min(freq, mon->mon_max_freq); mon->max_freq = freq; - - if (mon->type == CPUCP_MON && ops) { - ret = ops->max_freq(memlat_data->ph, grp->hw_type, - 
mon->index, mon->max_freq); + if ((mon->type & CPUCP_MON) && ops) { + msg.hw_type = grp->hw_type; + msg.mon_idx = mon->index; + msg.val = mon->max_freq; + ret = ops->set_param(memlat_data->ph, + &msg, MEMLAT_ALGO_STR, MEMLAT_SET_MAX_FREQ, sizeof(msg)); if (ret < 0) { pr_err("failed to set max_freq :%d\n", ret); return ret; @@ -378,9 +460,8 @@ static ssize_t store_cpucp_sample_ms(struct kobject *kobj, { int ret, i; unsigned int val; - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; + const struct qcom_scmi_vendor_ops *ops = memlat_data->ops; struct memlat_group *grp; - if (!ops) return -ENODEV; @@ -397,8 +478,8 @@ static ssize_t store_cpucp_sample_ms(struct kobject *kobj, return ret; val = max(val, MIN_SAMPLE_MS); val = min(val, MAX_SAMPLE_MS); - - ret = ops->sample_ms(memlat_data->ph, val); + ret = ops->set_param(memlat_data->ph, &val, + MEMLAT_ALGO_STR, MEMLAT_SAMPLE_MS, sizeof(val)); if (ret < 0) { pr_err("Failed to set cpucp sample ms :%d\n", ret); return ret; @@ -420,7 +501,7 @@ static ssize_t store_cpucp_log_level(struct kobject *kobj, { int ret, i; unsigned int val; - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; + const struct qcom_scmi_vendor_ops *ops = memlat_data->ops; struct memlat_group *grp; if (!ops) @@ -437,8 +518,8 @@ static ssize_t store_cpucp_log_level(struct kobject *kobj, ret = kstrtouint(buf, 10, &val); if (ret < 0) return ret; - - ret = ops->set_log_level(memlat_data->ph, val); + ret = ops->set_param(memlat_data->ph, &val, + MEMLAT_ALGO_STR, MEMLAT_SET_LOG_LEVEL, sizeof(val)); if (ret < 0) { pr_err("failed to configure log_level, ret = %d\n", ret); return ret; @@ -459,12 +540,12 @@ static ssize_t store_flush_cpucp_log(struct kobject *kobj, size_t count) { int ret; - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; + const struct qcom_scmi_vendor_ops *ops = memlat_data->ops; if (!ops) return -ENODEV; - - ret = ops->flush_cpucp_log(memlat_data->ph); + ret = 
ops->set_param(memlat_data->ph, 0, + MEMLAT_ALGO_STR, MEMLAT_FLUSH_LOGBUF, 0); if (ret < 0) { pr_err("failed to flush cpucp log, ret = %d\n", ret); return ret; @@ -483,13 +564,13 @@ static ssize_t show_hlos_cpucp_offset(struct kobject *kobj, struct attribute *attr, char *buf) { int ret; - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; + const struct qcom_scmi_vendor_ops *ops = memlat_data->ops; uint64_t cpucp_ts, hlos_ts; if (!ops) return -ENODEV; - - ret = ops->get_timestamp(memlat_data->ph, &cpucp_ts); + ret = ops->get_param(memlat_data->ph, &cpucp_ts, + MEMLAT_ALGO_STR, MEMLAT_GET_TIMESTAMP, 0, sizeof(cpucp_ts)); if (ret < 0) { pr_err("failed to get cpucp timestamp\n"); return ret; @@ -503,30 +584,35 @@ static ssize_t show_hlos_cpucp_offset(struct kobject *kobj, static ssize_t show_cur_freq(struct kobject *kobj, struct attribute *attr, char *buf) { - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; + struct scalar_param_msg msg; + const struct qcom_scmi_vendor_ops *ops = memlat_data->ops; struct memlat_mon *mon = to_memlat_mon(kobj); struct memlat_group *grp = mon->memlat_grp; uint32_t cur_freq; int ret; - if (mon->type != CPUCP_MON) + if (!grp->cpucp_enabled) return scnprintf(buf, PAGE_SIZE, "%lu\n", mon->cur_freq); if (!ops) return -ENODEV; - - ret = ops->get_cur_freq(memlat_data->ph, grp->hw_type, mon->index, &cur_freq); + msg.hw_type = grp->hw_type; + msg.mon_idx = mon->index; + ret = ops->get_param(memlat_data->ph, &msg, + MEMLAT_ALGO_STR, MEMLAT_GET_CUR_FREQ, sizeof(msg), sizeof(cur_freq)); if (ret < 0) { pr_err("failed to get mon current frequency\n"); return ret; } + memcpy(&cur_freq, (void *)&msg, sizeof(cur_freq)); return scnprintf(buf, PAGE_SIZE, "%lu\n", le32_to_cpu(cur_freq)); } static ssize_t show_adaptive_cur_freq(struct kobject *kobj, struct attribute *attr, char *buf) { - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; + struct scalar_param_msg msg; + const struct qcom_scmi_vendor_ops 
*ops = memlat_data->ops; uint32_t adaptive_cur_freq; struct memlat_group *grp = to_memlat_grp(kobj); int ret; @@ -536,39 +622,42 @@ static ssize_t show_adaptive_cur_freq(struct kobject *kobj, if (!ops) return -ENODEV; - - ret = ops->get_adaptive_cur_freq(memlat_data->ph, grp->hw_type, 0, &adaptive_cur_freq); + msg.hw_type = grp->hw_type; + msg.mon_idx = 0; + ret = ops->get_param(memlat_data->ph, &msg, MEMLAT_ALGO_STR, + MEMLAT_GET_ADAPTIVE_CUR_FREQ, sizeof(msg), sizeof(adaptive_cur_freq)); if (ret < 0) { pr_err("failed to get grp adaptive current frequency\n"); return ret; } + memcpy(&adaptive_cur_freq, &msg, sizeof(adaptive_cur_freq)); return scnprintf(buf, PAGE_SIZE, "%lu\n", le32_to_cpu(adaptive_cur_freq)); } show_grp_attr(sampling_cur_freq); show_grp_attr(adaptive_high_freq); -store_grp_attr(adaptive_high_freq, 0U, 8000000U); +store_grp_attr(adaptive_high_freq, 0U, 8000000U, MEMLAT_ADAPTIVE_HIGH_FREQ); show_grp_attr(adaptive_low_freq); -store_grp_attr(adaptive_low_freq, 0U, 8000000U); +store_grp_attr(adaptive_low_freq, 0U, 8000000U, MEMLAT_ADAPTIVE_LOW_FREQ); show_attr(min_freq); show_attr(max_freq); show_attr(ipm_ceil); -store_attr(ipm_ceil, 1U, 50000U); +store_attr(ipm_ceil, 1U, 50000U, MEMLAT_IPM_CEIL); show_attr(fe_stall_floor); -store_attr(fe_stall_floor, 0U, 100U); +store_attr(fe_stall_floor, 0U, 100U, MEMLAT_FE_STALL_FLOOR); show_attr(be_stall_floor); -store_attr(be_stall_floor, 0U, 100U); +store_attr(be_stall_floor, 0U, 100U, MEMLAT_BE_STALL_FLOOR); show_attr(freq_scale_pct); -store_attr(freq_scale_pct, 0U, 1000U); +store_attr(freq_scale_pct, 0U, 1000U, MEMLAT_FREQ_SCALE_PCT); show_attr(wb_pct_thres); -store_attr(wb_pct_thres, 0U, 100U); +store_attr(wb_pct_thres, 0U, 100U, MEMLAT_WB_PCT); show_attr(wb_filter_ipm); -store_attr(wb_filter_ipm, 0U, 50000U); -store_attr(freq_scale_ceil_mhz, 0U, 5000U); +store_attr(wb_filter_ipm, 0U, 50000U, MEMLAT_IPM_FILTER); show_attr(freq_scale_ceil_mhz); -store_attr(freq_scale_floor_mhz, 0U, 5000U); 
+store_attr(freq_scale_ceil_mhz, 0U, 5000U, MEMLAT_FREQ_SCALE_CEIL_MHZ); show_attr(freq_scale_floor_mhz); +store_attr(freq_scale_floor_mhz, 0U, 5000U, MEMLAT_FREQ_SCALE_FLOOR_MHZ); MEMLAT_ATTR_RW(sample_ms); MEMLAT_ATTR_RW(cpucp_sample_ms); @@ -1268,9 +1357,12 @@ static inline bool should_enable_memlat_fp(void) return false; } +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) static int configure_cpucp_common_events(void) { - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; + + struct ev_map_msg msg; + const struct qcom_scmi_vendor_ops *ops = memlat_data->ops; int ret = 0, i, j = 0; u8 ev_map[NUM_COMMON_EVS]; @@ -1283,20 +1375,31 @@ static int configure_cpucp_common_events(void) if (ret >= 0 && ret < MAX_CPUCP_EVT) ev_map[j] = ret; } - - ret = ops->set_common_ev_map(memlat_data->ph, ev_map, NUM_COMMON_EVS); + msg.num_evs = NUM_COMMON_EVS; + msg.hw_type = INVALID_IDX; + for (i = 0; i < NUM_COMMON_EVS; i++) + msg.cid[i] = ev_map[i]; + ret = ops->set_param(memlat_data->ph, &msg, + MEMLAT_ALGO_STR, MEMLAT_SET_COMMON_EV_MAP, sizeof(msg)); return ret; } static int configure_cpucp_grp(struct memlat_group *grp) { - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; + struct node_msg msg; + struct ev_map_msg ev_msg; + struct scalar_param_msg scaler_msg; + const struct qcom_scmi_vendor_ops *ops = memlat_data->ops; int ret = 0, i, j = 0; struct device_node *of_node = grp->dev->of_node; u8 ev_map[NUM_GRP_EVS]; - ret = ops->set_mem_grp(memlat_data->ph, *cpumask_bits(cpu_possible_mask), - grp->hw_type); + msg.cpumask = *cpumask_bits(cpu_possible_mask); + msg.hw_type = grp->hw_type; + msg.mon_type = 0; + msg.mon_idx = 0; + ret = ops->set_param(memlat_data->ph, &msg, + MEMLAT_ALGO_STR, MEMLAT_SET_MEM_GROUP, sizeof(msg)); if (ret < 0) { pr_err("Failed to configure mem grp %s\n", of_node->name); return ret; @@ -1311,22 +1414,32 @@ static int configure_cpucp_grp(struct memlat_group *grp) if (ret >= 0 && ret < MAX_CPUCP_EVT) ev_map[j] = ret; } - - 
ret = ops->set_grp_ev_map(memlat_data->ph, grp->hw_type, ev_map, NUM_GRP_EVS); + ev_msg.num_evs = NUM_GRP_EVS; + ev_msg.hw_type = grp->hw_type; + for (i = 0; i < NUM_GRP_EVS; i++) + ev_msg.cid[i] = ev_map[i]; + ret = ops->set_param(memlat_data->ph, &ev_msg, + MEMLAT_ALGO_STR, MEMLAT_SET_GRP_EV_MAP, sizeof(ev_msg)); if (ret < 0) { pr_err("Failed to configure event map for mem grp %s\n", of_node->name); return ret; } - - ret = ops->adaptive_low_freq(memlat_data->ph, grp->hw_type, 0, grp->adaptive_low_freq); + scaler_msg.hw_type = grp->hw_type; + scaler_msg.mon_idx = 0; + scaler_msg.val = grp->adaptive_low_freq; + ret = ops->set_param(memlat_data->ph, &scaler_msg, + MEMLAT_ALGO_STR, MEMLAT_ADAPTIVE_LOW_FREQ, sizeof(scaler_msg)); if (ret < 0) { pr_err("Failed to configure grp adaptive low freq for mem grp %s\n", of_node->name); return ret; } - - ret = ops->adaptive_high_freq(memlat_data->ph, grp->hw_type, 0, grp->adaptive_high_freq); + scaler_msg.hw_type = grp->hw_type; + scaler_msg.mon_idx = 0; + scaler_msg.val = grp->adaptive_high_freq; + ret = ops->set_param(memlat_data->ph, &scaler_msg, + MEMLAT_ALGO_STR, MEMLAT_ADAPTIVE_HIGH_FREQ, sizeof(scaler_msg)); if (ret < 0) pr_err("Failed to configure grp adaptive high freq for mem grp %s\n", of_node->name); @@ -1336,117 +1449,158 @@ static int configure_cpucp_grp(struct memlat_group *grp) static int configure_cpucp_mon(struct memlat_mon *mon) { struct memlat_group *grp = mon->memlat_grp; - const struct scmi_memlat_vendor_ops *ops = memlat_data->memlat_ops; + struct node_msg msg; + struct scalar_param_msg scalar_msg; + struct map_param_msg map_msg; + const struct qcom_scmi_vendor_ops *ops = memlat_data->ops; + int i; struct device_node *of_node = mon->dev->of_node; int ret; const char c = ':'; - ret = ops->set_mon(memlat_data->ph, mon->cpus_mpidr, grp->hw_type, - mon->is_compute, mon->index, (strrchr(dev_name(mon->dev), c) + 1)); + msg.cpumask = mon->cpus_mpidr; + msg.hw_type = grp->hw_type; + msg.mon_type = 
mon->is_compute; + msg.mon_idx = mon->index; + if ((strrchr(dev_name(mon->dev), c) + 1)) + snprintf(msg.mon_name, MAX_NAME_LEN, "%s", strrchr(dev_name(mon->dev), c) + 1); + ret = ops->set_param(memlat_data->ph, &msg, + MEMLAT_ALGO_STR, MEMLAT_SET_MONITOR, sizeof(msg)); if (ret < 0) { pr_err("failed to configure monitor %s\n", of_node->name); return ret; } - - ret = ops->ipm_ceil(memlat_data->ph, grp->hw_type, mon->index, - mon->ipm_ceil); + scalar_msg.hw_type = grp->hw_type; + scalar_msg.mon_idx = mon->index; + scalar_msg.val = mon->ipm_ceil; + ret = ops->set_param(memlat_data->ph, &scalar_msg, + MEMLAT_ALGO_STR, MEMLAT_IPM_CEIL, sizeof(scalar_msg)); if (ret < 0) { pr_err("failed to set ipm ceil for %s\n", of_node->name); return ret; } + scalar_msg.hw_type = grp->hw_type; + scalar_msg.mon_idx = mon->index; + scalar_msg.val = mon->fe_stall_floor; + ret = ops->set_param(memlat_data->ph, &scalar_msg, + MEMLAT_ALGO_STR, MEMLAT_FE_STALL_FLOOR, sizeof(scalar_msg)); - ret = ops->fe_stall_floor(memlat_data->ph, grp->hw_type, mon->index, - mon->fe_stall_floor); if (ret < 0) { pr_err("failed to set fe stall floor for %s\n", of_node->name); return ret; } - - ret = ops->be_stall_floor(memlat_data->ph, grp->hw_type, mon->index, - mon->be_stall_floor); + scalar_msg.hw_type = grp->hw_type; + scalar_msg.mon_idx = mon->index; + scalar_msg.val = mon->be_stall_floor; + ret = ops->set_param(memlat_data->ph, &scalar_msg, + MEMLAT_ALGO_STR, MEMLAT_BE_STALL_FLOOR, sizeof(scalar_msg)); if (ret < 0) { pr_err("failed to set be stall floor for %s\n", of_node->name); return ret; } - - ret = ops->wb_pct_thres(memlat_data->ph, grp->hw_type, mon->index, - mon->wb_pct_thres); + scalar_msg.hw_type = grp->hw_type; + scalar_msg.mon_idx = mon->index; + scalar_msg.val = mon->wb_pct_thres; + ret = ops->set_param(memlat_data->ph, &scalar_msg, + MEMLAT_ALGO_STR, MEMLAT_WB_PCT, sizeof(scalar_msg)); if (ret < 0) { pr_err("failed to set wb pct for %s\n", of_node->name); return ret; } - - ret = 
ops->wb_filter_ipm(memlat_data->ph, grp->hw_type, mon->index, - mon->wb_filter_ipm); + scalar_msg.hw_type = grp->hw_type; + scalar_msg.mon_idx = mon->index; + scalar_msg.val = mon->wb_filter_ipm; + ret = ops->set_param(memlat_data->ph, &scalar_msg, + MEMLAT_ALGO_STR, MEMLAT_IPM_FILTER, sizeof(scalar_msg)); if (ret < 0) { pr_err("failed to set wb filter ipm for %s\n", of_node->name); return ret; } - ret = ops->freq_scale_pct(memlat_data->ph, grp->hw_type, mon->index, - mon->freq_scale_pct); + scalar_msg.hw_type = grp->hw_type; + scalar_msg.mon_idx = mon->index; + scalar_msg.val = mon->freq_scale_pct; + ret = ops->set_param(memlat_data->ph, &scalar_msg, + MEMLAT_ALGO_STR, MEMLAT_FREQ_SCALE_PCT, sizeof(scalar_msg)); if (ret < 0) { pr_err("failed to set freq_scale_pct for %s\n", of_node->name); return ret; } - - ret = ops->freq_scale_ceil_mhz(memlat_data->ph, grp->hw_type, mon->index, - mon->freq_scale_ceil_mhz); + scalar_msg.hw_type = grp->hw_type; + scalar_msg.mon_idx = mon->index; + scalar_msg.val = mon->freq_scale_ceil_mhz; + ret = ops->set_param(memlat_data->ph, &scalar_msg, + MEMLAT_ALGO_STR, MEMLAT_FREQ_SCALE_CEIL_MHZ, sizeof(scalar_msg)); if (ret < 0) { - pr_err("failed to set freq_scale_ceil for %s\n", of_node->name); + pr_err("failed to set freq_scale_ceil on %s\n", of_node->name); + return ret; + } + scalar_msg.hw_type = grp->hw_type; + scalar_msg.mon_idx = mon->index; + scalar_msg.val = mon->freq_scale_floor_mhz; + ret = ops->set_param(memlat_data->ph, &scalar_msg, + MEMLAT_ALGO_STR, MEMLAT_FREQ_SCALE_FLOOR_MHZ, sizeof(scalar_msg)); + if (ret < 0) { + pr_err("failed to set freq_scale_floor on %s\n", of_node->name); return ret; } - ret = ops->freq_scale_floor_mhz(memlat_data->ph, grp->hw_type, mon->index, - mon->freq_scale_floor_mhz); - if (ret < 0) { - pr_err("failed to set freq_scale_floor on %s\n", of_node->name); - return ret; + map_msg.hw_type = grp->hw_type; + map_msg.mon_idx = mon->index; + map_msg.nr_rows = mon->freq_map_len; + 
for (i = 0; i < mon->freq_map_len; i++) { + map_msg.tbl[i].v1 = mon->freq_map[i].cpufreq_mhz; + if (mon->freq_map[i].memfreq_khz > 1000) + map_msg.tbl[i].v2 = mon->freq_map[i].memfreq_khz / 1000; + else + /* in case of DDRQOS, we do not want to divide by 1000 */ + map_msg.tbl[i].v2 = mon->freq_map[i].memfreq_khz; } - - ret = ops->freq_map(memlat_data->ph, grp->hw_type, mon->index, - mon->freq_map_len, mon->freq_map); + ret = ops->set_param(memlat_data->ph, &map_msg, + MEMLAT_ALGO_STR, MEMLAT_MON_FREQ_MAP, sizeof(map_msg)); if (ret < 0) { pr_err("failed to configure freq_map for %s\n", of_node->name); return ret; } - ret = ops->min_freq(memlat_data->ph, grp->hw_type, mon->index, - mon->min_freq); + scalar_msg.hw_type = grp->hw_type; + scalar_msg.mon_idx = mon->index; + scalar_msg.val = mon->min_freq; + ret = ops->set_param(memlat_data->ph, &scalar_msg, + MEMLAT_ALGO_STR, MEMLAT_SET_MIN_FREQ, sizeof(scalar_msg)); if (ret < 0) { pr_err("failed to set min_freq for %s\n", of_node->name); return ret; } - - ret = ops->max_freq(memlat_data->ph, grp->hw_type, mon->index, - mon->max_freq); + scalar_msg.hw_type = grp->hw_type; + scalar_msg.mon_idx = mon->index; + scalar_msg.val = mon->max_freq; + ret = ops->set_param(memlat_data->ph, &scalar_msg, + MEMLAT_ALGO_STR, MEMLAT_SET_MAX_FREQ, sizeof(scalar_msg)); if (ret < 0) pr_err("failed to set max_freq for %s\n", of_node->name); return ret; } -int cpucp_memlat_init(struct scmi_device *sdev) +static int cpucp_memlat_init(struct scmi_device *sdev) { int ret = 0, i, j; struct scmi_protocol_handle *ph; - const struct scmi_memlat_vendor_ops *ops; + const struct qcom_scmi_vendor_ops *ops; struct memlat_group *grp; bool start_cpucp_timer = false; - if (!memlat_data || !memlat_data->inited) - return -EPROBE_DEFER; - if (!sdev || !sdev->handle) return -EINVAL; - ops = sdev->handle->devm_protocol_get(sdev, SCMI_PROTOCOL_MEMLAT, &ph); + ops = sdev->handle->devm_protocol_get(sdev, QCOM_SCMI_VENDOR_PROTOCOL, &ph); if (IS_ERR(ops)) return 
PTR_ERR(ops); mutex_lock(&memlat_lock); memlat_data->ph = ph; - memlat_data->memlat_ops = ops; + memlat_data->ops = ops; /* Configure common events */ ret = configure_cpucp_common_events(); @@ -1466,45 +1620,41 @@ int cpucp_memlat_init(struct scmi_device *sdev) goto memlat_unlock; } - mutex_lock(&grp->mons_lock); for (j = 0; j < grp->num_inited_mons; j++) { - if (grp->mons[j].type != CPUCP_MON) + if (!grp->cpucp_enabled) continue; /* Configure per monitor parameters */ ret = configure_cpucp_mon(&grp->mons[j]); if (ret < 0) { pr_err("failed to configure mon: %d\n", ret); - goto mons_unlock; + goto memlat_unlock; } start_cpucp_timer = true; } - mutex_unlock(&grp->mons_lock); } + ret = ops->set_param(memlat_data->ph, &memlat_data->cpucp_sample_ms, + MEMLAT_ALGO_STR, MEMLAT_SAMPLE_MS, sizeof(memlat_data->cpucp_sample_ms)); - ret = ops->sample_ms(memlat_data->ph, memlat_data->cpucp_sample_ms); if (ret < 0) { - pr_err("failed to set cpucp sample_ms\n"); + pr_err("failed to set cpucp sample_ms ret = %d\n", ret); goto memlat_unlock; } /* Start sampling and voting timer */ if (start_cpucp_timer) { - ret = ops->start_timer(memlat_data->ph); + ret = ops->start_activity(memlat_data->ph, NULL, + MEMLAT_ALGO_STR, MEMLAT_START_TIMER, 0); if (ret < 0) pr_err("Error in starting the mem group timer %d\n", ret); } - goto memlat_unlock; -mons_unlock: - mutex_unlock(&grp->mons_lock); memlat_unlock: if (ret < 0) - memlat_data->memlat_ops = NULL; + memlat_data->ops = NULL; mutex_unlock(&memlat_lock); return ret; } -EXPORT_SYMBOL(cpucp_memlat_init); - +#endif #define INST_EV 0x08 #define CYC_EV 0x11 static int memlat_dev_probe(struct platform_device *pdev) @@ -1515,6 +1665,18 @@ static int memlat_dev_probe(struct platform_device *pdev) int i, cpu, ret; u32 event_id; +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) + struct scmi_device *scmi_dev; + + scmi_dev = get_qcom_scmi_device(); + if (IS_ERR(scmi_dev)) { + ret = PTR_ERR(scmi_dev); + if (ret == -EPROBE_DEFER) + return ret; + 
dev_err(dev, "Error getting scmi_dev ret = %d\n", ret); + } +#endif + dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL); if (!dev_data) return -ENOMEM; @@ -1749,6 +1911,10 @@ static int memlat_mon_probe(struct platform_device *pdev) struct memlat_mon *mon; struct device_node *of_node = dev->of_node; u32 num_cpus; +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) + int cpucp_ret = 0; + struct scmi_device *scmi_dev; +#endif memlat_grp = dev_get_drvdata(dev->parent); if (!memlat_grp) { @@ -1835,8 +2001,20 @@ static int memlat_mon_probe(struct platform_device *pdev) } mon->index = memlat_grp->num_inited_mons++; - if (memlat_grps_and_mons_inited()) + if (memlat_grps_and_mons_inited()) { memlat_data->inited = true; +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) + scmi_dev = get_qcom_scmi_device(); + if (IS_ERR(scmi_dev)) { + cpucp_ret = PTR_ERR(scmi_dev); + dev_err(dev, "get_qcom_scmi_device ret: %d\n", cpucp_ret); + } else { + cpucp_ret = cpucp_memlat_init(scmi_dev); + if (cpucp_ret < 0) + dev_err(dev, "Err during cpucp_memlat_init: %d\n", cpucp_ret); + } +#endif + } unlock_out: mutex_unlock(&memlat_grp->mons_lock); @@ -1910,5 +2088,8 @@ static struct platform_driver qcom_memlat_driver = { }; module_platform_driver(qcom_memlat_driver); +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) +MODULE_SOFTDEP("pre: qcom_scmi_client"); +#endif MODULE_DESCRIPTION("QCOM MEMLAT Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/dcvs/memlat_scmi.c b/drivers/soc/qcom/dcvs/memlat_scmi.c deleted file mode 100644 index b837a6088ee0..000000000000 --- a/drivers/soc/qcom/dcvs/memlat_scmi.c +++ /dev/null @@ -1,41 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -extern int cpucp_memlat_init(struct scmi_device *sdev); - -static int scmi_memlat_probe(struct scmi_device *sdev) -{ - if (!sdev) - return -ENODEV; - - return cpucp_memlat_init(sdev); -} - -static const struct scmi_device_id scmi_id_table[] = { - { .protocol_id = SCMI_PROTOCOL_MEMLAT, .name = "scmi_protocol_memlat" }, - { }, -}; -MODULE_DEVICE_TABLE(scmi, scmi_id_table); - -static struct scmi_driver scmi_memlat_drv = { - .name = "scmi-memlat-driver", - .probe = scmi_memlat_probe, - .id_table = scmi_id_table, -}; -module_scmi_driver(scmi_memlat_drv); - -MODULE_SOFTDEP("pre: memlat_vendor"); -MODULE_DESCRIPTION("ARM SCMI Memlat driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/dcvs/pmu_lib.c b/drivers/soc/qcom/dcvs/pmu_lib.c index 807d1f7d398a..e1b05831c468 100644 --- a/drivers/soc/qcom/dcvs/pmu_lib.c +++ b/drivers/soc/qcom/dcvs/pmu_lib.c @@ -36,6 +36,22 @@ #define INVALID_ID 0xFF static void __iomem *pmu_base; +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) +#include +#define MAX_NUM_CPUS 8 +#define PMUMAP_ALGO_STR 0x504D554D4150 /* "PMUMAP" */ + +enum scmi_c1dcvs_protocol_cmd { + SET_PMU_MAP = 11, + SET_ENABLE_TRACE, + SET_ENABLE_CACHING, +}; + +struct pmu_map_msg { + uint8_t hw_cntrs[MAX_NUM_CPUS][MAX_CPUCP_EVT]; +}; +#endif + struct cpucp_pmu_ctrs { u32 evctrs[MAX_CPUCP_EVT]; u32 valid; @@ -70,7 +86,11 @@ static bool qcom_pmu_inited; static bool pmu_long_counter; static int cpuhp_state; static struct scmi_protocol_handle *ph; +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) +static const struct qcom_scmi_vendor_ops *ops; +#else static const struct scmi_pmu_vendor_ops *ops; +#endif static LIST_HEAD(idle_notif_list); static DEFINE_SPINLOCK(idle_list_lock); static struct cpucp_hlos_map cpucp_map[MAX_CPUCP_EVT]; @@ -456,12 +476,16 @@ static int events_caching_enable(void) if (!qcom_pmu_inited) return -EPROBE_DEFER; - if (!ops || !pmu_base) return ret; - +#if 
IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) + ret = ops->set_param(ph, &enable, PMUMAP_ALGO_STR, + SET_ENABLE_CACHING, sizeof(enable)); +#else ret = ops->set_cache_enable(ph, &enable); - +#endif + if (ret < 0) + pr_err("failed to set cache enable tunable :%d\n", ret); return ret; } @@ -469,12 +493,14 @@ static int configure_cpucp_map(cpumask_t mask) { struct event_data *event; int i, cpu, ret = 0, cid; - u8 pmu_map[MAX_NUM_CPUS][MAX_CPUCP_EVT]; + uint8_t pmu_map[MAX_NUM_CPUS][MAX_CPUCP_EVT]; struct cpu_data *cpu_data; - +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) + int j; + struct pmu_map_msg msg; +#endif if (!qcom_pmu_inited) return -EPROBE_DEFER; - if (!ops) return ret; @@ -496,8 +522,17 @@ static int configure_cpucp_map(cpumask_t mask) pmu_map[cpu][cid] = event->pevent->hw.idx; } } - +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) + for (i = 0; i < MAX_NUM_CPUS; i++) { + for (j = 0; j < MAX_CPUCP_EVT; j++) + msg.hw_cntrs[i][j] = pmu_map[i][j]; + } + ret = ops->set_param(ph, &msg, PMUMAP_ALGO_STR, SET_PMU_MAP, sizeof(msg)); +#else ret = ops->set_pmu_map(ph, pmu_map); +#endif + if (ret < 0) + pr_err("failed to set pmu map :%d\n", ret); return ret; } @@ -874,14 +909,18 @@ static void load_pmu_counters(void) pr_info("Enabled all perf counters\n"); } -int rimps_pmu_init(struct scmi_device *sdev) +int cpucp_pmu_init(struct scmi_device *sdev) { int ret = 0; if (!sdev || !sdev->handle) return -EINVAL; +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) + ops = sdev->handle->devm_protocol_get(sdev, QCOM_SCMI_VENDOR_PROTOCOL, &ph); +#else ops = sdev->handle->devm_protocol_get(sdev, SCMI_PMU_PROTOCOL, &ph); +#endif if (!ops) return -EINVAL; @@ -901,7 +940,7 @@ int rimps_pmu_init(struct scmi_device *sdev) return ret; } -EXPORT_SYMBOL(rimps_pmu_init); +EXPORT_SYMBOL(cpucp_pmu_init); static int configure_pmu_event(u32 event_id, int amu_id, int cid, int cpu) { @@ -1039,9 +1078,15 @@ static ssize_t store_enable_trace(struct kobject *kobj, if (ret < 0) return ret; +#if 
IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) + ret = ops->set_param(ph, &var, PMUMAP_ALGO_STR, SET_ENABLE_TRACE, sizeof(var)); +#else ret = ops->set_enable_trace(ph, &var); - if (ret < 0) +#endif + if (ret < 0) { + pr_err("failed to set enable_trace tunable: %d\n", ret); return ret; + } pmu_enable_trace = var; @@ -1099,7 +1144,18 @@ static int qcom_pmu_driver_probe(struct platform_device *pdev) unsigned int cpu; struct cpu_data *cpu_data; struct resource res; +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) + int cpucp_ret = 0; + struct scmi_device *scmi_dev; + scmi_dev = get_qcom_scmi_device(); + if (IS_ERR(scmi_dev)) { + ret = PTR_ERR(scmi_dev); + if (ret == -EPROBE_DEFER) + return ret; + dev_err(dev, "Error getting scmi_dev ret = %d\n", ret); + } +#endif if (!pmu_base) { idx = of_property_match_string(dev->of_node, "reg-names", "pmu-base"); if (idx < 0) { @@ -1155,6 +1211,13 @@ skip_pmu: } qcom_pmu_inited = true; +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) + if (!IS_ERR(scmi_dev)) { + cpucp_ret = cpucp_pmu_init(scmi_dev); + if (cpucp_ret < 0) + dev_err(dev, "Err during cpucp_pmu_init ret = %d\n", cpucp_ret); + } +#endif return ret; } @@ -1182,5 +1245,8 @@ static struct platform_driver qcom_pmu_driver = { }; module_platform_driver(qcom_pmu_driver); +#if IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) +MODULE_SOFTDEP("pre: qcom_scmi_client"); +#endif MODULE_DESCRIPTION("QCOM PMU Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/soc/qcom/dcvs/pmu_scmi.c b/drivers/soc/qcom/dcvs/pmu_scmi.c index fd856041cc57..77d4e9bc94fa 100644 --- a/drivers/soc/qcom/dcvs/pmu_scmi.c +++ b/drivers/soc/qcom/dcvs/pmu_scmi.c @@ -19,7 +19,7 @@ static int scmi_pmu_probe(struct scmi_device *sdev) if (!sdev) return -ENODEV; - return rimps_pmu_init(sdev); + return cpucp_pmu_init(sdev); } static const struct scmi_device_id scmi_id_table[] = { diff --git a/include/linux/scmi_memlat.h b/include/linux/scmi_memlat.h deleted file mode 100644 index 9c3579b0e4ef..000000000000 --- 
a/include/linux/scmi_memlat.h +++ /dev/null @@ -1,73 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * SCMI Vendor Protocols header - * - * Copyright (c) 2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. - */ - -#ifndef _SCMI_MEMLAT_H -#define _SCMI_MEMLAT_H - -#include -#include -#include - - -#define SCMI_PROTOCOL_MEMLAT 0x80 -#define MAX_EV_CNTRS 4 /* Maximum number of grp or common events */ -#define MAX_NAME_LEN 20 - -struct scmi_protocol_handle; -/** - * struct scmi_memlat_vendor_ops - represents the various operations provided - * by SCMI HW Memlat Protocol - */ -struct scmi_memlat_vendor_ops { - int (*set_mem_grp)(const struct scmi_protocol_handle *ph, - u32 cpus_mpidr, u32 hw_type); - int (*set_mon)(const struct scmi_protocol_handle *ph, u32 cpus_mpidr, - u32 hw_type, u32 mon_type, u32 index, const char *mon_name); - int (*set_grp_ev_map)(const struct scmi_protocol_handle *ph, u32 hw_type, - void *buf, u32 num_evs); - int (*adaptive_low_freq)(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 index, u32 val); - int (*adaptive_high_freq)(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 index, u32 val); - int (*get_adaptive_cur_freq)(const struct scmi_protocol_handle *ph, u32 hw_type, - u32 mon_idx, void *buf); - int (*set_common_ev_map)(const struct scmi_protocol_handle *ph, void *buf, - u32 num_evs); - int (*ipm_ceil)(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 index, u32 val); - int (*fe_stall_floor)(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 index, u32 val); - int (*be_stall_floor)(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 index, u32 val); - int (*wb_pct_thres)(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 index, u32 val); - int (*wb_filter_ipm)(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 index, u32 val); - int (*freq_scale_pct)(const struct scmi_protocol_handle *ph, - u32 hw_type, 
u32 index, u32 val); - int (*freq_scale_ceil_mhz)(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 index, u32 val); - int (*freq_scale_floor_mhz)(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 index, u32 val); - int (*sample_ms)(const struct scmi_protocol_handle *ph, u32 val); - int (*freq_map)(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 index, u32 nr_rows, void *buf); - int (*min_freq)(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 index, u32 val); - int (*max_freq)(const struct scmi_protocol_handle *ph, - u32 hw_type, u32 index, u32 val); - int (*get_cur_freq)(const struct scmi_protocol_handle *ph, u32 hw_type, - u32 mon_idx, void *buf); - int (*start_timer)(const struct scmi_protocol_handle *ph); - int (*stop_timer)(const struct scmi_protocol_handle *ph); - int (*set_log_level)(const struct scmi_protocol_handle *ph, u32 val); - int (*flush_cpucp_log)(const struct scmi_protocol_handle *ph); - int (*get_timestamp)(const struct scmi_protocol_handle *ph, void *buf); -}; - -#endif /* _SCMI_MEMLAT_H */ diff --git a/include/linux/scmi_pmu.h b/include/linux/scmi_pmu.h index c281a150e86b..b07d70bd90f6 100644 --- a/include/linux/scmi_pmu.h +++ b/include/linux/scmi_pmu.h @@ -17,18 +17,6 @@ #define SCMI_PMU_PROTOCOL 0x86 #define MAX_NUM_CPUS 8 -enum cpucp_ev_idx { - CPU_CYC_EVT = 0, - CNT_CYC_EVT, - INST_RETIRED_EVT, - STALL_BACKEND_EVT, - L2D_CACHE_REFILL_EVT, - L2D_WB_EVT, - L3_CACHE_REFILL_EVT, - L3_ACCESS_EVT, - LLCC_CACHE_REFILL_EVT, - MAX_CPUCP_EVT, -}; struct scmi_protocol_handle; diff --git a/include/soc/qcom/pmu_lib.h b/include/soc/qcom/pmu_lib.h index 0b04cd3de80b..f13d67c5e480 100644 --- a/include/soc/qcom/pmu_lib.h +++ b/include/soc/qcom/pmu_lib.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef _QCOM_PMU_H @@ -8,12 +9,27 @@ #include #include +#if !IS_ENABLED(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) #include +#endif /* (1) ccntr + (6) evcntr + (1) llcc */ #define QCOM_PMU_MAX_EVS 8 #define INVALID_PMU_HW_IDX 0xFF +enum cpucp_ev_idx { + CPU_CYC_EVT = 0, + CNT_CYC_EVT, + INST_RETIRED_EVT, + STALL_BACKEND_EVT, + L2D_CACHE_REFILL_EVT, + L2D_WB_EVT, + L3_CACHE_REFILL_EVT, + L3_ACCESS_EVT, + LLCC_CACHE_REFILL_EVT, + MAX_CPUCP_EVT, +}; + struct cpucp_hlos_map { bool shared; unsigned long cpus; @@ -48,7 +64,7 @@ int qcom_pmu_read_all(int cpu, struct qcom_pmu_data *data); int qcom_pmu_read_all_local(struct qcom_pmu_data *data); int qcom_pmu_idle_register(struct qcom_pmu_notif_node *idle_node); int qcom_pmu_idle_unregister(struct qcom_pmu_notif_node *idle_node); -int rimps_pmu_init(struct scmi_device *sdev); +int cpucp_pmu_init(struct scmi_device *sdev); #else static inline int qcom_pmu_event_supported(u32 event_id, int cpu) { diff --git a/pineapple.bzl b/pineapple.bzl index 6164e5c5a724..a3b84bc71491 100644 --- a/pineapple.bzl +++ b/pineapple.bzl @@ -31,7 +31,6 @@ def define_pineapple(): "drivers/dma/qcom/msm_gpi.ko", "drivers/edac/qcom_edac.ko", "drivers/firmware/arm_scmi/c1dcvs_vendor.ko", - "drivers/firmware/arm_scmi/memlat_vendor.ko", "drivers/firmware/arm_scmi/pmu_vendor.ko", "drivers/firmware/qcom-scm.ko", "drivers/gpu/drm/display/drm_display_helper.ko", @@ -138,7 +137,6 @@ def define_pineapple(): "drivers/soc/qcom/dcvs/c1dcvs_scmi.ko", "drivers/soc/qcom/dcvs/dcvs_fp.ko", "drivers/soc/qcom/dcvs/memlat.ko", - "drivers/soc/qcom/dcvs/memlat_scmi.ko", "drivers/soc/qcom/dcvs/pmu_scmi.ko", "drivers/soc/qcom/dcvs/qcom-dcvs.ko", "drivers/soc/qcom/dcvs/qcom-pmu-lib.ko",