diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst index 38a8231700f1..73ae122159c2 100644 --- a/Documentation/admin-guide/kernel-parameters.rst +++ b/Documentation/admin-guide/kernel-parameters.rst @@ -227,3 +227,7 @@ eipv4= [KNL] Sets ipv4 address at boot up for early ethernet. eipv6= [KNL] Sets ipv6 address at boot up for early ethernet. ermac= [KNL] Sets mac address at boot up for early ethernet. + +board= [KNL] Sets Board type of device at boot up for phy detection. + +enet= [KNL] Sets the PHY type on device at boot up for phy detection. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 144f36e13eb7..b784b10da2f2 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -7096,4 +7096,8 @@ eipv6= [KNL] Sets ipv6 address at boot up for early ethernet. - ermac= [KNL] Sets mac address at boot up for early ethernet. \ No newline at end of file + ermac= [KNL] Sets mac address at boot up for early ethernet. + + board= [KNL] Sets Board type of device at boot up for phy detection. + + enet= [KNL] Sets the PHY type on device at boot up for phy detection. 
diff --git a/android/ACK_SHA b/android/ACK_SHA index 678bfcc7a69d..f9781d3839e1 100644 --- a/android/ACK_SHA +++ b/android/ACK_SHA @@ -1,2 +1,2 @@ -0a5333c8b52768abdbba7bfcffbf36fc761b4cad -android14-6.1-2024-08_r2 +6f645aac97064a41a0bdcb18f1646427fd7ad6b9 +android14-6.1-2024-08_r5 diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig index 12a812e61c16..739a8dab3a94 100644 --- a/arch/arm/mach-qcom/Kconfig +++ b/arch/arm/mach-qcom/Kconfig @@ -46,4 +46,32 @@ config ARCH_MDM9615 bool "Enable support for MDM9615" select CLKSRC_QCOM +config ARCH_MDM9607 + bool "Enable support for MDM9607" + select ARM_GIC + select CPU_V7 + select REGULATOR + select REGULATOR_RPM_SMD + select HAVE_ARM_ARCH_TIMER + select MSM_RPM_SMD + select MEMORY_HOLE_CARVEOUT + select MSM_CORTEX_A7 + select PINCTRL + select QCOM_GDSC + select USE_PINCTRL_IRQ + select MSM_IRQ + select MSM_PM if PM + select PM_DEVFREQ + select MSM_DEVFREQ_DEVBW + select MSM_BIMC_BWMON + select DEVFREQ_GOV_MSM_BW_HWMON + select HWSPINLOCK + select MTD_UBI + select HAVE_CLK_PREPARE + help + Enable support for MDM9607. + + This enables support for MDM9607 SoC devicetree based systems. + If you do not wish to build a kernel that runs on this + chipset or if you are unsure, say 'N' here. 
endif diff --git a/arch/arm64/configs/vendor/autogvm_GKI.config b/arch/arm64/configs/vendor/autogvm_GKI.config index e650791682ff..043f9f0be278 100644 --- a/arch/arm64/configs/vendor/autogvm_GKI.config +++ b/arch/arm64/configs/vendor/autogvm_GKI.config @@ -5,6 +5,7 @@ CONFIG_ARM_SMMU=m CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y CONFIG_ARM_SMMU_QCOM=m # CONFIG_ARM_SMMU_SELFTEST is not set +CONFIG_BOOTMARKER_PROXY=m CONFIG_CFG80211=m CONFIG_COMMON_CLK_QCOM=m CONFIG_CRYPTO_DEV_QCOM_RNG=m @@ -124,7 +125,6 @@ CONFIG_QTI_QUIN_GVM=y # CONFIG_QTI_THERMAL_MINIDUMP is not set # CONFIG_R8188EU is not set # CONFIG_R8712U is not set -CONFIG_REGULATOR_DEBUG_CONTROL=m CONFIG_REGULATOR_STUB=m CONFIG_RENAME_DEVICES=m CONFIG_RPMSG_QCOM_GLINK=m diff --git a/arch/arm64/configs/vendor/gen3auto_GKI.config b/arch/arm64/configs/vendor/gen3auto_GKI.config index d1abb5c93617..a92b128ec97c 100644 --- a/arch/arm64/configs/vendor/gen3auto_GKI.config +++ b/arch/arm64/configs/vendor/gen3auto_GKI.config @@ -10,6 +10,7 @@ CONFIG_ARM_SMMU_QCOM=m CONFIG_ARM_SMMU_TESTBUS=y CONFIG_ARM_SMMU_TESTBUS_DUMP=y CONFIG_ARM_SMMU_TESTBUS_DUMP_GEN3AUTO=y +CONFIG_BOOTMARKER_PROXY=m CONFIG_CFG80211=m CONFIG_CHR_DEV_SG=m CONFIG_COMMON_CLK_QCOM=m diff --git a/arch/arm64/configs/vendor/neo_la_GKI.config b/arch/arm64/configs/vendor/neo_la_GKI.config index 6f841674696b..478a62b6723d 100644 --- a/arch/arm64/configs/vendor/neo_la_GKI.config +++ b/arch/arm64/configs/vendor/neo_la_GKI.config @@ -40,17 +40,26 @@ CONFIG_HWSPINLOCK_QCOM=m CONFIG_I2C_EUSB2_REPEATER=m CONFIG_I2C_MSM_GENI=m CONFIG_INIT_ON_FREE_DEFAULT_ON=y +CONFIG_INPUT_PM8941_PWRKEY=m +# CONFIG_INPUT_PM8XXX_VIBRATOR is not set +# CONFIG_INPUT_QCOM_HV_HAPTICS is not set CONFIG_INTERCONNECT_QCOM_BCM_VOTER=m CONFIG_INTERCONNECT_QCOM_DEBUG=m CONFIG_INTERCONNECT_QCOM_NEO=m CONFIG_INTERCONNECT_QCOM_QOS=m CONFIG_INTERCONNECT_QCOM_RPMH=m +CONFIG_INTERCONNECT_TEST=m CONFIG_IOMMU_IO_PGTABLE_FAST=y CONFIG_IPC_LOGGING=m CONFIG_IPC_LOG_MINIDUMP_BUFFERS=16 +# 
CONFIG_LEDS_QPNP_FLASH_V2 is not set +# CONFIG_LEDS_QPNP_VIBRATOR_LDO is not set +# CONFIG_LEDS_QTI_FLASH is not set +# CONFIG_LEDS_QTI_TRI_LED is not set CONFIG_LOCALVERSION="-gki" CONFIG_MAC80211=m CONFIG_MFD_I2C_PMIC=m +CONFIG_MFD_SPMI_PMIC=m CONFIG_MHI_BUS=m CONFIG_MHI_BUS_MISC=y CONFIG_MHI_DTR=m @@ -68,13 +77,18 @@ CONFIG_MSM_RDBG=m CONFIG_MSM_SYSSTATS=m CONFIG_NL80211_TESTMODE=y CONFIG_NVMEM_QCOM_QFPROM=m +CONFIG_NVMEM_SPMI_SDAM=m CONFIG_PCI_MSM=m CONFIG_PDR_INDICATION_NOTIF_TIMEOUT=9000 CONFIG_PINCTRL_MSM=m CONFIG_PINCTRL_NEO=m +CONFIG_PINCTRL_QCOM_SPMI_PMIC=m +# CONFIG_PM8916_WATCHDOG is not set CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE=m CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE_DEFAULT=y +CONFIG_POWER_RESET_QCOM_PON=m CONFIG_POWER_RESET_QCOM_REBOOT_REASON=m +# CONFIG_PWM_QTI_LPG is not set CONFIG_QCOM_AOSS_QMP=m CONFIG_QCOM_BALANCE_ANON_FILE_RECLAIM=y CONFIG_QCOM_BAM_DMA=m @@ -82,6 +96,7 @@ CONFIG_QCOM_BWMON=m CONFIG_QCOM_BWPROF=m CONFIG_QCOM_CDSP_RM=m CONFIG_QCOM_CLK_RPMH=m +# CONFIG_QCOM_COINCELL is not set CONFIG_QCOM_COMMAND_DB=m CONFIG_QCOM_CPUSS_SLEEP_STATS=m CONFIG_QCOM_CPU_VENDOR_HOOKS=m @@ -95,6 +110,7 @@ CONFIG_QCOM_DMABUF_HEAPS_CMA=y CONFIG_QCOM_DMABUF_HEAPS_PAGE_POOL_REFILL=y CONFIG_QCOM_DMABUF_HEAPS_SYSTEM=y CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_SECURE=y +# CONFIG_QCOM_EPM is not set CONFIG_QCOM_EUD=m CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y CONFIG_QCOM_GDSC_REGULATOR=m @@ -140,10 +156,15 @@ CONFIG_QCOM_SMP2P=m CONFIG_QCOM_SMP2P_SLEEPSTATE=m CONFIG_QCOM_SOCINFO=m CONFIG_QCOM_SOC_WATCHDOG=m +CONFIG_QCOM_SPMI_ADC5=m +CONFIG_QCOM_SPMI_ADC_TM5=m +# CONFIG_QCOM_SPMI_RRADC is not set +CONFIG_QCOM_SPMI_TEMP_ALARM=m CONFIG_QCOM_STATS=m CONFIG_QCOM_SYSMON=m CONFIG_QCOM_SYSMON_SUBSYSTEM_STATS=m CONFIG_QCOM_TSENS=m +CONFIG_QCOM_VADC_COMMON=m CONFIG_QCOM_VA_MINIDUMP=m CONFIG_QCOM_WATCHDOG_BARK_TIME=11000 CONFIG_QCOM_WATCHDOG_IPI_PING=y @@ -169,6 +190,8 @@ CONFIG_QTI_QMI_COOLING_DEVICE=m # CONFIG_QTI_QMI_SENSOR is not set CONFIG_QTI_SYS_PM_VX=m 
CONFIG_QTI_USERSPACE_CDEV=m +CONFIG_REBOOT_MODE=m +CONFIG_REGMAP_QTI_DEBUGFS=m CONFIG_REGULATOR_DEBUG_CONTROL=m CONFIG_REGULATOR_PROXY_CONSUMER=m CONFIG_REGULATOR_QCOM_PM8008=m @@ -178,12 +201,16 @@ CONFIG_RPMSG_QCOM_GLINK=m CONFIG_RPMSG_QCOM_GLINK_SMEM=m CONFIG_RPROC_SSR_NOTIF_TIMEOUT=20000 CONFIG_RPROC_SYSMON_NOTIF_TIMEOUT=20000 +CONFIG_RTC_DRV_PM8XXX=m CONFIG_SCHED_WALT=m CONFIG_SERIAL_MSM_GENI=m CONFIG_SLIMBUS=m CONFIG_SLIM_QCOM_NGD_CTRL=m # CONFIG_SND_USB_AUDIO_QMI is not set CONFIG_SPI_MSM_GENI=m +CONFIG_SPMI_MSM_PMIC_ARB=m +CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=m +CONFIG_SPMI_PMIC_CLKDIV=m CONFIG_SPS=m CONFIG_SPS_SUPPORT_NDP_BAM=y CONFIG_STM=m diff --git a/arch/arm64/configs/vendor/niobe_GKI.config b/arch/arm64/configs/vendor/niobe_GKI.config index 2c90f9e1ca33..739f526eb71d 100644 --- a/arch/arm64/configs/vendor/niobe_GKI.config +++ b/arch/arm64/configs/vendor/niobe_GKI.config @@ -111,7 +111,7 @@ CONFIG_PHY_QCOM_UFS_QRBTC_SDM845=m CONFIG_PHY_QCOM_UFS_V4=m # CONFIG_PHY_QCOM_UFS_V4_BLAIR is not set # CONFIG_PHY_QCOM_UFS_V4_KALAMA is not set -CONFIG_PHY_QCOM_UFS_V4_PINEAPPLE=m +CONFIG_PHY_QCOM_UFS_V4_NIOBE=m # CONFIG_PHY_QCOM_UFS_V4_WAIPIO is not set CONFIG_PINCTRL_MSM=m CONFIG_PINCTRL_NIOBE=m @@ -147,7 +147,7 @@ CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_SECURE=y # CONFIG_QCOM_DMABUF_HEAPS_TUI_CARVEOUT is not set # CONFIG_QCOM_DMABUF_HEAPS_UBWCP is not set # CONFIG_QCOM_DYN_MINIDUMP_STACK is not set -CONFIG_QCOM_EPM=m +# CONFIG_QCOM_EPM is not set CONFIG_QCOM_EUD=m CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y CONFIG_QCOM_FSA4480_I2C=m @@ -191,6 +191,7 @@ CONFIG_QCOM_PDC=m CONFIG_QCOM_PDR_HELPERS=m CONFIG_QCOM_PIL_INFO=m CONFIG_QCOM_PMU_LIB=m +CONFIG_QCOM_POWER_TELEMETRY=m CONFIG_QCOM_Q6V5_COMMON=m CONFIG_QCOM_Q6V5_PAS=m CONFIG_QCOM_QFPROM_SYS=m diff --git a/arch/arm64/configs/vendor/seraph_GKI.config b/arch/arm64/configs/vendor/seraph_GKI.config index 492cc461ba69..48c93244f566 100644 --- a/arch/arm64/configs/vendor/seraph_GKI.config +++ b/arch/arm64/configs/vendor/seraph_GKI.config 
@@ -1,15 +1,42 @@ CONFIG_ARCH_QCOM=y CONFIG_ARCH_SERAPH=y +CONFIG_ARM_SMMU=m +# CONFIG_ARM_SMMU_CAPTUREBUS_DEBUGFS is not set +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y +CONFIG_ARM_SMMU_QCOM=m +# CONFIG_ARM_SMMU_QCOM_DEBUG is not set +# CONFIG_ARM_SMMU_SELFTEST is not set +# CONFIG_ARM_SMMU_TESTBUS is not set CONFIG_COMMON_CLK_QCOM=m CONFIG_HWSPINLOCK_QCOM=m +# CONFIG_IOMMU_TLBSYNC_DEBUG is not set CONFIG_LOCALVERSION="-gki" # CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MSM_HAB is not set CONFIG_PINCTRL_MSM=m CONFIG_PINCTRL_SERAPH=m CONFIG_QCOM_COMMAND_DB=m +CONFIG_QCOM_DMABUF_HEAPS=m +CONFIG_QCOM_DMABUF_HEAPS_CARVEOUT=y +CONFIG_QCOM_DMABUF_HEAPS_CMA=y +CONFIG_QCOM_DMABUF_HEAPS_PAGE_POOL_REFILL=y +CONFIG_QCOM_DMABUF_HEAPS_SYSTEM=y +# CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_MOVABLE is not set +# CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_UNCACHED is not set +# CONFIG_QCOM_DMABUF_HEAPS_TUI_CARVEOUT is not set +# CONFIG_QCOM_DMABUF_HEAPS_UBWCP is not set CONFIG_QCOM_GDSC_REGULATOR=m +CONFIG_QCOM_IOMMU_DEBUG=m +CONFIG_QCOM_IOMMU_UTIL=m +CONFIG_QCOM_LAZY_MAPPING=m +CONFIG_QCOM_MEM_BUF=m +CONFIG_QCOM_MEM_BUF_DEV=m +CONFIG_QCOM_MEM_HOOKS=m +CONFIG_QCOM_MEM_OFFLINE=m CONFIG_QCOM_PDC=m CONFIG_QCOM_RPMH=m CONFIG_QCOM_SCM=m CONFIG_QCOM_SMEM=m CONFIG_QCOM_SOCINFO=m +CONFIG_QTI_IOMMU_SUPPORT=m +CONFIG_REGULATOR_STUB=m diff --git a/autogvm.bzl b/autogvm.bzl index fab19d77e27c..792e9fa2a706 100644 --- a/autogvm.bzl +++ b/autogvm.bzl @@ -40,6 +40,7 @@ def define_autogvm(): "drivers/md/dm-bow.ko", "drivers/media/platform/msm/npu/virtio_npu.ko", "drivers/mfd/qcom-spmi-pmic.ko", + "drivers/misc/bootmarker_proxy.ko", "drivers/misc/qseecom_proxy.ko", "drivers/mmc/host/cqhci.ko", "drivers/mmc/host/sdhci-msm.ko", @@ -73,7 +74,6 @@ def define_autogvm(): "drivers/pinctrl/qcom/pinctrl-spmi-gpio.ko", "drivers/pinctrl/qcom/pinctrl-spmi-mpp.ko", "drivers/power/reset/msm-vm-poweroff.ko", - "drivers/regulator/debug-regulator.ko", "drivers/regulator/stub-regulator.ko", "drivers/regulator/virtio_regulator.ko", 
"drivers/remoteproc/rproc_qcom_common.ko", diff --git a/drivers/clk/qcom/gcc-anorak.c b/drivers/clk/qcom/gcc-anorak.c index 9c348ff153d3..d910e7dc5679 100644 --- a/drivers/clk/qcom/gcc-anorak.c +++ b/drivers/clk/qcom/gcc-anorak.c @@ -3773,6 +3773,7 @@ static struct clk_branch gcc_usb2_0_clkref_en = { .enable_mask = BIT(0), .hw.init = &(struct clk_init_data){ .name = "gcc_usb2_0_clkref_en", + .flags = CLK_DONT_HOLD_STATE, .ops = &clk_branch2_ops, }, }, diff --git a/drivers/clk/qcom/gcc-pitti.c b/drivers/clk/qcom/gcc-pitti.c index f3c1436c3db3..67ac9de07462 100644 --- a/drivers/clk/qcom/gcc-pitti.c +++ b/drivers/clk/qcom/gcc-pitti.c @@ -3723,6 +3723,7 @@ static struct clk_branch gcc_usb2_clkref_en = { .enable_mask = BIT(0), .hw.init = &(const struct clk_init_data) { .name = "gcc_usb2_clkref_en", + .flags = CLK_DONT_HOLD_STATE, .ops = &clk_branch2_ops, }, }, diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index c93954613691..f94426506fe2 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -18,6 +18,23 @@ config CPU_FREQ If in doubt, say N. +config CPU_FREQ_MSM + bool "CPU Frequency scaling" + select SRCU + help + CPU Frequency scaling allows you to change the clock speed of + CPUs on the fly. This is a nice method to save power, because + the lower the CPU clock speed, the less power the CPU consumes. + + Note that this driver doesn't automatically change the CPU + clock speed, you need to either enable a dynamic cpufreq governor + (see below) after boot, or use a userspace tool. + + For details, take a look at + . + + If in doubt, say N. 
+ if CPU_FREQ config CPU_FREQ_GOV_ATTR_SET diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index b34e73cbc64e..f75a876e3e62 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -97,6 +97,7 @@ obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o obj-$(CONFIG_ARM_TEGRA194_CPUFREQ) += tegra194-cpufreq.o obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o +obj-$(CONFIG_CPU_FREQ_MSM) += qcom-cpufreq.o ################################################################################## diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c new file mode 100644 index 000000000000..066c102bec68 --- /dev/null +++ b/drivers/cpufreq/qcom-cpufreq.c @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2007-2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + * Author: Mike A. Chan + */ + +/* MSM architecture cpufreq driver */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_MUTEX(l2bw_lock); + +static struct thermal_cooling_device *cdev[NR_CPUS]; +static struct clk *cpu_clk[NR_CPUS]; +static struct clk *l2_clk; +static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table); +static bool hotplug_ready; + +struct cpufreq_suspend_t { + struct mutex suspend_mutex; + int device_suspended; +}; + +static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data); +static DEFINE_PER_CPU(int, cached_resolve_idx); +static DEFINE_PER_CPU(unsigned int, cached_resolve_freq); + +#define CPUHP_QCOM_CPUFREQ_PREPARE CPUHP_AP_ONLINE_DYN +#define CPUHP_AP_QCOM_CPUFREQ_STARTING (CPUHP_AP_ONLINE_DYN + 1) + +static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq, + unsigned int index) +{ + int ret = 0; + struct cpufreq_freqs freqs; + unsigned long rate; + + 
freqs.old = policy->cur; + freqs.new = new_freq; + + //trace_cpu_frequency_switch_start(freqs.old, freqs.new, policy->cpu); + cpufreq_freq_transition_begin(policy, &freqs); + + rate = new_freq * 1000; + rate = clk_round_rate(cpu_clk[policy->cpu], rate); + ret = clk_set_rate(cpu_clk[policy->cpu], rate); + cpufreq_freq_transition_end(policy, &freqs, ret); + if (!ret) { + arch_set_freq_scale(policy->related_cpus, new_freq, + policy->cpuinfo.max_freq); + //trace_cpu_frequency_switch_end(policy->cpu); + } + + return ret; +} + +static int msm_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + int ret = 0; + int index; + struct cpufreq_frequency_table *table; + int first_cpu = cpumask_first(policy->related_cpus); + + mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex); + + if (target_freq == policy->cur) + goto done; + + if (per_cpu(suspend_data, policy->cpu).device_suspended) { + pr_debug("cpufreq: cpu%d scheduling frequency change in suspend\n", + policy->cpu); + ret = -EFAULT; + goto done; + } + + table = policy->freq_table; + if (per_cpu(cached_resolve_freq, first_cpu) == target_freq) + index = per_cpu(cached_resolve_idx, first_cpu); + else + index = cpufreq_frequency_table_target(policy, target_freq, + relation); + + pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n", + policy->cpu, target_freq, relation, + policy->min, policy->max, table[index].frequency); + + ret = set_cpu_freq(policy, table[index].frequency, + table[index].driver_data); +done: + mutex_unlock(&per_cpu(suspend_data, policy->cpu).suspend_mutex); + return ret; +} + +static unsigned int msm_cpufreq_resolve_freq(struct cpufreq_policy *policy, + unsigned int target_freq) +{ + int index; + int first_cpu = cpumask_first(policy->related_cpus); + unsigned int freq; + + index = cpufreq_frequency_table_target(policy, target_freq, + CPUFREQ_RELATION_L); + freq = policy->freq_table[index].frequency; + + per_cpu(cached_resolve_idx, 
first_cpu) = index; + per_cpu(cached_resolve_freq, first_cpu) = freq; + + return freq; +} + +static int msm_cpufreq_verify(struct cpufreq_policy_data *policy) +{ + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + return 0; +} + +static unsigned int msm_cpufreq_get_freq(unsigned int cpu) +{ + return clk_get_rate(cpu_clk[cpu]) / 1000; +} + +static int msm_cpufreq_init(struct cpufreq_policy *policy) +{ + int cur_freq; + int index; + int ret = 0; + struct cpufreq_frequency_table *table = + per_cpu(freq_table, policy->cpu); + int cpu; + + /* + * In some SoC, some cores are clocked by same source, and their + * frequencies can not be changed independently. Find all other + * CPUs that share same clock, and mark them as controlled by + * same policy. + */ + for_each_possible_cpu(cpu) + if (cpu_clk[cpu] == cpu_clk[policy->cpu]) + cpumask_set_cpu(cpu, policy->cpus); + + policy->freq_table = table; + ret = cpufreq_table_validate_and_sort(policy); + if (ret) { + pr_err("cpufreq: failed to get policy min/max\n"); + return ret; + } + + cur_freq = clk_get_rate(cpu_clk[policy->cpu])/1000; + + index = cpufreq_frequency_table_target(policy, cur_freq, + CPUFREQ_RELATION_H); + /* + * Call set_cpu_freq unconditionally so that when cpu is set to + * online, frequency limit will always be updated. 
+ */ + ret = set_cpu_freq(policy, table[index].frequency, + table[index].driver_data); + if (ret) + return ret; + pr_debug("cpufreq: cpu%d init at %d switching to %d\n", + policy->cpu, cur_freq, table[index].frequency); + policy->cur = table[index].frequency; + policy->dvfs_possible_from_any_cpu = true; + + return 0; +} + +static int qcom_cpufreq_dead_cpu(unsigned int cpu) +{ + /* Fail hotplug until this driver can get CPU clocks */ + if (!hotplug_ready) + return -EINVAL; + + clk_unprepare(cpu_clk[cpu]); + clk_unprepare(l2_clk); + return 0; +} + +static int qcom_cpufreq_up_cpu(unsigned int cpu) +{ + int rc; + + /* Fail hotplug until this driver can get CPU clocks */ + if (!hotplug_ready) + return -EINVAL; + + rc = clk_prepare(l2_clk); + if (rc < 0) + return rc; + rc = clk_prepare(cpu_clk[cpu]); + if (rc < 0) + clk_unprepare(l2_clk); + return rc; +} + +static int qcom_cpufreq_dying_cpu(unsigned int cpu) +{ + /* Fail hotplug until this driver can get CPU clocks */ + if (!hotplug_ready) + return -EINVAL; + + clk_disable(cpu_clk[cpu]); + clk_disable(l2_clk); + return 0; +} + +static int qcom_cpufreq_starting_cpu(unsigned int cpu) +{ + int rc; + + /* Fail hotplug until this driver can get CPU clocks */ + if (!hotplug_ready) + return -EINVAL; + + rc = clk_enable(l2_clk); + if (rc < 0) + return rc; + rc = clk_enable(cpu_clk[cpu]); + if (rc < 0) + clk_disable(l2_clk); + return rc; +} + +static int msm_cpufreq_suspend(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + mutex_lock(&per_cpu(suspend_data, cpu).suspend_mutex); + per_cpu(suspend_data, cpu).device_suspended = 1; + mutex_unlock(&per_cpu(suspend_data, cpu).suspend_mutex); + } + + return NOTIFY_DONE; +} + +static int msm_cpufreq_resume(void) +{ + int cpu, ret; + struct cpufreq_policy policy; + + for_each_possible_cpu(cpu) { + per_cpu(suspend_data, cpu).device_suspended = 0; + } + + /* + * Freq request might be rejected during suspend, resulting + * in policy->cur violating min/max constraint. 
+ * Correct the frequency as soon as possible. + */ + cpus_read_lock(); + for_each_online_cpu(cpu) { + ret = cpufreq_get_policy(&policy, cpu); + if (ret) + continue; + if (policy.cur <= policy.max && policy.cur >= policy.min) + continue; + cpufreq_update_policy(cpu); + } + cpus_read_unlock(); + + return NOTIFY_DONE; +} + +static int msm_cpufreq_pm_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + switch (event) { + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + return msm_cpufreq_resume(); + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + return msm_cpufreq_suspend(); + default: + return NOTIFY_DONE; + } +} + +static struct notifier_block msm_cpufreq_pm_notifier = { + .notifier_call = msm_cpufreq_pm_event, +}; + +static struct freq_attr *msm_freq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static void msm_cpufreq_ready(struct cpufreq_policy *policy) +{ + struct device_node *np; + unsigned int cpu = policy->cpu; + + if (cdev[cpu]) + return; + + np = of_cpu_device_node_get(cpu); + if (WARN_ON(!np)) + return; + + /* + * For now, just loading the cooling device; + * thermal DT code takes care of matching them. + */ + if (of_find_property(np, "#cooling-cells", NULL)) { + cdev[cpu] = of_cpufreq_cooling_register(policy); + if (IS_ERR(cdev[cpu])) { + pr_err("running cpufreq for CPU%d without cooling dev: %ld\n", + cpu, PTR_ERR(cdev[cpu])); + cdev[cpu] = NULL; + } + } + + of_node_put(np); +} + +static struct cpufreq_driver msm_cpufreq_driver = { + /* lps calculations are handled here. 
*/ + .flags = CPUFREQ_NEED_UPDATE_LIMITS | CPUFREQ_CONST_LOOPS | + CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .init = msm_cpufreq_init, + .verify = msm_cpufreq_verify, + .target = msm_cpufreq_target, + .fast_switch = msm_cpufreq_resolve_freq, + .get = msm_cpufreq_get_freq, + .name = "msm", + .attr = msm_freq_attr, + .ready = msm_cpufreq_ready, +}; + +static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev, + char *tbl_name, int cpu) +{ + int ret, nf, i, j; + u32 *data; + struct cpufreq_frequency_table *ftbl; + + /* Parse list of usable CPU frequencies. */ + if (!of_find_property(dev->of_node, tbl_name, &nf)) + return ERR_PTR(-EINVAL); + nf /= sizeof(*data); + + if (nf == 0) + return ERR_PTR(-EINVAL); + + data = kcalloc(nf, sizeof(*data), GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); + + ret = of_property_read_u32_array(dev->of_node, tbl_name, data, nf); + if (ret) + return ERR_PTR(ret); + + ftbl = kcalloc((nf + 1), sizeof(*ftbl), GFP_KERNEL); + if (!ftbl) + return ERR_PTR(-ENOMEM); + + j = 0; + for (i = 0; i < nf; i++) { + unsigned long f; + + f = clk_round_rate(cpu_clk[cpu], data[i] * 1000); + if (IS_ERR_VALUE(f)) + break; + f /= 1000; + + /* + * Don't repeat frequencies if they round up to the same clock + * frequency. 
+ * + */ + if (j > 0 && f <= ftbl[j - 1].frequency) + continue; + + ftbl[j].driver_data = j; + ftbl[j].frequency = f; + j++; + } + + ftbl[j].driver_data = j; + ftbl[j].frequency = CPUFREQ_TABLE_END; + + kfree(data); + + return ftbl; +} + +static int msm_cpufreq_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + char clk_name[] = "cpu??_clk"; + char tbl_name[] = "qcom,cpufreq-table-??"; + struct clk *c; + int cpu, ret; + struct cpufreq_frequency_table *ftbl; + + l2_clk = devm_clk_get(dev, "l2_clk"); + if (IS_ERR(l2_clk)) + l2_clk = NULL; + + for_each_possible_cpu(cpu) { + snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu); + c = devm_clk_get(dev, clk_name); + if (cpu == 0 && IS_ERR(c)) + return PTR_ERR(c); + else if (IS_ERR(c)) + c = cpu_clk[cpu-1]; + cpu_clk[cpu] = c; + } + hotplug_ready = true; + + /* Use per-policy governor tunable for some targets */ + if (of_property_read_bool(dev->of_node, "qcom,governor-per-policy")) + msm_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY; + + /* Parse commong cpufreq table for all CPUs */ + ftbl = cpufreq_parse_dt(dev, "qcom,cpufreq-table", 0); + if (!IS_ERR(ftbl)) { + for_each_possible_cpu(cpu) + per_cpu(freq_table, cpu) = ftbl; + goto out_register; + } + + /* + * No common table. Parse individual tables for each unique + * CPU clock. 
+ */ + for_each_possible_cpu(cpu) { + snprintf(tbl_name, sizeof(tbl_name), + "qcom,cpufreq-table-%d", cpu); + ftbl = cpufreq_parse_dt(dev, tbl_name, cpu); + + /* CPU0 must contain freq table */ + if (cpu == 0 && IS_ERR(ftbl)) { + dev_err(dev, "Failed to parse CPU0's freq table\n"); + return PTR_ERR(ftbl); + } + if (cpu == 0) { + per_cpu(freq_table, cpu) = ftbl; + continue; + } + + if (cpu_clk[cpu] != cpu_clk[cpu - 1] && IS_ERR(ftbl)) { + dev_err(dev, "Failed to parse CPU%d's freq table\n", + cpu); + return PTR_ERR(ftbl); + } + + /* Use previous CPU's table if it shares same clock */ + if (cpu_clk[cpu] == cpu_clk[cpu - 1]) { + if (!IS_ERR(ftbl)) { + dev_warn(dev, "Conflicting tables for CPU%d\n", + cpu); + kfree(ftbl); + } + ftbl = per_cpu(freq_table, cpu - 1); + } + per_cpu(freq_table, cpu) = ftbl; + } + +out_register: + ret = register_pm_notifier(&msm_cpufreq_pm_notifier); + if (ret) + return ret; + + ret = cpufreq_register_driver(&msm_cpufreq_driver); + if (ret) + unregister_pm_notifier(&msm_cpufreq_pm_notifier); + else + dev_err(dev, "Probe successful\n"); + + return ret; +} + +static const struct of_device_id msm_cpufreq_match_table[] = { + { .compatible = "qcom,msm-cpufreq" }, + {} +}; + +static struct platform_driver msm_cpufreq_plat_driver = { + .probe = msm_cpufreq_probe, + .driver = { + .name = "msm-cpufreq", + .of_match_table = msm_cpufreq_match_table, + }, +}; + +static int __init msm_cpufreq_register(void) +{ + int cpu, rc; + + for_each_possible_cpu(cpu) { + mutex_init(&(per_cpu(suspend_data, cpu).suspend_mutex)); + per_cpu(suspend_data, cpu).device_suspended = 0; + per_cpu(cached_resolve_freq, cpu) = UINT_MAX; + } + + rc = platform_driver_register(&msm_cpufreq_plat_driver); + if (rc < 0) { + /* Unblock hotplug if msm-cpufreq probe fails */ + cpuhp_remove_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE); + cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING); + for_each_possible_cpu(cpu) + mutex_destroy(&(per_cpu(suspend_data, cpu). 
+ suspend_mutex)); + return rc; + } + + return 0; +} + +subsys_initcall(msm_cpufreq_register); + +static int __init msm_cpufreq_early_register(void) +{ + int ret; + + ret = cpuhp_setup_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING, + "AP_QCOM_CPUFREQ_STARTING", + qcom_cpufreq_starting_cpu, + qcom_cpufreq_dying_cpu); + if (ret) + return ret; + + ret = cpuhp_setup_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE, + "QCOM_CPUFREQ_PREPARE", + qcom_cpufreq_up_cpu, + qcom_cpufreq_dead_cpu); + if (!ret) + return ret; + cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING); + return ret; +} +core_initcall(msm_cpufreq_early_register); diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig index 7c44a3bb25f4..9ffadc7a50b6 100644 --- a/drivers/devfreq/Kconfig +++ b/drivers/devfreq/Kconfig @@ -84,6 +84,14 @@ config DEVFREQ_GOV_QCOM_BW_HWMON can conflict with existing profiling tools. This governor is unlikely to be useful for non-QCOM devices. +config DEVFREQ_GOV_CPUFREQ + tristate "CPUfreq" + depends on CPU_FREQ + help + Chooses frequency based on the online CPUs' current frequency and a + CPU frequency to device frequency mapping table(s). This governor + can be useful for controlling devices such as DDR, cache, CCI, etc. 
+ comment "DEVFREQ Drivers" config ARM_EXYNOS_BUS_DEVFREQ diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile index 47e9aec7d7b4..07adfac093da 100644 --- a/drivers/devfreq/Makefile +++ b/drivers/devfreq/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o obj-$(CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON) += governor_bw_hwmon.o +obj-$(CONFIG_DEVFREQ_GOV_CPUFREQ) += governor_cpufreq.o # DEVFREQ Drivers obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o diff --git a/drivers/devfreq/devfreq_icc.c b/drivers/devfreq/devfreq_icc.c new file mode 100644 index 000000000000..6ce8fc3f874d --- /dev/null +++ b/drivers/devfreq/devfreq_icc.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2013-2014, 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#define pr_fmt(fmt) "devfreq-icc: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +/* Has to be UL to avoid errors in 32 bit. 
Use cautiously to avoid overflows.*/ +#define MBYTE (1UL << 20) +#define HZ_TO_MBPS(hz, w) (mult_frac(w, hz, MBYTE)) +#define MBPS_TO_HZ(mbps, w) (mult_frac(mbps, MBYTE, w)) +#define MBPS_TO_ICC(mbps) (mult_frac(mbps, MBYTE, 1000)) + +enum dev_type { + STD_MBPS_DEV, + L3_HZ_DEV, + L3_MBPS_DEV, + NUM_DEV_TYPES +}; + +struct devfreq_icc_spec { + enum dev_type type; +}; + +struct dev_data { + struct icc_path *icc_path; + u32 cur_ab; + u32 cur_ib; + unsigned long gov_ab; + const struct devfreq_icc_spec *spec; + unsigned int width; + struct devfreq *df; + struct devfreq_dev_profile dp; +}; + +#define MAX_L3_ENTRIES 40U +static unsigned long l3_freqs[MAX_L3_ENTRIES]; +static DEFINE_MUTEX(l3_freqs_lock); +static bool use_cached_l3_freqs; + +static u64 mbps_to_hz_icc(u32 in, uint width) +{ + u64 result; + u32 quot = in / width; + u32 rem = in % width; + + result = quot * MBYTE + div_u64(rem * MBYTE, width); + return result; +} + +static int set_bw(struct device *dev, u32 new_ib, u32 new_ab) +{ + struct dev_data *d = dev_get_drvdata(dev); + int ret; + u64 icc_ib = new_ib, icc_ab = new_ab; + + if (d->cur_ib == new_ib && d->cur_ab == new_ab) + return 0; + + if (d->spec->type == L3_MBPS_DEV) { + icc_ib = mbps_to_hz_icc(new_ib, d->width); + icc_ab = mbps_to_hz_icc(new_ab, d->width); + } else if (d->spec->type == STD_MBPS_DEV) { + icc_ib = mbps_to_hz_icc(new_ib, 1000); + icc_ab = mbps_to_hz_icc(new_ab, 1000); + } + + dev_dbg(dev, "ICC BW: AB: %llu IB: %llu\n", icc_ab, icc_ib); + + ret = icc_set_bw(d->icc_path, icc_ab, icc_ib); + if (ret < 0) { + dev_err(dev, "icc set bandwidth request failed (%d)\n", ret); + } else { + d->cur_ib = new_ib; + d->cur_ab = new_ab; + } + + return ret; +} + +static int icc_target(struct device *dev, unsigned long *freq, u32 flags) +{ + struct dev_data *d = dev_get_drvdata(dev); + struct dev_pm_opp *opp; + + opp = devfreq_recommended_opp(dev, freq, flags); + if (!IS_ERR(opp)) + dev_pm_opp_put(opp); + + return set_bw(dev, *freq, d->gov_ab); +} + +static 
int icc_get_dev_status(struct device *dev, + struct devfreq_dev_status *stat) +{ + struct dev_data *d = dev_get_drvdata(dev); + + stat->private_data = &d->gov_ab; + return 0; +} + +#define INIT_HZ 300000000UL +#define XO_HZ 19200000UL +#define FTBL_ROW_SIZE 4 +#define SRC_MASK GENMASK(31, 30) +#define SRC_SHIFT 30 +#define MULT_MASK GENMASK(7, 0) + +static int populate_l3_opp_table(struct device *dev) +{ + struct dev_data *d = dev_get_drvdata(dev); + int idx, ret; + u32 data, src, mult, i; + unsigned long freq, prev_freq = 0; + struct resource res; + void __iomem *ftbl_base; + unsigned int ftbl_row_size = FTBL_ROW_SIZE; + + idx = of_property_match_string(dev->of_node, "reg-names", "ftbl-base"); + if (idx < 0) { + dev_err(dev, "Unable to find ftbl-base: %d\n", idx); + return -EINVAL; + } + + ret = of_address_to_resource(dev->of_node, idx, &res); + if (ret < 0) { + dev_err(dev, "Unable to get resource from address: %d\n", ret); + return -EINVAL; + } + + ftbl_base = ioremap(res.start, resource_size(&res)); + if (!ftbl_base) { + dev_err(dev, "Unable to map ftbl-base!\n"); + return -ENOMEM; + } + + of_property_read_u32(dev->of_node, "qcom,ftbl-row-size", + &ftbl_row_size); + + for (i = 0; i < MAX_L3_ENTRIES; i++) { + data = readl_relaxed(ftbl_base + i * ftbl_row_size); + src = ((data & SRC_MASK) >> SRC_SHIFT); + mult = (data & MULT_MASK); + freq = src ? 
XO_HZ * mult : INIT_HZ; + + /* Two of the same frequencies means end of table */ + if (i > 0 && prev_freq == freq) + break; + + if (d->spec->type == L3_MBPS_DEV) + dev_pm_opp_add(dev, HZ_TO_MBPS(freq, d->width), 0); + else + dev_pm_opp_add(dev, freq, 0); + l3_freqs[i] = freq; + prev_freq = freq; + } + + iounmap(ftbl_base); + use_cached_l3_freqs = true; + + return 0; +} + +static int copy_l3_opp_table(struct device *dev) +{ + struct dev_data *d = dev_get_drvdata(dev); + int idx; + + for (idx = 0; idx < MAX_L3_ENTRIES; idx++) { + if (!l3_freqs[idx]) + break; + + if (d->spec->type == L3_MBPS_DEV) + dev_pm_opp_add(dev, + HZ_TO_MBPS(l3_freqs[idx], d->width), 0); + else + dev_pm_opp_add(dev, l3_freqs[idx], 0); + } + + if (!idx) { + dev_err(dev, "No L3 frequencies copied for device!\n"); + return -EINVAL; + } + + return 0; +} + +#define PROP_ACTIVE "qcom,active-only" +#define ACTIVE_ONLY_TAG 0x3 + +int devfreq_add_icc(struct device *dev) +{ + struct dev_data *d; + struct devfreq_dev_profile *p; + const char *gov_name; + int ret; + struct opp_table *opp_table; + u32 version; + + d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + dev_set_drvdata(dev, d); + + d->spec = of_device_get_match_data(dev); + if (!d->spec) { + dev_err(dev, "Unknown device type!\n"); + return -ENODEV; + } + + p = &d->dp; + p->polling_ms = 500; + p->target = icc_target; + p->get_dev_status = icc_get_dev_status; + + if (of_device_is_compatible(dev->of_node, "qcom,devfreq-icc-ddr")) { + version = (1 << of_fdt_get_ddrtype()); + opp_table = dev_pm_opp_get_opp_table(dev); + if (IS_ERR(opp_table)) { + dev_err(dev, "Failed to set supported hardware\n"); + return PTR_ERR(opp_table); + } + } + + if (d->spec->type == L3_MBPS_DEV) { + ret = of_property_read_u32(dev->of_node, "qcom,bus-width", + &d->width); + if (ret < 0 || !d->width) { + dev_err(dev, "Missing or invalid bus-width: %d\n", ret); + return -EINVAL; + } + } + + if (d->spec->type == L3_HZ_DEV || d->spec->type == L3_MBPS_DEV) 
{ + mutex_lock(&l3_freqs_lock); + if (use_cached_l3_freqs) { + mutex_unlock(&l3_freqs_lock); + ret = copy_l3_opp_table(dev); + } else { + ret = populate_l3_opp_table(dev); + mutex_unlock(&l3_freqs_lock); + } + } else { + ret = dev_pm_opp_of_add_table(dev); + } + if (ret < 0) + dev_err(dev, "Couldn't parse OPP table:%d\n", ret); + + d->icc_path = of_icc_get(dev, NULL); + if (IS_ERR(d->icc_path)) { + ret = PTR_ERR(d->icc_path); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Unable to register icc path: %d\n", ret); + return ret; + } + + if (of_property_read_bool(dev->of_node, PROP_ACTIVE)) + icc_set_tag(d->icc_path, ACTIVE_ONLY_TAG); + + if (of_property_read_string(dev->of_node, "governor", &gov_name)) + gov_name = "performance"; + + d->df = devfreq_add_device(dev, p, gov_name, NULL); + if (IS_ERR(d->df)) { + icc_put(d->icc_path); + return PTR_ERR(d->df); + } + + return 0; +} + +int devfreq_remove_icc(struct device *dev) +{ + struct dev_data *d = dev_get_drvdata(dev); + + icc_put(d->icc_path); + devfreq_remove_device(d->df); + return 0; +} + +int devfreq_suspend_icc(struct device *dev) +{ + struct dev_data *d = dev_get_drvdata(dev); + + return devfreq_suspend_device(d->df); +} + +int devfreq_resume_icc(struct device *dev) +{ + struct dev_data *d = dev_get_drvdata(dev); + + return devfreq_resume_device(d->df); +} + +static int devfreq_icc_probe(struct platform_device *pdev) +{ + return devfreq_add_icc(&pdev->dev); +} + +static int devfreq_icc_remove(struct platform_device *pdev) +{ + return devfreq_remove_icc(&pdev->dev); +} + +static const struct devfreq_icc_spec spec[] = { + [0] = { STD_MBPS_DEV }, + [1] = { L3_HZ_DEV }, + [2] = { L3_MBPS_DEV }, +}; + +static const struct of_device_id devfreq_icc_match_table[] = { + { .compatible = "qcom,devfreq-icc-l3bw", .data = &spec[2] }, + { .compatible = "qcom,devfreq-icc-l3", .data = &spec[1] }, + { .compatible = "qcom,devfreq-icc-llcc", .data = &spec[0] }, + { .compatible = "qcom,devfreq-icc-ddr", .data = &spec[0] }, + { 
.compatible = "qcom,devfreq-icc", .data = &spec[0] }, + {} +}; + +static struct platform_driver devfreq_icc_driver = { + .probe = devfreq_icc_probe, + .remove = devfreq_icc_remove, + .driver = { + .name = "devfreq-icc", + .of_match_table = devfreq_icc_match_table, + .suppress_bind_attrs = true, + }, +}; +module_platform_driver(devfreq_icc_driver); + +MODULE_DESCRIPTION("Device DDR bandwidth voting driver MSM SoCs"); +MODULE_LICENSE("GPL"); diff --git a/drivers/devfreq/governor_cpufreq.c b/drivers/devfreq/governor_cpufreq.c new file mode 100644 index 000000000000..8863cd0f0a9d --- /dev/null +++ b/drivers/devfreq/governor_cpufreq.c @@ -0,0 +1,729 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#define pr_fmt(fmt) "dev-cpufreq: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include "governor.h" + +struct cpu_state { + unsigned int freq; + unsigned int min_freq; + unsigned int max_freq; + bool on; + unsigned int first_cpu; +}; +static struct cpu_state *state[NR_CPUS]; +static int cpufreq_cnt; + +struct freq_map { + unsigned int cpu_khz; + unsigned int target_freq; +}; + +struct devfreq_node { + struct devfreq *df; + void *orig_data; + struct device *dev; + struct device_node *of_node; + struct list_head list; + struct freq_map **map; + struct freq_map *common_map; + unsigned int timeout; + struct delayed_work dwork; + bool drop; + unsigned long prev_tgt; +}; +static LIST_HEAD(devfreq_list); +static DEFINE_MUTEX(state_lock); +static DEFINE_MUTEX(cpufreq_reg_lock); + +#define show_attr(name) \ +static ssize_t name##_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct devfreq *df = to_devfreq(dev); \ + struct devfreq_node *n = df->data; \ + return scnprintf(buf, PAGE_SIZE, "%u\n", n->name); \ +} + +#define store_attr(name, _min, _max) \ +static ssize_t name##_store(struct device *dev, \ + struct 
device_attribute *attr, const char *buf, \ + size_t count) \ +{ \ + struct devfreq *df = to_devfreq(dev); \ + struct devfreq_node *n = df->data; \ + int ret; \ + unsigned int val; \ + ret = kstrtoint(buf, 10, &val); \ + if (ret) \ + return ret; \ + val = max(val, _min); \ + val = min(val, _max); \ + n->name = val; \ + return count; \ +} + +static int update_node(struct devfreq_node *node) +{ + int ret; + struct devfreq *df = node->df; + + if (!df) + return 0; + + cancel_delayed_work_sync(&node->dwork); + + mutex_lock(&df->lock); + node->drop = false; + ret = update_devfreq(df); + if (ret) { + dev_err(df->dev.parent, "Unable to update frequency\n"); + goto out; + } + + if (!node->timeout) + goto out; + + if (df->previous_freq <= df->scaling_min_freq) + goto out; + + schedule_delayed_work(&node->dwork, + msecs_to_jiffies(node->timeout)); +out: + mutex_unlock(&df->lock); + return ret; +} + +static void update_all_devfreqs(void) +{ + struct devfreq_node *node; + + list_for_each_entry(node, &devfreq_list, list) { + update_node(node); + } +} + +static void do_timeout(struct work_struct *work) +{ + struct devfreq_node *node = container_of(to_delayed_work(work), + struct devfreq_node, dwork); + struct devfreq *df = node->df; + + mutex_lock(&df->lock); + node->drop = true; + update_devfreq(df); + mutex_unlock(&df->lock); +} + +static struct devfreq_node *find_devfreq_node(struct device *dev) +{ + struct devfreq_node *node; + + list_for_each_entry(node, &devfreq_list, list) + if (node->dev == dev || node->of_node == dev->of_node) + return node; + + return NULL; +} + +/* ==================== cpufreq part ==================== */ +static void add_policy(struct cpufreq_policy *policy) +{ + struct cpu_state *new_state; + unsigned int cpu, first_cpu; + + if (state[policy->cpu]) { + state[policy->cpu]->freq = policy->cur; + state[policy->cpu]->on = true; + } else { + new_state = kzalloc(sizeof(struct cpu_state), GFP_KERNEL); + if (!new_state) + return; + + first_cpu = 
cpumask_first(policy->related_cpus); + new_state->first_cpu = first_cpu; + new_state->freq = policy->cur; + new_state->min_freq = policy->cpuinfo.min_freq; + new_state->max_freq = policy->cpuinfo.max_freq; + new_state->on = true; + + for_each_cpu(cpu, policy->related_cpus) + state[cpu] = new_state; + } +} + +static int cpufreq_trans_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct cpufreq_freqs *freq = data; + struct cpu_state *s; + + if (event != CPUFREQ_POSTCHANGE) + return 0; + + mutex_lock(&state_lock); + + s = state[freq->policy->cpu]; + if (!s) + goto out; + + if (s->freq != freq->new) { + s->freq = freq->new; + update_all_devfreqs(); + } + +out: + mutex_unlock(&state_lock); + return 0; +} + +static struct notifier_block cpufreq_trans_nb = { + .notifier_call = cpufreq_trans_notifier +}; + +static int devfreq_cpufreq_hotplug_coming_up(unsigned int cpu) +{ + struct cpufreq_policy *policy; + + policy = cpufreq_cpu_get(cpu); + if (!policy) { + pr_err("Policy is null for cpu =%d\n", cpu); + return 0; + } + mutex_lock(&state_lock); + add_policy(policy); + update_all_devfreqs(); + mutex_unlock(&state_lock); + return 0; +} + +static int devfreq_cpufreq_hotplug_going_down(unsigned int cpu) +{ + struct cpufreq_policy *policy; + + policy = cpufreq_cpu_get(cpu); + if (!policy) { + pr_err("Policy is null for cpu =%d\n", cpu); + return 0; + } + mutex_lock(&state_lock); + if (state[policy->cpu]) { + state[policy->cpu]->on = false; + update_all_devfreqs(); + } + mutex_unlock(&state_lock); + return 0; +} + +static int devfreq_cpufreq_cpu_hp_init(void) +{ + int ret = 0; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "DEVFREQ_CPUFREQ", + devfreq_cpufreq_hotplug_coming_up, + devfreq_cpufreq_hotplug_going_down); + if (ret < 0) { + cpuhp_remove_state(CPUHP_AP_ONLINE_DYN); + pr_err("devfreq-cpufreq: failed to register HP notifier: %d\n", + ret); + } else + ret = 0; + return ret; +} + +static int register_cpufreq(void) +{ + int ret = 0; + 
unsigned int cpu; + struct cpufreq_policy *policy; + + mutex_lock(&cpufreq_reg_lock); + + if (cpufreq_cnt) + goto cnt_not_zero; + + cpus_read_lock(); + + ret = devfreq_cpufreq_cpu_hp_init(); + if (ret < 0) + goto out; + + ret = cpufreq_register_notifier(&cpufreq_trans_nb, + CPUFREQ_TRANSITION_NOTIFIER); + + if (ret) + goto out; + + for_each_online_cpu(cpu) { + policy = cpufreq_cpu_get(cpu); + if (policy) { + add_policy(policy); + cpufreq_cpu_put(policy); + } + } +out: + cpus_read_unlock(); +cnt_not_zero: + if (!ret) + cpufreq_cnt++; + mutex_unlock(&cpufreq_reg_lock); + return ret; +} + +static int unregister_cpufreq(void) +{ + int cpu; + + mutex_lock(&cpufreq_reg_lock); + + if (cpufreq_cnt > 1) + goto out; + + cpuhp_remove_state(CPUHP_AP_ONLINE_DYN); + + cpufreq_unregister_notifier(&cpufreq_trans_nb, + CPUFREQ_TRANSITION_NOTIFIER); + + for (cpu = ARRAY_SIZE(state) - 1; cpu >= 0; cpu--) { + if (!state[cpu]) + continue; + if (state[cpu]->first_cpu == cpu) + kfree(state[cpu]); + state[cpu] = NULL; + } + +out: + cpufreq_cnt--; + mutex_unlock(&cpufreq_reg_lock); + return 0; +} + +/* ==================== devfreq part ==================== */ + +static unsigned int interpolate_freq(struct devfreq *df, unsigned int cpu) +{ + unsigned long *freq_table = df->profile->freq_table; + unsigned int cpu_min = state[cpu]->min_freq; + unsigned int cpu_max = state[cpu]->max_freq; + unsigned int cpu_freq = state[cpu]->freq; + unsigned int dev_min, dev_max, cpu_percent; + + if (freq_table) { + dev_min = freq_table[0]; + dev_max = freq_table[df->profile->max_state - 1]; + } else { + if (df->scaling_max_freq <= df->scaling_min_freq) + return 0; + dev_min = df->scaling_min_freq; + dev_max = df->scaling_max_freq; + } + + cpu_percent = ((cpu_freq - cpu_min) * 100) / (cpu_max - cpu_min); + return dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100); +} + +static unsigned int cpu_to_dev_freq(struct devfreq *df, unsigned int cpu) +{ + struct freq_map *map = NULL; + unsigned int cpu_khz = 0, 
freq; + struct devfreq_node *n = df->data; + + if (!state[cpu] || !state[cpu]->on || state[cpu]->first_cpu != cpu) { + freq = 0; + goto out; + } + + if (n->common_map) + map = n->common_map; + else if (n->map) + map = n->map[cpu]; + + cpu_khz = state[cpu]->freq; + + if (!map) { + freq = interpolate_freq(df, cpu); + goto out; + } + + while (map->cpu_khz && map->cpu_khz < cpu_khz) + map++; + if (!map->cpu_khz) + map--; + freq = map->target_freq; + +out: + dev_dbg(df->dev.parent, "CPU%u: %d -> dev: %u\n", cpu, cpu_khz, freq); + return freq; +} + +static int devfreq_cpufreq_get_freq(struct devfreq *df, + unsigned long *freq) +{ + unsigned int cpu, tgt_freq = 0; + struct devfreq_node *node; + + node = df->data; + if (!node) { + pr_err("Unable to find devfreq node!\n"); + return -ENODEV; + } + + if (node->drop) { + *freq = 0; + return 0; + } + + for_each_possible_cpu(cpu) + tgt_freq = max(tgt_freq, cpu_to_dev_freq(df, cpu)); + + if (node->timeout && tgt_freq < node->prev_tgt) + *freq = 0; + else + *freq = tgt_freq; + + node->prev_tgt = tgt_freq; + + return 0; +} + +static unsigned int show_table(char *buf, unsigned int len, + struct freq_map *map) +{ + unsigned int cnt = 0; + + cnt += scnprintf(buf + cnt, len - cnt, "CPU freq\tDevice freq\n"); + + while (map->cpu_khz && cnt < len) { + cnt += scnprintf(buf + cnt, len - cnt, "%8u\t%11u\n", + map->cpu_khz, map->target_freq); + map++; + } + if (cnt < len) + cnt += scnprintf(buf + cnt, len - cnt, "\n"); + + return cnt; +} + +static ssize_t freq_map_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct devfreq *df = to_devfreq(dev); + struct devfreq_node *n = df->data; + struct freq_map *map; + unsigned int cnt = 0, cpu; + + mutex_lock(&state_lock); + if (n->common_map) { + map = n->common_map; + cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, + "Common table for all CPUs:\n"); + cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map); + } else if (n->map) { + for_each_possible_cpu(cpu) { + map = 
n->map[cpu]; + if (!map) + continue; + cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, + "CPU %u:\n", cpu); + if (cnt >= PAGE_SIZE) + break; + cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map); + if (cnt >= PAGE_SIZE) + break; + } + } else { + cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, + "Device freq interpolated based on CPU freq\n"); + } + mutex_unlock(&state_lock); + + return cnt; +} + +static DEVICE_ATTR_RO(freq_map); +show_attr(timeout); +store_attr(timeout, 0U, 100U); +static DEVICE_ATTR_RW(timeout); + +static struct attribute *dev_attr[] = { + &dev_attr_freq_map.attr, + &dev_attr_timeout.attr, + NULL, +}; + +static struct attribute_group dev_attr_group = { + .name = "cpufreq", + .attrs = dev_attr, +}; + +static int devfreq_cpufreq_gov_start(struct devfreq *devfreq) +{ + int ret = 0; + struct devfreq_node *node; + bool alloc = false; + + ret = register_cpufreq(); + if (ret) + return ret; + + ret = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group); + if (ret) { + unregister_cpufreq(); + return ret; + } + + mutex_lock(&state_lock); + + node = find_devfreq_node(devfreq->dev.parent); + if (node == NULL) { + node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL); + if (!node) { + ret = -ENOMEM; + goto alloc_fail; + } + alloc = true; + node->dev = devfreq->dev.parent; + list_add_tail(&node->list, &devfreq_list); + } + + INIT_DELAYED_WORK(&node->dwork, do_timeout); + + node->df = devfreq; + node->orig_data = devfreq->data; + devfreq->data = node; + + ret = update_node(node); + if (ret) + goto update_fail; + + mutex_unlock(&state_lock); + return 0; + +update_fail: + devfreq->data = node->orig_data; + if (alloc) { + list_del(&node->list); + kfree(node); + } +alloc_fail: + mutex_unlock(&state_lock); + sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group); + unregister_cpufreq(); + return ret; +} + +static void devfreq_cpufreq_gov_stop(struct devfreq *devfreq) +{ + struct devfreq_node *node = devfreq->data; + + cancel_delayed_work_sync(&node->dwork); + + 
mutex_lock(&state_lock); + devfreq->data = node->orig_data; + if (node->map || node->common_map) { + node->df = NULL; + } else { + list_del(&node->list); + kfree(node); + } + mutex_unlock(&state_lock); + + sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group); + unregister_cpufreq(); +} + +static int devfreq_cpufreq_ev_handler(struct devfreq *devfreq, + unsigned int event, void *data) +{ + int ret; + + switch (event) { + case DEVFREQ_GOV_START: + + ret = devfreq_cpufreq_gov_start(devfreq); + if (ret) { + pr_err("Governor start failed!\n"); + return ret; + } + pr_debug("Enabled dev CPUfreq governor\n"); + break; + + case DEVFREQ_GOV_STOP: + + devfreq_cpufreq_gov_stop(devfreq); + pr_debug("Disabled dev CPUfreq governor\n"); + break; + } + + return 0; +} + +static struct devfreq_governor devfreq_cpufreq = { + .name = "cpufreq", + .get_target_freq = devfreq_cpufreq_get_freq, + .event_handler = devfreq_cpufreq_ev_handler, +}; + +#define NUM_COLS 2 +static struct freq_map *read_tbl(struct device_node *of_node, char *prop_name) +{ + int len, nf, i, j; + u32 data; + struct freq_map *tbl; + + if (!of_find_property(of_node, prop_name, &len)) + return NULL; + len /= sizeof(data); + + if (len % NUM_COLS || len == 0) + return NULL; + nf = len / NUM_COLS; + + tbl = kzalloc((nf + 1) * sizeof(*tbl), GFP_KERNEL); + if (!tbl) + return NULL; + + for (i = 0, j = 0; i < nf; i++, j += 2) { + of_property_read_u32_index(of_node, prop_name, j, &data); + tbl[i].cpu_khz = data; + + of_property_read_u32_index(of_node, prop_name, j + 1, &data); + tbl[i].target_freq = data; + } + tbl[i].cpu_khz = 0; + + return tbl; +} + +#define PROP_TARGET "target-dev" +#define PROP_TABLE "cpu-to-dev-map" +static int add_table_from_of(struct device_node *of_node) +{ + struct device_node *target_of_node; + struct devfreq_node *node; + struct freq_map *common_tbl; + struct freq_map **tbl_list = NULL; + static char prop_name[] = PROP_TABLE "-999999"; + int cpu, ret, cnt = 0, prop_sz = ARRAY_SIZE(prop_name); + + 
target_of_node = of_parse_phandle(of_node, PROP_TARGET, 0); + if (!target_of_node) + return -EINVAL; + + node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + common_tbl = read_tbl(of_node, PROP_TABLE); + if (!common_tbl) { + tbl_list = kcalloc(num_possible_cpus(), sizeof(*tbl_list), GFP_KERNEL); + if (!tbl_list) { + ret = -ENOMEM; + goto err_list; + } + + for_each_possible_cpu(cpu) { + ret = scnprintf(prop_name, prop_sz, "%s-%d", + PROP_TABLE, cpu); + if (ret >= prop_sz) { + pr_warn("More CPUs than I can handle!\n"); + pr_warn("Skipping rest of the tables!\n"); + break; + } + tbl_list[cpu] = read_tbl(of_node, prop_name); + if (tbl_list[cpu]) + cnt++; + } + } + if (!common_tbl && !cnt) { + ret = -EINVAL; + goto err_tbl; + } + + mutex_lock(&state_lock); + node->of_node = target_of_node; + node->map = tbl_list; + node->common_map = common_tbl; + list_add_tail(&node->list, &devfreq_list); + mutex_unlock(&state_lock); + + return 0; +err_tbl: + kfree(tbl_list); +err_list: + kfree(node); + return ret; +} + +static int __init devfreq_cpufreq_init(void) +{ + int ret; + struct device_node *of_par, *of_child; + + of_par = of_find_node_by_name(NULL, "devfreq-cpufreq"); + if (of_par) { + for_each_child_of_node(of_par, of_child) { + ret = add_table_from_of(of_child); + if (ret) + pr_err("Parsing %s failed!\n", of_child->name); + else + pr_debug("Parsed %s.\n", of_child->name); + } + of_node_put(of_par); + } else { + pr_info("No tables parsed from DT.\n"); + return 0; + } + + ret = devfreq_add_governor(&devfreq_cpufreq); + if (ret) { + pr_err("Governor add failed!\n"); + return ret; + } + pr_err("Governor add success for cpufreq!\n"); + + return 0; +} +subsys_initcall(devfreq_cpufreq_init); + +static void __exit devfreq_cpufreq_exit(void) +{ + int ret, cpu; + struct devfreq_node *node, *tmp; + struct device_node *of_par; + + of_par = of_find_node_by_name(NULL, "devfreq-cpufreq"); + if (!of_par) + return; + + ret = 
devfreq_remove_governor(&devfreq_cpufreq); + if (ret) + pr_err("Governor remove failed!\n"); + + mutex_lock(&state_lock); + list_for_each_entry_safe(node, tmp, &devfreq_list, list) { + kfree(node->common_map); + for_each_possible_cpu(cpu) + kfree(node->map[cpu]); + kfree(node->map); + list_del(&node->list); + kfree(node); + } + mutex_unlock(&state_lock); +} +module_exit(devfreq_cpufreq_exit); + +MODULE_DESCRIPTION("CPU freq based generic governor for devfreq devices"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma-buf/heaps/qcom_dt_parser.c b/drivers/dma-buf/heaps/qcom_dt_parser.c index 521981c4398b..61c95ea620bc 100644 --- a/drivers/dma-buf/heaps/qcom_dt_parser.c +++ b/drivers/dma-buf/heaps/qcom_dt_parser.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -66,8 +66,6 @@ void free_pdata(const struct platform_data *pdata) static int heap_dt_init(struct device_node *mem_node, struct platform_heap *heap) { - const __be32 *basep; - u64 base, size; struct device *dev = heap->dev; struct reserved_mem *rmem; int ret = 0; @@ -97,19 +95,8 @@ static int heap_dt_init(struct device_node *mem_node, } } - basep = of_get_address(mem_node, 0, &size, NULL); - if (basep) { - base = of_translate_address(mem_node, basep); - if (base != OF_BAD_ADDR) { - heap->base = base; - heap->size = size; - } else { - ret = -EINVAL; - dev_err(heap->dev, - "Failed to get heap base/size\n"); - of_reserved_mem_device_release(dev); - } - } + heap->base = rmem->base; + heap->size = rmem->size; heap->is_nomap = of_property_read_bool(mem_node, "no-map"); return ret; diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index 9c015d4c3adc..fcbb55f9a81e 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -207,6 +207,10 @@ static enum qcom_scm_convention __get_convention(void) * Per the "SMC calling convention specification", the 64-bit calling * convention can only be used when the client is 64-bit, otherwise * system will encounter the undefined behaviour. + * When running on 32bit kernel, SCM call with convention + * SMC_CONVENTION_ARM_64 is causing the system crash. To avoid that + * use SMC_CONVENTION_ARM_64 for 64bit kernel and SMC_CONVENTION_ARM_32 + * for 32bit kernel. 
*/ #if IS_ENABLED(CONFIG_ARM64) /* diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c index 1235906ad444..de59a3ed05f6 100644 --- a/drivers/interconnect/qcom/icc-rpmh.c +++ b/drivers/interconnect/qcom/icc-rpmh.c @@ -370,8 +370,8 @@ static struct regmap *qcom_icc_rpmh_map(struct platform_device *pdev, static bool is_voter_disabled(char *voter) { - if ((strnstr(voter, "disp", strlen(voter)) && - (socinfo_get_part_info(PART_DISPLAY) || socinfo_get_part_info(PART_DISPLAY1))) || + if ((!strcmp(voter, "disp") && socinfo_get_part_info(PART_DISPLAY)) || + (!strcmp(voter, "disp2") && socinfo_get_part_info(PART_DISPLAY1)) || (strnstr(voter, "cam", strlen(voter)) && socinfo_get_part_info(PART_CAMERA))) return true; @@ -405,7 +405,11 @@ static int qcom_icc_init_disabled_parts(struct qcom_icc_provider *qp) if (!qn) continue; - if (strnstr(qn->name, voter_name, strlen(qn->name))) + /* Find the ICC node to be disabled by comparing voter_name in + * node name string, adjust the start position accordingly + */ + if (!strcmp(qn->name + (strlen(qn->name) - strlen(voter_name)), + voter_name)) qn->disabled = true; } } diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 727c7046a6d1..fbc6488bd890 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -513,6 +513,15 @@ config QSEECOM_PROXY These callback functions can be used to start, shutdown and send commands to the trusted apps. +config BOOTMARKER_PROXY + tristate "To enable Bootmarker proxy driver for kernel client" + help + Bootmarker proxy driver serves the kernel clients by providing + required ops via call back functions with a minimal framework. + These callback functions can be used to access the place marker. + To compile this driver as a module, choose M here. + If unsure, say N. 
+ source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index f5c57b1a1c1a..414ca8123f13 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -64,3 +64,4 @@ obj-$(CONFIG_GP_PCI1XXXX) += mchp_pci1xxxx/ obj-$(CONFIG_VCPU_STALL_DETECTOR) += vcpu_stall_detector.o obj-$(CONFIG_UID_SYS_STATS) += uid_sys_stats.o obj-$(CONFIG_QSEECOM_PROXY) += qseecom_proxy.o +obj-$(CONFIG_BOOTMARKER_PROXY) += bootmarker_proxy.o diff --git a/drivers/misc/bootmarker_proxy.c b/drivers/misc/bootmarker_proxy.c new file mode 100644 index 000000000000..8dd8dabedbde --- /dev/null +++ b/drivers/misc/bootmarker_proxy.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +static struct bootmarker_drv_ops bootmarker_fun_ops = {0}; + +int provide_bootmarker_kernel_fun_ops(const struct bootmarker_drv_ops *ops) +{ + if (!ops) { + pr_err("ops is NULL\n"); + return -EINVAL; + } + bootmarker_fun_ops = *ops; + pr_debug("Boot Marker proxy Ready to be served\n"); + return 0; +} +EXPORT_SYMBOL_GPL(provide_bootmarker_kernel_fun_ops); + + +int bootmarker_place_marker(const char *name) +{ + int32_t ret = -EPERM; + + if (bootmarker_fun_ops.bootmarker_place_marker) { + ret = bootmarker_fun_ops.bootmarker_place_marker(name); + if (ret != 0) + pr_err("%s: command failed = %d\n", __func__, ret); + } else { + pr_err_ratelimited("bootmarker driver is not up yet\n"); + ret = -EAGAIN; + } + return ret; +} +EXPORT_SYMBOL_GPL(bootmarker_place_marker); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Boot Marker proxy driver"); diff --git a/drivers/net/ethernet/qualcomm/emac/Makefile b/drivers/net/ethernet/qualcomm/emac/Makefile index 61d15e091be2..a3eb4436864a 100644 --- a/drivers/net/ethernet/qualcomm/emac/Makefile +++ 
b/drivers/net/ethernet/qualcomm/emac/Makefile @@ -5,6 +5,5 @@ obj-$(CONFIG_QCOM_EMAC) += qcom-emac.o -qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o emac-ethtool.o \ - emac-sgmii-fsm9900.o emac-sgmii-qdf2432.o \ - emac-sgmii-qdf2400.o +qcom-emac-objs := emac_main.o emac_hw.o emac_ethtool.o emac_ptp.o \ + emac_phy.o emac_rgmii.o emac_sgmii.o diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 0d80447d4d3b..1fe5df34a249 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -1076,7 +1076,7 @@ static void emac_receive_skb(struct emac_rx_queue *rx_q, if (vlan_flag) { u16 vlan; - EMAC_TAG_TO_VLAN(vlan_tag, vlan); + vlan = ((((vlan_tag) >> 8) & 0xFF) | (((vlan_tag) & 0xFF) << 8)); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); } @@ -1449,7 +1449,8 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt, if (skb_vlan_tag_present(skb)) { u16 tag; - EMAC_VLAN_TO_TAG(skb_vlan_tag_get(skb), tag); + tag = (((skb_vlan_tag_get(skb) >> 8) & 0xFF) | + ((skb_vlan_tag_get(skb) & 0xFF) << 8)); TPD_CVLAN_TAG_SET(&tpd, tag); TPD_INSTC_SET(&tpd, 1); } diff --git a/drivers/net/ethernet/qualcomm/emac/emac_defines.h b/drivers/net/ethernet/qualcomm/emac/emac_defines.h new file mode 100644 index 000000000000..07b8005e0c64 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_defines.h @@ -0,0 +1,401 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __EMAC_DEFINES_H__ +#define __EMAC_DEFINES_H__ + +/* EMAC_DMA_MAS_CTRL */ +#define DEV_ID_NUM_BMSK 0x7f000000 +#define DEV_ID_NUM_SHFT 24 +#define DEV_REV_NUM_BMSK 0xff0000 +#define DEV_REV_NUM_SHFT 16 +#define INT_RD_CLR_EN 0x4000 +#define IRQ_MODERATOR2_EN 0x800 +#define IRQ_MODERATOR_EN 0x400 +#define LPW_CLK_SEL 0x80 +#define LPW_STATE 0x20 +#define LPW_MODE 0x10 +#define SOFT_RST 0x1 + +/* EMAC_IRQ_MOD_TIM_INIT */ +#define IRQ_MODERATOR2_INIT_BMSK 0xffff0000 +#define IRQ_MODERATOR2_INIT_SHFT 16 +#define IRQ_MODERATOR_INIT_BMSK 0xffff +#define IRQ_MODERATOR_INIT_SHFT 0 + +/* EMAC_MDIO_CTRL */ +#define MDIO_MODE 0x40000000 +#define MDIO_PR 0x20000000 +#define MDIO_AP_EN 0x10000000 +#define MDIO_BUSY 0x8000000 +#define MDIO_CLK_SEL_BMSK 0x7000000 +#define MDIO_CLK_SEL_SHFT 24 +#define MDIO_START 0x800000 +#define SUP_PREAMBLE 0x400000 +#define MDIO_RD_NWR 0x200000 +#define MDIO_REG_ADDR_BMSK 0x1f0000 +#define MDIO_REG_ADDR_SHFT 16 +#define MDIO_DATA_BMSK 0xffff +#define MDIO_DATA_SHFT 0 + +/* EMAC_PHY_STS */ +#define PHY_ADDR_BMSK 0x1f0000 +#define PHY_ADDR_SHFT 16 + +/* EMAC_MDIO_EX_CTRL */ +#define DEVAD_BMSK 0x1f0000 +#define DEVAD_SHFT 16 +#define EX_REG_ADDR_BMSK 0xffff +#define EX_REG_ADDR_SHFT 0 + +/* EMAC_MAC_CTRL */ +#define SINGLE_PAUSE_MODE 0x10000000 +#define DEBUG_MODE 0x8000000 +#define BROAD_EN 0x4000000 +#define MULTI_ALL 0x2000000 +#define RX_CHKSUM_EN 0x1000000 +#define HUGE 0x800000 +#define SPEED(x) (((x) & 0x3) << 20) +#define SPEED_MASK SPEED(0x3) +#define SIMR 0x80000 +#define TPAUSE 0x10000 +#define PROM_MODE 0x8000 +#define VLAN_STRIP 0x4000 +#define PRLEN_BMSK 0x3c00 +#define PRLEN_SHFT 10 +#define HUGEN 0x200 +#define FLCHK 0x100 +#define PCRCE 0x80 +#define CRCE 0x40 
+#define FULLD 0x20 +#define MAC_LP_EN 0x10 +#define RXFC 0x8 +#define TXFC 0x4 +#define RXEN 0x2 +#define TXEN 0x1 + +/* EMAC_WOL_CTRL0 */ +#define LK_CHG_PME 0x20 +#define LK_CHG_EN 0x10 +#define MG_FRAME_PME 0x8 +#define MG_FRAME_EN 0x4 +#define WK_FRAME_EN 0x1 + +/* EMAC_DESC_CTRL_3 */ +#define RFD_RING_SIZE_BMSK 0xfff + +/* EMAC_DESC_CTRL_4 */ +#define RX_BUFFER_SIZE_BMSK 0xffff + +/* EMAC_DESC_CTRL_6 */ +#define RRD_RING_SIZE_BMSK 0xfff + +/* EMAC_DESC_CTRL_9 */ +#define TPD_RING_SIZE_BMSK 0xffff + +/* EMAC_TXQ_CTRL_0 */ +#define NUM_TXF_BURST_PREF_BMSK 0xffff0000 +#define NUM_TXF_BURST_PREF_SHFT 16 +#define LS_8023_SP 0x80 +#define TXQ_MODE 0x40 +#define TXQ_EN 0x20 +#define IP_OP_SP 0x10 +#define NUM_TPD_BURST_PREF_BMSK 0xf +#define NUM_TPD_BURST_PREF_SHFT 0 + +/* EMAC_TXQ_CTRL_1 */ +#define JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK 0x7ff + +/* EMAC_TXQ_CTRL_2 */ +#define TXF_HWM_BMSK 0xfff0000 +#define TXF_LWM_BMSK 0xfff + +/* EMAC_RXQ_CTRL_0 */ +#define RXQ_EN 0x80000000 +#define CUT_THRU_EN 0x40000000 +#define RSS_HASH_EN 0x20000000 +#define NUM_RFD_BURST_PREF_BMSK 0x3f00000 +#define NUM_RFD_BURST_PREF_SHFT 20 +#define IDT_TABLE_SIZE_BMSK 0x1ff00 +#define IDT_TABLE_SIZE_SHFT 8 +#define SP_IPV6 0x80 + +/* EMAC_RXQ_CTRL_1 */ +#define JUMBO_1KAH_BMSK 0xf000 +#define JUMBO_1KAH_SHFT 12 +#define RFD_PREF_LOW_THRESHOLD_BMSK 0xfc0 +#define RFD_PREF_LOW_THRESHOLD_SHFT 6 +#define RFD_PREF_UP_THRESHOLD_BMSK 0x3f +#define RFD_PREF_UP_THRESHOLD_SHFT 0 + +/* EMAC_RXQ_CTRL_2 */ +#define RXF_DOF_THRESHOLD_BMSK 0xfff0000 +#define RXF_DOF_THRESHOLD_SHFT 16 +#define RXF_UOF_THRESHOLD_BMSK 0xfff +#define RXF_UOF_THRESHOLD_SHFT 0 + +/* EMAC_RXQ_CTRL_3 */ +#define RXD_TIMER_BMSK 0xffff0000 +#define RXD_THRESHOLD_BMSK 0xfff +#define RXD_THRESHOLD_SHFT 0 + +/* EMAC_DMA_CTRL */ +#define DMAW_DLY_CNT_BMSK 0xf0000 +#define DMAW_DLY_CNT_SHFT 16 +#define DMAR_DLY_CNT_BMSK 0xf800 +#define DMAR_DLY_CNT_SHFT 11 +#define DMAR_REQ_PRI 0x400 +#define REGWRBLEN_BMSK 0x380 +#define REGWRBLEN_SHFT 
7 +#define REGRDBLEN_BMSK 0x70 +#define REGRDBLEN_SHFT 4 +#define OUT_ORDER_MODE 0x4 +#define ENH_ORDER_MODE 0x2 +#define IN_ORDER_MODE 0x1 + +/* EMAC_MAILBOX_13 */ +#define RFD3_PROC_IDX_BMSK 0xfff0000 +#define RFD3_PROC_IDX_SHFT 16 +#define RFD3_PROD_IDX_BMSK 0xfff +#define RFD3_PROD_IDX_SHFT 0 + +/* EMAC_MAILBOX_2 */ +#define NTPD_CONS_IDX_BMSK 0xffff0000 +#define NTPD_CONS_IDX_SHFT 16 + +/* EMAC_MAILBOX_3 */ +#define RFD0_CONS_IDX_BMSK 0xfff +#define RFD0_CONS_IDX_SHFT 0 + +/* EMAC_INT_STATUS */ +#define DIS_INT BIT(31) +#define PTP_INT BIT(30) +#define RFD4_UR_INT BIT(29) +#define TX_PKT_INT3 BIT(26) +#define TX_PKT_INT2 BIT(25) +#define TX_PKT_INT1 BIT(24) +#define RX_PKT_INT3 BIT(19) +#define RX_PKT_INT2 BIT(18) +#define RX_PKT_INT1 BIT(17) +#define RX_PKT_INT0 BIT(16) +#define TX_PKT_INT BIT(15) +#define TXQ_TO_INT BIT(14) +#define GPHY_WAKEUP_INT BIT(13) +#define GPHY_LINK_DOWN_INT BIT(12) +#define GPHY_LINK_UP_INT BIT(11) +#define DMAW_TO_INT BIT(10) +#define DMAR_TO_INT BIT(9) +#define TXF_UR_INT BIT(8) +#define RFD3_UR_INT BIT(7) +#define RFD2_UR_INT BIT(6) +#define RFD1_UR_INT BIT(5) +#define RFD0_UR_INT BIT(4) +#define RXF_OF_INT BIT(3) +#define SW_MAN_INT BIT(2) + +/* EMAC_INT_RETRIG_INIT */ +#define INT_RETRIG_TIME_BMSK 0xffff + +/* EMAC_MAILBOX_11 */ +#define H3TPD_PROD_IDX_BMSK 0xffff0000 +#define H3TPD_PROD_IDX_SHFT 16 + +/* EMAC_AXI_MAST_CTRL */ +#define DATA_BYTE_SWAP 0x8 +#define MAX_BOUND 0x2 +#define MAX_BTYPE 0x1 + +/* EMAC_MAILBOX_12 */ +#define H3TPD_CONS_IDX_BMSK 0xffff0000 +#define H3TPD_CONS_IDX_SHFT 16 + +/* EMAC_MAILBOX_9 */ +#define H2TPD_PROD_IDX_BMSK 0xffff +#define H2TPD_PROD_IDX_SHFT 0 + +/* EMAC_MAILBOX_10 */ +#define H1TPD_CONS_IDX_BMSK 0xffff0000 +#define H1TPD_CONS_IDX_SHFT 16 +#define H2TPD_CONS_IDX_BMSK 0xffff +#define H2TPD_CONS_IDX_SHFT 0 + +/* EMAC_ATHR_HEADER_CTRL */ +#define HEADER_CNT_EN 0x2 +#define HEADER_ENABLE 0x1 + +/* EMAC_MAILBOX_0 */ +#define RFD0_PROC_IDX_BMSK 0xfff0000 +#define RFD0_PROC_IDX_SHFT 16 
+#define RFD0_PROD_IDX_BMSK 0xfff +#define RFD0_PROD_IDX_SHFT 0 + +/* EMAC_MAILBOX_5 */ +#define RFD1_PROC_IDX_BMSK 0xfff0000 +#define RFD1_PROC_IDX_SHFT 16 +#define RFD1_PROD_IDX_BMSK 0xfff +#define RFD1_PROD_IDX_SHFT 0 + +/* EMAC_MAILBOX_6 */ +#define RFD2_PROC_IDX_BMSK 0xfff0000 +#define RFD2_PROC_IDX_SHFT 16 +#define RFD2_PROD_IDX_BMSK 0xfff +#define RFD2_PROD_IDX_SHFT 0 + +/* EMAC_CORE_HW_VERSION */ +#define MAJOR_BMSK 0xf0000000 +#define MAJOR_SHFT 28 +#define MINOR_BMSK 0xfff0000 +#define MINOR_SHFT 16 +#define STEP_BMSK 0xffff +#define STEP_SHFT 0 + +/* EMAC_MISC_CTRL */ +#define RX_UNCPL_INT_EN 0x1 + +/* EMAC_MAILBOX_7 */ +#define RFD2_CONS_IDX_BMSK 0xfff0000 +#define RFD2_CONS_IDX_SHFT 16 +#define RFD1_CONS_IDX_BMSK 0xfff +#define RFD1_CONS_IDX_SHFT 0 + +/* EMAC_MAILBOX_8 */ +#define RFD3_CONS_IDX_BMSK 0xfff +#define RFD3_CONS_IDX_SHFT 0 + +/* EMAC_MAILBOX_15 */ +#define NTPD_PROD_IDX_BMSK 0xffff +#define NTPD_PROD_IDX_SHFT 0 + +/* EMAC_MAILBOX_16 */ +#define H1TPD_PROD_IDX_BMSK 0xffff +#define H1TPD_PROD_IDX_SHFT 0 + +/* EMAC_EMAC_WRAPPER_CSR1 */ +#define TX_INDX_FIFO_SYNC_RST BIT(23) +#define TX_TS_FIFO_SYNC_RST BIT(22) +#define RX_TS_FIFO2_SYNC_RST BIT(21) +#define RX_TS_FIFO1_SYNC_RST BIT(20) +#define TX_TS_ENABLE BIT(16) +#define DIS_1588_CLKS BIT(11) +#define FREQ_MODE BIT(9) +#define ENABLE_RRD_TIMESTAMP BIT(3) + +/* EMAC_EMAC_WRAPPER_CSR2 */ +#define HDRIVE_BMSK 0x3000 +#define HDRIVE_SHFT 12 +#define SLB_EN 0x200 +#define PLB_EN 0x100 +#define WOL_EN 0x80 +#define CKEDGE_SEL 0x40 +#define TX_ID_EN_L 0x20 +#define RX_ID_EN_L 0x10 +#define RGMII_PHY_MODE_BMSK 0x6 +#define RGMII_PHY_MODE_SHFT 1 +#define PHY_RESET 0x1 + +/* EMAC_EMAC_WRAPPER_CSR3 */ +#define PLL_RESET 0x1000000 +#define PLL_L_VAL_5_0_BMSK 0xfc0000 +#define PLL_L_VAL_5_0_SHFT 18 +#define BYPASSNL 0x10000 + +/* EMAC_EMAC_WRAPPER_CSR5 */ +#define RMII_125_CLK_EN 0x20 + +/* EMAC_EMAC_WRAPPER_CSR10 */ +#define RD_CLR_1588 0x2 +#define DIS_1588 0x1 + +/* EMAC_EMAC_WRAPPER_STATUS */ 
+#define PLL_LOCK_DET 0x1 + +/* EMAC_EMAC_WRAPPER_TX_TS_INX */ +#define EMAC_WRAPPER_TX_TS_EMPTY 0x80000000 +#define EMAC_WRAPPER_TX_TS_INX_BMSK 0xffff + +/* EMAC_P1588_CTRL_REG */ +#define ATTACH_EN 0x10 +#define BYPASS_O 0x8 +#define CLOCK_MODE_BMSK 0x6 +#define CLOCK_MODE_SHFT 1 +#define ETH_MODE_SW 0x1 + +/* EMAC_P1588_TX_LATENCY */ +#define TX_LATENCY_BMSK 0xffff +#define TX_LATENCY_SHFT 0 + +/* EMAC_P1588_INC_VALUE_2 */ +#define INC_VALUE_2_BMSK 0xffff + +/* EMAC_P1588_INC_VALUE_1 */ +#define INC_VALUE_1_BMSK 0xffff + +/* EMAC_P1588_NANO_OFFSET_2 */ +#define NANO_OFFSET_2_BMSK 0xffff + +/* EMAC_P1588_NANO_OFFSET_1 */ +#define NANO_OFFSET_1_BMSK 0xffff + +/* EMAC_P1588_SEC_OFFSET_2 */ +#define SEC_OFFSET_2_BMSK 0xffff + +/* EMAC_P1588_SEC_OFFSET_1 */ +#define SEC_OFFSET_1_BMSK 0xffff + +/* EMAC_P1588_REAL_TIME_5 */ +#define REAL_TIME_5_BMSK 0xffff +#define REAL_TIME_5_SHFT 0 + +/* EMAC_P1588_REAL_TIME_4 */ +#define REAL_TIME_4_BMSK 0xffff +#define REAL_TIME_4_SHFT 0 + +/* EMAC_P1588_REAL_TIME_3 */ +#define REAL_TIME_3_BMSK 0xffff +#define REAL_TIME_3_SHFT 0 + +/* EMAC_P1588_REAL_TIME_2 */ +#define REAL_TIME_2_BMSK 0xffff +#define REAL_TIME_2_SHFT 0 + +/* EMAC_P1588_REAL_TIME_1 */ +#define REAL_TIME_1_BMSK 0xffff +#define REAL_TIME_1_SHFT 0 + +/* EMAC_P1588_EXPANDED_INT_STATUS */ +#define PPS_IN 0x20 + +/* EMAC_P1588_RTC_EXPANDED_CONFIG */ +#define RTC_READ_MODE 0x20 +#define RTC_SNAPSHOT 0x10 +#define LOAD_RTC 0x1 + +/* EMAC_P1588_RTC_PRELOADED_4 */ +#define RTC_PRELOADED_4_BMSK 0xffff + +/* EMAC_P1588_RTC_PRELOADED_3 */ +#define RTC_PRELOADED_3_BMSK 0xffff + +/* EMAC_P1588_RTC_PRELOADED_2 */ +#define RTC_PRELOADED_2_BMSK 0xffff + +/* EMAC_P1588_RTC_PRELOADED_1 */ +#define RTC_PRELOADED_1_BMSK 0xffff + +/* EMAC_P1588_GRAND_MASTER_CONFIG_0 */ +#define GRANDMASTER_MODE 0x40 +#define GM_PPS_SYNC 0x20 + +#endif /* __EMAC_DEFINES_H__ */ diff --git a/drivers/net/ethernet/qualcomm/emac/emac_ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac_ethtool.c new file mode 
100644 index 000000000000..9472f3d5877b --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_ethtool.c @@ -0,0 +1,410 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* Qualcomm Technologies, Inc. EMAC Ethernet Controller ethtool support + */ + +#include +#include + +#include "emac_main.h" +#include "emac_hw.h" + +#define EMAC_MAX_REG_SIZE 10 +#define EMAC_STATS_LEN 51 +static const char *const emac_ethtool_stat_strings[] = { + "rx ok", + "rx bcast", + "rx mcast", + "rx pause", + "rx ctrl", + "rx fcs err", + "rx len err", + "rx byte cnt", + "rx runt", + "rx frag", + "rx sz 64", + "rx sz 65 127", + "rx sz 128 255", + "rx sz 256 511", + "rx sz 512 1023", + "rx sz 1024 1518", + "rx sz 1519 max", + "rx sz ov", + "rx rxf ov", + "rx align err", + "rx bcast byte cnt", + "rx mcast byte cnt", + "rx err addr", + "rx crc align", + "rx jubbers", + "tx ok", + "tx bcast", + "tx mcast", + "tx pause", + "tx exc defer", + "tx ctrl", + "tx defer", + "tx byte cnt", + "tx sz 64", + "tx sz 65 127", + "tx sz 128 255", + "tx sz 256 511", + "tx sz 512 1023", + "tx sz 1024 1518", + "tx sz 1519 max", + "tx 1 col", + "tx 2 col", + "tx late col", + "tx abort col", + "tx underrun", + "tx rd eop", + "tx len err", + "tx trunc", + "tx bcast byte", + "tx mcast byte", + "tx col", +}; + +static void emac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam 
*pause) +{ + struct phy_device *phydev = netdev->phydev; + + pause->autoneg = (phydev->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + pause->rx_pause = (phydev->pause) ? 1 : 0; + pause->tx_pause = (phydev->pause != phydev->asym_pause) ? 1 : 0; +} + +static int emac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_phy *phy = &adpt->phy; + struct phy_device *phydev = netdev->phydev; + enum emac_flow_ctrl req_fc_mode; + bool disable_fc_autoneg; + int ret = 0; + + if (!netif_running(netdev)) + return -EINVAL; + + if (!phydev) + return -ENODEV; + + req_fc_mode = phy->req_fc_mode; + disable_fc_autoneg = phydev->autoneg; + + if (pause->autoneg != AUTONEG_ENABLE) + disable_fc_autoneg = true; + else + disable_fc_autoneg = false; + + if (pause->rx_pause && pause->tx_pause) { + req_fc_mode = EMAC_FC_FULL; + } else if (pause->rx_pause && !pause->tx_pause) { + req_fc_mode = EMAC_FC_RX_PAUSE; + } else if (!pause->rx_pause && pause->tx_pause) { + req_fc_mode = EMAC_FC_TX_PAUSE; + } else if (!pause->rx_pause && !pause->tx_pause) { + req_fc_mode = EMAC_FC_NONE; + } else { + CLR_FLAG(adpt, ADPT_STATE_RESETTING); + return -EINVAL; + } + + pm_runtime_get_sync(netdev->dev.parent); + + if (phy->req_fc_mode != req_fc_mode || + phy->disable_fc_autoneg != disable_fc_autoneg) { + phy->req_fc_mode = req_fc_mode; + phy->disable_fc_autoneg = disable_fc_autoneg; + + if (phydev->autoneg) { + switch (phy->req_fc_mode) { + case EMAC_FC_FULL: + linkmode_set_bit(ADVERTISED_Pause, phydev->supported); + linkmode_set_bit(ADVERTISED_Asym_Pause, phydev->supported); + linkmode_set_bit(ADVERTISED_Pause, phydev->advertising); + linkmode_set_bit(ADVERTISED_Asym_Pause, phydev->advertising); + break; + case EMAC_FC_TX_PAUSE: + linkmode_set_bit(ADVERTISED_Asym_Pause, phydev->supported); + linkmode_set_bit(ADVERTISED_Asym_Pause, phydev->advertising); + break; + default: + linkmode_clear_bit(ADVERTISED_Pause, 
phydev->supported); + linkmode_clear_bit(ADVERTISED_Asym_Pause, phydev->supported); + linkmode_clear_bit(ADVERTISED_Pause, phydev->advertising); + linkmode_clear_bit(ADVERTISED_Asym_Pause, phydev->advertising); + break; + } + if (phy->disable_fc_autoneg) { + linkmode_clear_bit(ADVERTISED_Pause, phydev->supported); + linkmode_clear_bit(ADVERTISED_Asym_Pause, phydev->supported); + linkmode_clear_bit(ADVERTISED_Pause, phydev->advertising); + linkmode_clear_bit(ADVERTISED_Asym_Pause, phydev->advertising); + } + } + + if (phy->external) + ret = phy_start_aneg(phydev); + + if (ret > 0) + emac_phy_config_fc(adpt); + } + pm_runtime_mark_last_busy(netdev->dev.parent); + pm_runtime_put_autosuspend(netdev->dev.parent); + + return ret; +} + +static u32 emac_get_msglevel(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + return adpt->msg_enable; +} + +static void emac_set_msglevel(struct net_device *netdev, u32 data) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + adpt->msg_enable = data; +} + +static int emac_get_regs_len(struct net_device *netdev) +{ + return EMAC_MAX_REG_SIZE * sizeof(32); +} + +static void emac_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *buff) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw *hw = &adpt->hw; + u16 i; + u32 *val = buff; + static const u32 reg[EMAC_MAX_REG_SIZE] = { + EMAC_DMA_MAS_CTRL, EMAC_MAC_CTRL, EMAC_WOL_CTRL0, + EMAC_TXQ_CTRL_0, EMAC_RXQ_CTRL_0, EMAC_DMA_CTRL, EMAC_INT_MASK, + EMAC_AXI_MAST_CTRL, EMAC_CORE_HW_VERSION, EMAC_MISC_CTRL, + }; + + regs->version = 0; + regs->len = EMAC_MAX_REG_SIZE * sizeof(u32); + + memset(val, 0, EMAC_MAX_REG_SIZE * sizeof(u32)); + pm_runtime_get_sync(netdev->dev.parent); + for (i = 0; i < ARRAY_SIZE(reg); i++) + val[i] = emac_reg_r32(hw, EMAC, reg[i]); + pm_runtime_mark_last_busy(netdev->dev.parent); + pm_runtime_put_autosuspend(netdev->dev.parent); +} + +static void emac_get_drvinfo(struct net_device *netdev, + 
struct ethtool_drvinfo *drvinfo) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + strscpy(drvinfo->driver, adpt->netdev->name, + sizeof(drvinfo->driver)); + strscpy(drvinfo->version, "Revision: 1.1.0.0", + sizeof(drvinfo->version)); + strscpy(drvinfo->bus_info, dev_name(&netdev->dev), + sizeof(drvinfo->bus_info)); + drvinfo->regdump_len = emac_get_regs_len(netdev); +} + +static int emac_wol_exclusion(struct emac_adapter *adpt, + struct ethtool_wolinfo *wol) +{ + struct emac_hw *hw = &adpt->hw; + + /* WOL not supported except for the following */ + switch (hw->devid) { + case EMAC_DEV_ID: + return 0; + default: + wol->supported = 0; + return -EINVAL; + } +} + +static void emac_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + if (adpt->wol & EMAC_WOL_MAGIC) + wol->wolopts |= WAKE_MAGIC; + if (adpt->wol & EMAC_WOL_PHY) + wol->wolopts |= WAKE_PHY; +} + +static int emac_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + u32 ret = 0; + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | + WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) + return -EOPNOTSUPP; + + if (emac_wol_exclusion(adpt, wol)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + /* Enable WOL interrupt */ + ret = phy_ethtool_set_wol(phydev, wol); + if (ret) + return ret; + + adpt->wol = 0; + if (wol->wolopts & WAKE_MAGIC) { + adpt->wol |= EMAC_WOL_MAGIC; + emac_wol_gpio_irq(adpt, true); + /* Release wakelock */ + __pm_relax(adpt->link_wlock); + } + + if (wol->wolopts & WAKE_PHY) + adpt->wol |= EMAC_WOL_PHY; + + return ret; +} + +static void emac_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + ring->rx_max_pending = EMAC_MAX_RX_DESCS; + ring->tx_max_pending = EMAC_MAX_TX_DESCS; + ring->rx_pending = adpt->num_rxdescs; + ring->tx_pending = adpt->num_txdescs; +} + +static int emac_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + int retval = 0; + + if (ring->rx_mini_pending || ring->rx_jumbo_pending) + return -EINVAL; + + adpt->num_txdescs = clamp_t(u32, ring->tx_pending, + EMAC_MIN_TX_DESCS, EMAC_MAX_TX_DESCS); + + adpt->num_rxdescs = clamp_t(u32, ring->rx_pending, + EMAC_MIN_RX_DESCS, EMAC_MAX_RX_DESCS); + + if (netif_running(netdev)) + retval = emac_resize_rings(netdev); + + return retval; +} + +static int emac_nway_reset(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + if (netif_running(netdev)) + return emac_reinit_locked(adpt); + + return 0; +} + +static int emac_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return 0; + case ETH_SS_STATS: + return EMAC_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + u16 i; + + switch (stringset) { + case ETH_SS_TEST: + break; + case ETH_SS_STATS: + for (i = 0; i < EMAC_STATS_LEN; i++) 
{ + strscpy(data, emac_ethtool_stat_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + } +} + +static void emac_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + emac_update_hw_stats(adpt); + memcpy(data, &adpt->hw_stats, EMAC_STATS_LEN * sizeof(u64)); +} + +static const struct ethtool_ops emac_ethtool_ops = { + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, + + .get_msglevel = emac_get_msglevel, + .set_msglevel = emac_set_msglevel, + + .get_sset_count = emac_get_sset_count, + .get_strings = emac_get_strings, + .get_ethtool_stats = emac_get_ethtool_stats, + + .get_ringparam = emac_get_ringparam, + .set_ringparam = emac_set_ringparam, + + .get_pauseparam = emac_get_pauseparam, + .set_pauseparam = emac_set_pauseparam, + + .nway_reset = emac_nway_reset, + + .get_link = ethtool_op_get_link, + + .get_regs_len = emac_get_regs_len, + .get_regs = emac_get_regs, + + .get_wol = emac_get_wol, + .set_wol = emac_set_wol, + .get_drvinfo = emac_get_drvinfo, +}; + +/* Set ethtool operations */ +void emac_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &emac_ethtool_ops; +} diff --git a/drivers/net/ethernet/qualcomm/emac/emac_hw.c b/drivers/net/ethernet/qualcomm/emac/emac_hw.c new file mode 100644 index 000000000000..7ea412599198 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_hw.c @@ -0,0 +1,640 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* Qualcomm Technologies, Inc. EMAC Ethernet Controller Hardware support + */ + +#include +#include +#include +#include +#include +#include + +#include +#include "emac_hw.h" +#include "emac_ptp.h" + +#define RFD_PREF_LOW_TH 0x10 +#define RFD_PREF_UP_TH 0x10 +#define JUMBO_1KAH 0x4 + +#define RXF_DOF_TH 0x0be +#define RXF_UOF_TH 0x1a0 + +#define RXD_TH 0x100 + +/* RGMII specific macros */ +#define EMAC_RGMII_PLL_LOCK_TIMEOUT (HZ / 1000) /* 1ms */ +#define EMAC_RGMII_CORE_IE_C 0x2001 +#define EMAC_RGMII_PLL_L_VAL 0x14 +#define EMAC_RGMII_PHY_MODE 0 + +/* REG */ +u32 emac_reg_r32(struct emac_hw *hw, u8 base, u32 reg) +{ + return readl_relaxed(hw->reg_addr[base] + reg); +} + +void emac_reg_w32(struct emac_hw *hw, u8 base, u32 reg, u32 val) +{ + writel_relaxed(val, hw->reg_addr[base] + reg); +} + +void emac_reg_update32(struct emac_hw *hw, u8 base, u32 reg, u32 mask, u32 val) +{ + u32 data; + + data = emac_reg_r32(hw, base, reg); + emac_reg_w32(hw, base, reg, ((data & ~mask) | val)); +} + +u32 emac_reg_field_r32(struct emac_hw *hw, u8 base, u32 reg, + u32 mask, u32 shift) +{ + u32 data; + + data = emac_reg_r32(hw, base, reg); + return (data & mask) >> shift; +} + +/* INTR */ +void emac_hw_enable_intr(struct emac_hw *hw) +{ + struct emac_adapter *adpt = emac_hw_get_adap(hw); + int i; + + for (i = 0; i < EMAC_NUM_CORE_IRQ; i++) { + struct emac_irq_per_dev *irq = &adpt->irq[i]; + const struct emac_irq_common *irq_cmn = &emac_irq_cmn_tbl[i]; + + emac_reg_w32(hw, EMAC, irq_cmn->status_reg, (u32)~DIS_INT); + emac_reg_w32(hw, EMAC, irq_cmn->mask_reg, irq->mask); + } + + if (adpt->tstamp_en) + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_MASK, + hw->ptp_intr_mask); + wmb(); /* ensure that irq and ptp setting 
are flushed to HW */ +} + +void emac_hw_disable_intr(struct emac_hw *hw) +{ + struct emac_adapter *adpt = emac_hw_get_adap(hw); + int i; + + for (i = 0; i < EMAC_NUM_CORE_IRQ; i++) { + const struct emac_irq_common *irq_cmn = &emac_irq_cmn_tbl[i]; + + emac_reg_w32(hw, EMAC, irq_cmn->status_reg, DIS_INT); + emac_reg_w32(hw, EMAC, irq_cmn->mask_reg, 0); + } + + if (adpt->tstamp_en) + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_MASK, + 0); + + wmb(); /* ensure that irq and ptp setting are flushed to HW */ +} + +/* MC */ +void emac_hw_set_mc_addr(struct emac_hw *hw, u8 *addr) +{ + u32 crc32, bit, reg, mta; + + /* Calculate the CRC of the MAC address */ + crc32 = ether_crc(ETH_ALEN, addr); + + /* The HASH Table is an array of 2 32-bit registers. It is + * treated like an array of 64 bits (BitArray[hash_value]). + * Use the upper 6 bits of the above CRC as the hash value. + */ + reg = (crc32 >> 31) & 0x1; + bit = (crc32 >> 26) & 0x1F; + + mta = emac_reg_r32(hw, EMAC, EMAC_HASH_TAB_REG0 + (reg << 2)); + mta |= (0x1 << bit); + emac_reg_w32(hw, EMAC, EMAC_HASH_TAB_REG0 + (reg << 2), mta); + wmb(); /* ensure that the mac address is flushed to HW */ +} + +void emac_hw_clear_mc_addr(struct emac_hw *hw) +{ + emac_reg_w32(hw, EMAC, EMAC_HASH_TAB_REG0, 0); + emac_reg_w32(hw, EMAC, EMAC_HASH_TAB_REG1, 0); + wmb(); /* ensure that clearing the mac address is flushed to HW */ +} + +/* definitions for RSS */ +#define EMAC_RSS_KEY(_i, _type) \ + (EMAC_RSS_KEY0 + ((_i) * sizeof(_type))) +#define EMAC_RSS_TBL(_i, _type) \ + (EMAC_IDT_TABLE0 + ((_i) * sizeof(_type))) + +/* RSS */ +void emac_hw_config_rss(struct emac_hw *hw) +{ + int key_len_by_u32 = ARRAY_SIZE(hw->rss_key); + int idt_len_by_u32 = ARRAY_SIZE(hw->rss_idt); + u32 rxq0; + int i; + + /* Fill out hash function keys */ + for (i = 0; i < key_len_by_u32; i++) { + u32 key, idx_base; + + idx_base = (key_len_by_u32 - i) * 4; + key = ((hw->rss_key[idx_base - 1]) | + (hw->rss_key[idx_base - 2] << 8) | + (hw->rss_key[idx_base 
- 3] << 16) | + (hw->rss_key[idx_base - 4] << 24)); + emac_reg_w32(hw, EMAC, EMAC_RSS_KEY(i, u32), key); + } + + /* Fill out redirection table */ + for (i = 0; i < idt_len_by_u32; i++) + emac_reg_w32(hw, EMAC, EMAC_RSS_TBL(i, u32), hw->rss_idt[i]); + + emac_reg_w32(hw, EMAC, EMAC_BASE_CPU_NUMBER, hw->rss_base_cpu); + + rxq0 = emac_reg_r32(hw, EMAC, EMAC_RXQ_CTRL_0); + if (hw->rss_hstype & EMAC_RSS_HSTYP_IPV4_EN) + rxq0 |= RXQ0_RSS_HSTYP_IPV4_EN; + else + rxq0 &= ~RXQ0_RSS_HSTYP_IPV4_EN; + + if (hw->rss_hstype & EMAC_RSS_HSTYP_TCP4_EN) + rxq0 |= RXQ0_RSS_HSTYP_IPV4_TCP_EN; + else + rxq0 &= ~RXQ0_RSS_HSTYP_IPV4_TCP_EN; + + if (hw->rss_hstype & EMAC_RSS_HSTYP_IPV6_EN) + rxq0 |= RXQ0_RSS_HSTYP_IPV6_EN; + else + rxq0 &= ~RXQ0_RSS_HSTYP_IPV6_EN; + + if (hw->rss_hstype & EMAC_RSS_HSTYP_TCP6_EN) + rxq0 |= RXQ0_RSS_HSTYP_IPV6_TCP_EN; + else + rxq0 &= ~RXQ0_RSS_HSTYP_IPV6_TCP_EN; + + rxq0 |= ((hw->rss_idt_size << IDT_TABLE_SIZE_SHFT) & + IDT_TABLE_SIZE_BMSK); + rxq0 |= RSS_HASH_EN; + + wmb(); /* ensure all parameters are written before we enable RSS */ + emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_0, rxq0); + wmb(); /* ensure that enabling RSS is flushed to HW */ +} + +/* Config MAC modes */ +void emac_hw_config_mac_ctrl(struct emac_hw *hw) +{ + u32 mac; + + mac = emac_reg_r32(hw, EMAC, EMAC_MAC_CTRL); + + if (TEST_FLAG(hw, HW_VLANSTRIP_EN)) + mac |= VLAN_STRIP; + else + mac &= ~VLAN_STRIP; + + if (TEST_FLAG(hw, HW_PROMISC_EN)) + mac |= PROM_MODE; + else + mac &= ~PROM_MODE; + + if (TEST_FLAG(hw, HW_MULTIALL_EN)) + mac |= MULTI_ALL; + else + mac &= ~MULTI_ALL; + + if (TEST_FLAG(hw, HW_LOOPBACK_EN)) + mac |= MAC_LP_EN; + else + mac &= ~MAC_LP_EN; + + emac_reg_w32(hw, EMAC, EMAC_MAC_CTRL, mac); + wmb(); /* ensure MAC setting is flushed to HW */ +} + +/* Wake On LAN (WOL) */ +void emac_hw_config_wol(struct emac_hw *hw, u32 wufc) +{ + u32 wol = 0; + + /* turn on magic packet event */ + if (wufc & EMAC_WOL_MAGIC) + wol |= MG_FRAME_EN | MG_FRAME_PME | WK_FRAME_EN; + + /* turn on link up 
event */ + if (wufc & EMAC_WOL_PHY) + wol |= LK_CHG_EN | LK_CHG_PME; + + emac_reg_w32(hw, EMAC, EMAC_WOL_CTRL0, wol); + wmb(); /* ensure that WOL setting is flushed to HW */ +} + +/* Power Management */ +void emac_hw_config_pow_save(struct emac_hw *hw, u32 speed, + bool wol_en, bool rx_en) +{ + struct emac_adapter *adpt = emac_hw_get_adap(hw); + struct phy_device *phydev = adpt->phydev; + u32 dma_mas, mac; + + dma_mas = emac_reg_r32(hw, EMAC, EMAC_DMA_MAS_CTRL); + dma_mas &= ~LPW_CLK_SEL; + dma_mas |= LPW_STATE; + + mac = emac_reg_r32(hw, EMAC, EMAC_MAC_CTRL); + mac &= ~(FULLD | RXEN | TXEN); + mac = (mac & ~SPEED_MASK) | SPEED(1); + + if (wol_en) { + if (rx_en) + mac |= (RXEN | BROAD_EN); + + /* If WOL is enabled, set link speed/duplex for mac */ + if (phydev->speed == SPEED_1000) + mac = (mac & ~SPEED_MASK) | (SPEED(2) & SPEED_MASK); + + if (phydev->duplex == DUPLEX_FULL) + if (phydev->speed == SPEED_10 || + phydev->speed == SPEED_100 || + phydev->speed == SPEED_1000) + mac |= FULLD; + } else { + /* select lower clock speed if WOL is disabled */ + dma_mas |= LPW_CLK_SEL; + } + + emac_reg_w32(hw, EMAC, EMAC_DMA_MAS_CTRL, dma_mas); + emac_reg_w32(hw, EMAC, EMAC_MAC_CTRL, mac); + wmb(); /* ensure that power setting is flushed to HW */ +} + +/* Config descriptor rings */ +static void emac_mac_dma_rings_config(struct emac_hw *hw) +{ + struct emac_adapter *adpt = emac_hw_get_adap(hw); + + if (adpt->tstamp_en) { + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, + 0, ENABLE_RRD_TIMESTAMP); + } + + /* TPD */ + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_1, + EMAC_DMA_ADDR_HI(adpt->tx_queue[0].tpd.tpdma)); + switch (adpt->num_txques) { + case 4: + emac_reg_w32(hw, EMAC, EMAC_H3TPD_BASE_ADDR_LO, + EMAC_DMA_ADDR_LO(adpt->tx_queue[3].tpd.tpdma)); + fallthrough; + case 3: + emac_reg_w32(hw, EMAC, EMAC_H2TPD_BASE_ADDR_LO, + EMAC_DMA_ADDR_LO(adpt->tx_queue[2].tpd.tpdma)); + fallthrough; + case 2: + emac_reg_w32(hw, EMAC, EMAC_H1TPD_BASE_ADDR_LO, + 
EMAC_DMA_ADDR_LO(adpt->tx_queue[1].tpd.tpdma)); + fallthrough; + case 1: + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_8, + EMAC_DMA_ADDR_LO(adpt->tx_queue[0].tpd.tpdma)); + break; + default: + emac_err(adpt, "Invalid number of TX queues (%d)\n", + adpt->num_txques); + return; + } + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_9, + adpt->tx_queue[0].tpd.count & TPD_RING_SIZE_BMSK); + + /* RFD & RRD */ + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_0, + EMAC_DMA_ADDR_HI(adpt->rx_queue[0].rfd.rfdma)); + switch (adpt->num_rxques) { + case 4: + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_13, + EMAC_DMA_ADDR_LO(adpt->rx_queue[3].rfd.rfdma)); + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_16, + EMAC_DMA_ADDR_LO(adpt->rx_queue[3].rrd.rrdma)); + fallthrough; + case 3: + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_12, + EMAC_DMA_ADDR_LO(adpt->rx_queue[2].rfd.rfdma)); + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_15, + EMAC_DMA_ADDR_LO(adpt->rx_queue[2].rrd.rrdma)); + fallthrough; + case 2: + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_10, + EMAC_DMA_ADDR_LO(adpt->rx_queue[1].rfd.rfdma)); + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_14, + EMAC_DMA_ADDR_LO(adpt->rx_queue[1].rrd.rrdma)); + fallthrough; + case 1: + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_2, + EMAC_DMA_ADDR_LO(adpt->rx_queue[0].rfd.rfdma)); + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_5, + EMAC_DMA_ADDR_LO(adpt->rx_queue[0].rrd.rrdma)); + break; + default: + emac_err(adpt, "Invalid number of RX queues (%d)\n", + adpt->num_rxques); + return; + } + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_3, + adpt->rx_queue[0].rfd.count & RFD_RING_SIZE_BMSK); + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_6, + adpt->rx_queue[0].rrd.count & RRD_RING_SIZE_BMSK); + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_4, + adpt->rxbuf_size & RX_BUFFER_SIZE_BMSK); + + emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_11, 0); + + wmb(); /* ensure all parameters are written before we enable them */ + /* Load all of base address above */ + emac_reg_w32(hw, EMAC, EMAC_INTER_SRAM_PART9, 1); + wmb(); /* ensure triggering HW to read ring 
pointers is flushed */ +} + +/* Config transmit parameters */ +static void emac_hw_config_tx_ctrl(struct emac_hw *hw) +{ + u16 tx_offload_thresh = EMAC_MAX_TX_OFFLOAD_THRESH; + u32 val; + + emac_reg_w32(hw, EMAC, EMAC_TXQ_CTRL_1, + (tx_offload_thresh >> 3) & + JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK); + + val = (hw->tpd_burst << NUM_TPD_BURST_PREF_SHFT) & + NUM_TPD_BURST_PREF_BMSK; + + val |= (TXQ_MODE | LS_8023_SP); + val |= (0x0100 << NUM_TXF_BURST_PREF_SHFT) & + NUM_TXF_BURST_PREF_BMSK; + + emac_reg_w32(hw, EMAC, EMAC_TXQ_CTRL_0, val); + emac_reg_update32(hw, EMAC, EMAC_TXQ_CTRL_2, + (TXF_HWM_BMSK | TXF_LWM_BMSK), 0); + wmb(); /* ensure that Tx control settings are flushed to HW */ +} + +/* Config receive parameters */ +static void emac_hw_config_rx_ctrl(struct emac_hw *hw) +{ + u32 val; + + val = ((hw->rfd_burst << NUM_RFD_BURST_PREF_SHFT) & + NUM_RFD_BURST_PREF_BMSK); + val |= (SP_IPV6 | CUT_THRU_EN); + + emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_0, val); + + val = emac_reg_r32(hw, EMAC, EMAC_RXQ_CTRL_1); + val &= ~(JUMBO_1KAH_BMSK | RFD_PREF_LOW_THRESHOLD_BMSK | + RFD_PREF_UP_THRESHOLD_BMSK); + val |= (JUMBO_1KAH << JUMBO_1KAH_SHFT) | + (RFD_PREF_LOW_TH << RFD_PREF_LOW_THRESHOLD_SHFT) | + (RFD_PREF_UP_TH << RFD_PREF_UP_THRESHOLD_SHFT); + emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_1, val); + + val = emac_reg_r32(hw, EMAC, EMAC_RXQ_CTRL_2); + val &= ~(RXF_DOF_THRESHOLD_BMSK | RXF_UOF_THRESHOLD_BMSK); + val |= (RXF_DOF_TH << RXF_DOF_THRESHOLD_SHFT) | + (RXF_UOF_TH << RXF_UOF_THRESHOLD_SHFT); + emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_2, val); + + val = emac_reg_r32(hw, EMAC, EMAC_RXQ_CTRL_3); + val &= ~(RXD_TIMER_BMSK | RXD_THRESHOLD_BMSK); + val |= RXD_TH << RXD_THRESHOLD_SHFT; + emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_3, val); + wmb(); /* ensure that Rx control settings are flushed to HW */ +} + +/* Config dma */ +static void emac_hw_config_dma_ctrl(struct emac_hw *hw) +{ + u32 dma_ctrl; + + dma_ctrl = DMAR_REQ_PRI; + + switch (hw->dma_order) { + case emac_dma_ord_in: + dma_ctrl |= 
IN_ORDER_MODE; + break; + case emac_dma_ord_enh: + dma_ctrl |= ENH_ORDER_MODE; + break; + case emac_dma_ord_out: + dma_ctrl |= OUT_ORDER_MODE; + break; + default: + break; + } + + dma_ctrl |= (((u32)hw->dmar_block) << REGRDBLEN_SHFT) & + REGRDBLEN_BMSK; + dma_ctrl |= (((u32)hw->dmaw_block) << REGWRBLEN_SHFT) & + REGWRBLEN_BMSK; + dma_ctrl |= (((u32)hw->dmar_dly_cnt) << DMAR_DLY_CNT_SHFT) & + DMAR_DLY_CNT_BMSK; + dma_ctrl |= (((u32)hw->dmaw_dly_cnt) << DMAW_DLY_CNT_SHFT) & + DMAW_DLY_CNT_BMSK; + + emac_reg_w32(hw, EMAC, EMAC_DMA_CTRL, dma_ctrl); + wmb(); /* ensure that the DMA configuration is flushed to HW */ +} + +/* Configure MAC */ +void emac_hw_config_mac(struct emac_hw *hw) +{ + struct emac_adapter *adpt = emac_hw_get_adap(hw); + u32 val; + + emac_hw_set_mac_addr(hw, (u8 *)adpt->netdev->dev_addr); + + emac_mac_dma_rings_config(hw); + + emac_reg_w32(hw, EMAC, EMAC_MAX_FRAM_LEN_CTRL, + adpt->netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); + + emac_hw_config_tx_ctrl(hw); + emac_hw_config_rx_ctrl(hw); + emac_hw_config_dma_ctrl(hw); + + if (TEST_FLAG(hw, HW_PTP_CAP)) + emac_ptp_config(hw); + + val = emac_reg_r32(hw, EMAC, EMAC_AXI_MAST_CTRL); + val &= ~(DATA_BYTE_SWAP | MAX_BOUND); + val |= MAX_BTYPE; + emac_reg_w32(hw, EMAC, EMAC_AXI_MAST_CTRL, val); + + emac_reg_w32(hw, EMAC, EMAC_CLK_GATE_CTRL, 0); + emac_reg_w32(hw, EMAC, EMAC_MISC_CTRL, RX_UNCPL_INT_EN); + wmb(); /* ensure that the MAC configuration is flushed to HW */ +} + +/* Reset MAC */ +void emac_hw_reset_mac(struct emac_hw *hw) +{ + emac_reg_w32(hw, EMAC, EMAC_INT_MASK, 0); + emac_reg_w32(hw, EMAC, EMAC_INT_STATUS, DIS_INT); + + emac_hw_stop_mac(hw); + + emac_reg_update32(hw, EMAC, EMAC_DMA_MAS_CTRL, 0, SOFT_RST); + wmb(); /* ensure mac is fully reset */ + usleep_range(100, 150); + + /* interrupt clear-on-read */ + emac_reg_update32(hw, EMAC, EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN); + wmb(); /* ensure the interrupt clear-on-read setting is flushed to HW */ +} + +/* Start MAC */ +void 
emac_hw_start_mac(struct emac_hw *hw) +{ + struct emac_adapter *adpt = emac_hw_get_adap(hw); + struct phy_device *phydev = adpt->phydev; + u32 mac, csr1; + + /* enable tx queue */ + if (adpt->num_txques && adpt->num_txques <= EMAC_MAX_TX_QUEUES) + emac_reg_update32(hw, EMAC, EMAC_TXQ_CTRL_0, 0, TXQ_EN); + + /* enable rx queue */ + if (adpt->num_rxques && adpt->num_rxques <= EMAC_MAX_RX_QUEUES) + emac_reg_update32(hw, EMAC, EMAC_RXQ_CTRL_0, 0, RXQ_EN); + + /* enable mac control */ + mac = emac_reg_r32(hw, EMAC, EMAC_MAC_CTRL); + csr1 = emac_reg_r32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1); + + mac |= TXEN | RXEN; /* enable RX/TX */ + + /* Configure MAC flow control to match the PHY's settings. */ + if (phydev->pause) + mac |= RXFC; + if (phydev->pause != phydev->asym_pause) + mac |= TXFC; + + /* setup link speed */ + mac &= ~SPEED_MASK; + + if (phydev->phy_id == QCA8337_PHY_ID) { + mac |= SPEED(2); + csr1 |= FREQ_MODE; + mac |= FULLD; + } else { + switch (phydev->speed) { + case SPEED_1000: + mac |= SPEED(2); + csr1 |= FREQ_MODE; + break; + default: + mac |= SPEED(1); + csr1 &= ~FREQ_MODE; + break; + } + + if (phydev->duplex == DUPLEX_FULL) + mac |= FULLD; + else + mac &= ~FULLD; + } + + /* other parameters */ + mac |= (CRCE | PCRCE); + mac |= ((hw->preamble << PRLEN_SHFT) & PRLEN_BMSK); + mac |= BROAD_EN; + mac |= FLCHK; + mac &= ~RX_CHKSUM_EN; + mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL | + DEBUG_MODE | SINGLE_PAUSE_MODE); + + emac_reg_w32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, csr1); + emac_reg_w32(hw, EMAC, EMAC_MAC_CTRL, mac); + + /* enable interrupt read clear, low power sleep mode and + * the irq moderators + */ + emac_reg_w32(hw, EMAC, EMAC_IRQ_MOD_TIM_INIT, hw->irq_mod); + emac_reg_w32(hw, EMAC, EMAC_DMA_MAS_CTRL, + (INT_RD_CLR_EN | LPW_MODE | + IRQ_MODERATOR_EN | IRQ_MODERATOR2_EN)); + + if (TEST_FLAG(hw, HW_PTP_CAP)) + emac_ptp_set_linkspeed(hw, phydev->speed); + + emac_hw_config_mac_ctrl(hw); + + emac_reg_update32(hw, EMAC, 
EMAC_ATHR_HEADER_CTRL, + (HEADER_ENABLE | HEADER_CNT_EN), 0); + + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN); + wmb(); /* ensure that MAC setting are flushed to HW */ +} + +/* Stop MAC */ +void emac_hw_stop_mac(struct emac_hw *hw) +{ + emac_reg_update32(hw, EMAC, EMAC_RXQ_CTRL_0, RXQ_EN, 0); + emac_reg_update32(hw, EMAC, EMAC_TXQ_CTRL_0, TXQ_EN, 0); + emac_reg_update32(hw, EMAC, EMAC_MAC_CTRL, (TXEN | RXEN), 0); + wmb(); /* ensure mac is stopped before we proceed */ + usleep_range(1000, 1050); +} + +/* set MAC address */ +void emac_hw_set_mac_addr(struct emac_hw *hw, u8 *addr) +{ + u32 sta; + + /* for example: 00-A0-C6-11-22-33 + * 0<-->C6112233, 1<-->00A0. + */ + + /* low 32bit word */ + sta = (((u32)addr[2]) << 24) | (((u32)addr[3]) << 16) | + (((u32)addr[4]) << 8) | (((u32)addr[5])); + emac_reg_w32(hw, EMAC, EMAC_MAC_STA_ADDR0, sta); + + /* hight 32bit word */ + sta = (((u32)addr[0]) << 8) | (((u32)addr[1])); + emac_reg_w32(hw, EMAC, EMAC_MAC_STA_ADDR1, sta); + wmb(); /* ensure that the MAC address is flushed to HW */ +} + +/* Read one entry from the HW tx timestamp FIFO */ +bool emac_hw_read_tx_tstamp(struct emac_hw *hw, struct emac_hwtxtstamp *ts) +{ + u32 ts_idx; + + ts_idx = emac_reg_r32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_TX_TS_INX); + + if (ts_idx & EMAC_WRAPPER_TX_TS_EMPTY) + return false; + + ts->ns = emac_reg_r32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_TX_TS_LO); + ts->sec = emac_reg_r32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_TX_TS_HI); + ts->ts_idx = ts_idx & EMAC_WRAPPER_TX_TS_INX_BMSK; + + return true; +} diff --git a/drivers/net/ethernet/qualcomm/emac/emac_hw.h b/drivers/net/ethernet/qualcomm/emac/emac_hw.h new file mode 100644 index 000000000000..9fd30896fbd1 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_hw.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _EMAC_HW_H_ +#define _EMAC_HW_H_ + +#include + +#include "emac_main.h" +#include "emac_regs.h" +#include "emac_defines.h" + +/* function prototype */ + +/* REG */ +u32 emac_reg_r32(struct emac_hw *hw, u8 base, u32 reg); +void emac_reg_w32(struct emac_hw *hw, u8 base, u32 reg, u32 val); +void emac_reg_update32(struct emac_hw *hw, u8 base, u32 reg, + u32 mask, u32 val); +u32 emac_reg_field_r32(struct emac_hw *hw, u8 base, u32 reg, + u32 mask, u32 shift); +void emac_hw_config_pow_save(struct emac_hw *hw, u32 speed, bool wol_en, + bool rx_en); +/* MAC */ +void emac_hw_enable_intr(struct emac_hw *hw); +void emac_hw_disable_intr(struct emac_hw *hw); +void emac_hw_set_mc_addr(struct emac_hw *hw, u8 *addr); +void emac_hw_clear_mc_addr(struct emac_hw *hw); + +void emac_hw_config_mac_ctrl(struct emac_hw *hw); +void emac_hw_config_rss(struct emac_hw *hw); +void emac_hw_config_wol(struct emac_hw *hw, u32 wufc); +int emac_hw_config_fc(struct emac_hw *hw); + +void emac_hw_reset_mac(struct emac_hw *hw); +void emac_hw_config_mac(struct emac_hw *hw); +void emac_hw_start_mac(struct emac_hw *hw); +void emac_hw_stop_mac(struct emac_hw *hw); + +void emac_hw_set_mac_addr(struct emac_hw *hw, u8 *addr); + +/* TX Timestamp */ +bool emac_hw_read_tx_tstamp(struct emac_hw *hw, struct emac_hwtxtstamp *ts); + +#define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT) + +#define IMR_EXTENDED_MASK (\ + SW_MAN_INT |\ + ISR_OVER |\ + ISR_ERROR |\ + ISR_TX_PKT) + +#define ISR_RX_PKT (\ + RX_PKT_INT0 |\ + RX_PKT_INT1 |\ + 
RX_PKT_INT2 |\ + RX_PKT_INT3) + +#define ISR_TX_PKT (\ + TX_PKT_INT |\ + TX_PKT_INT1 |\ + TX_PKT_INT2 |\ + TX_PKT_INT3) + +#define ISR_GPHY_LINK (\ + GPHY_LINK_UP_INT |\ + GPHY_LINK_DOWN_INT) + +#define ISR_OVER (\ + RFD0_UR_INT |\ + RFD1_UR_INT |\ + RFD2_UR_INT |\ + RFD3_UR_INT |\ + RFD4_UR_INT |\ + RXF_OF_INT |\ + TXF_UR_INT) + +#define ISR_ERROR (\ + DMAR_TO_INT |\ + DMAW_TO_INT |\ + TXQ_TO_INT) + +#define REG_MAC_RX_STATUS_BIN EMAC_RXMAC_STATC_REG0 +#define REG_MAC_RX_STATUS_END EMAC_RXMAC_STATC_REG22 +#define REG_MAC_TX_STATUS_BIN EMAC_TXMAC_STATC_REG0 +#define REG_MAC_TX_STATUS_END EMAC_TXMAC_STATC_REG24 + +#define RXQ0_NUM_RFD_PREF_DEF 8 +#define TXQ0_NUM_TPD_PREF_DEF 5 + +#define EMAC_PREAMBLE_DEF 7 + +#define DMAR_DLY_CNT_DEF 15 +#define DMAW_DLY_CNT_DEF 4 + +#define MDIO_CLK_25_4 0 + +#define RXQ0_RSS_HSTYP_IPV6_TCP_EN 0x20 +#define RXQ0_RSS_HSTYP_IPV6_EN 0x10 +#define RXQ0_RSS_HSTYP_IPV4_TCP_EN 0x8 +#define RXQ0_RSS_HSTYP_IPV4_EN 0x4 + +#define MASTER_CTRL_CLK_SEL_DIS 0x1000 + +#define MDIO_WAIT_TIMES 1000 + +/* PHY */ +#define MII_PSSR 0x11 /* PHY Specific Status Reg */ +#define MII_DBG_ADDR 0x1D /* PHY Debug Address Reg */ +#define MII_DBG_DATA 0x1E /* PHY Debug Data Reg */ +#define MII_INT_ENABLE 0x12 /* PHY Interrupt Enable Reg */ +#define MII_INT_STATUS 0x13 /* PHY Interrupt Status Reg */ + +/* MII_BMCR (0x00) */ +#define BMCR_SPEED10 0x0000 + +/* MII_PSSR (0x11) */ +#define PSSR_FC_RXEN 0x0004 +#define PSSR_FC_TXEN 0x0008 +#define PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +/* MII DBG registers */ +#define HIBERNATE_CTRL_REG 0xB + +/* HIBERNATE_CTRL_REG */ +#define HIBERNATE_EN 0x8000 + +/* MII_INT_ENABLE/MII_INT_STATUS */ +#define LINK_SUCCESS_INTERRUPT BIT(10) +#define LINK_SUCCESS_BX BIT(7) 
+#define WOL_INT BIT(0) +#endif /*_EMAC_HW_H_*/ diff --git a/drivers/net/ethernet/qualcomm/emac/emac_main.c b/drivers/net/ethernet/qualcomm/emac/emac_main.c new file mode 100644 index 000000000000..2a46075714f1 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_main.c @@ -0,0 +1,3361 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* Qualcomm Technologies, Inc. EMAC Ethernet Controller driver. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_ACPI) +#include +#include +#include +#endif +#include +#include + +#include "emac_main.h" +#include "emac_phy.h" +#include "emac_hw.h" +#include "emac_ptp.h" +#include "emac_sgmii.h" + +#define EMAC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR | NETIF_MSG_TX_QUEUED | \ + NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | \ + NETIF_MSG_PKTDATA | NETIF_MSG_HW | NETIF_MSG_WOL) + +/* Error bits that will result in a received frame being discarded */ +#define EMAC_RRDES_ERROR (EMAC_RRDES_IPF | EMAC_RRDES_CRC | EMAC_RRDES_FAE | \ + EMAC_RRDES_TRN | EMAC_RRDES_RNT | EMAC_RRDES_INC | \ + EMAC_RRDES_FOV | EMAC_RRDES_LEN) + +#define EMAC_RRDES_STATS_DW_IDX 3 +#define EMAC_RRDESC_SIZE 4 +/* The RRD size if timestamping is enabled: */ +#define EMAC_TS_RRDESC_SIZE 6 +#define EMAC_TPDESC_SIZE 4 +#define EMAC_RFDESC_SIZE 2 +#define EMAC_RSS_IDT_SIZE 256 + +#define EMAC_SKB_CB(skb) ((struct emac_skb_cb *)(skb)->cb) + +#define EMAC_PINCTRL_STATE_MDIO_CLK_ACTIVE "emac_mdio_clk_active" +#define EMAC_PINCTRL_STATE_MDIO_CLK_SLEEP "emac_mdio_clk_sleep" +#define EMAC_PINCTRL_STATE_MDIO_DATA_ACTIVE "emac_mdio_data_active" +#define EMAC_PINCTRL_STATE_MDIO_DATA_SLEEP "emac_mdio_data_sleep" +#define EMAC_PINCTRL_STATE_EPHY_ACTIVE "emac_ephy_active" +#define EMAC_PINCTRL_STATE_EPHY_SLEEP "emac_ephy_sleep" + +struct emac_skb_cb { + u32 tpd_idx; + unsigned long jiffies; +}; + +#define EMAC_HWTXTSTAMP_CB(skb) ((struct emac_hwtxtstamp_cb *)(skb)->cb) + +struct emac_hwtxtstamp_cb { + u32 sec; + u32 ns; +}; + +static int msm_emac_msglvl = -1; +//Removed for depricated API +//module_param_named(msglvl, msm_emac_msglvl, int, 0664); + +static int msm_emac_intr_ext; +//Removed for 
deprecated API +//module_param_named(intr_ext, msm_emac_intr_ext, int, 0664); + +static irqreturn_t emac_isr(int irq, void *data); +static irqreturn_t emac_wol_isr(int irq, void *data); + +/* EMAC HW has an issue with interrupt assignment because of which receive queue + * 1 is disabled and following receive rss queue to interrupt mapping is used: + * rss-queue intr + * 0 core0 + * 1 core3 (disabled) + * 2 core1 + * 3 core2 + */ +const struct emac_irq_common emac_irq_cmn_tbl[EMAC_IRQ_CNT] = { + { "emac_core0_irq", emac_isr, EMAC_INT_STATUS, EMAC_INT_MASK, + RX_PKT_INT0, 0}, + { "emac_core3_irq", emac_isr, EMAC_INT3_STATUS, EMAC_INT3_MASK, + 0, 0}, + { "emac_core1_irq", emac_isr, EMAC_INT1_STATUS, EMAC_INT1_MASK, + RX_PKT_INT2, 0}, + { "emac_core2_irq", emac_isr, EMAC_INT2_STATUS, EMAC_INT2_MASK, + RX_PKT_INT3, 0}, + { "emac_wol_irq", emac_wol_isr, 0, 0, + 0, 0}, +}; + +static const char * const emac_clk_name[] = { + "axi_clk", "cfg_ahb_clk", "high_speed_clk", "mdio_clk", "tx_clk", + "rx_clk", "sys_clk" +}; + +static const char * const emac_regulator_name[] = { + "emac_vreg1", "emac_vreg2", "emac_vreg3", "emac_vreg4", "emac_vreg5" +}; + +#if IS_ENABLED(CONFIG_ACPI) +static const struct acpi_device_id emac_acpi_match[] = { + { "QCOM8070", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, emac_acpi_match); + +static int emac_acpi_clk_set_rate(struct emac_adapter *adpt, + enum emac_clk_id id, u64 rate) +{ + union acpi_object params[4], args; + union acpi_object *obj; + const char duuid[16]; + int ret = 0; + + params[0].type = ACPI_TYPE_INTEGER; + params[0].integer.value = id; + params[1].type = ACPI_TYPE_INTEGER; + params[1].integer.value = rate; + args.type = ACPI_TYPE_PACKAGE; + args.package.count = 2; + args.package.elements = &params[0]; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(adpt->netdev->dev.parent), duuid, + 1, 2, &args); + if (!obj) + return -EINVAL; + + if (obj->type != ACPI_TYPE_INTEGER || obj->integer.value) { + ret = -EINVAL; + emac_err(adpt, + "set clock rate for %d 
failed\n", id); + } + + ACPI_FREE(obj); + return ret; +} + +static int emac_acpi_get_resources(struct platform_device *pdev, + struct emac_adapter *adpt) +{ + struct device *dev = &pdev->dev; + union acpi_object *obj; + const char duuid[16]; + + /* Execute DSM function 1 to initialize the clocks */ + obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), duuid, 0x1, 1, NULL); + if (!obj) + return -EINVAL; + + if (obj->type != ACPI_TYPE_INTEGER || obj->integer.value) { + emac_err(adpt, "failed to execute _DSM method function 1\n"); + ACPI_FREE(obj); + return -ENOENT; + } + ACPI_FREE(obj); + return 0; +} +#else +static int emac_acpi_clk_set_rate(struct emac_adapter *adpt, + enum emac_clk_id id, u64 rate) +{ + return -ENXIO; +} + +static int emac_acpi_get_resources(struct platform_device *pdev, + struct emac_adapter *adpt) +{ + return -ENXIO; +} +#endif + +static int emac_clk_prepare_enable(struct emac_adapter *adpt, + enum emac_clk_id id) +{ + int ret = 0; + + if (ACPI_HANDLE(adpt->netdev->dev.parent)) + return 0; + + if (adpt->clk[id].enabled) + return 0; + + ret = clk_prepare_enable(adpt->clk[id].clk); + if (ret) + emac_err(adpt, "error:%d on clk_prepare_enable(%s)\n", ret, + emac_clk_name[id]); + else + adpt->clk[id].enabled = true; + + return ret; +} + +int emac_clk_set_rate(struct emac_adapter *adpt, enum emac_clk_id id, + enum emac_clk_rate rate) +{ + int ret; + + if (ACPI_HANDLE(adpt->netdev->dev.parent)) + return emac_acpi_clk_set_rate(adpt, id, rate); + + ret = clk_set_rate(adpt->clk[id].clk, rate); + if (ret) + emac_err(adpt, "error:%d on clk_set_rate(%s, %d)\n", ret, + emac_clk_name[id], rate); + + return ret; +} + +/* reinitialize */ +int emac_reinit_locked(struct emac_adapter *adpt) +{ + struct net_device *netdev = adpt->netdev; + int ret = 0; + + WARN_ON(in_interrupt()); + + /* Reset might take few 10s of ms */ + while (TEST_N_SET_FLAG(adpt, ADPT_STATE_RESETTING)) + msleep(EMAC_ADPT_RESET_WAIT_TIME); + + if (TEST_FLAG(adpt, ADPT_STATE_DOWN)) { + 
CLR_FLAG(adpt, ADPT_STATE_RESETTING); + return -EPERM; + } + + pm_runtime_get_sync(netdev->dev.parent); + + emac_mac_down(adpt, EMAC_HW_CTRL_RESET_MAC); + adpt->phy.ops.reset(adpt); + ret = emac_mac_up(adpt); + + pm_runtime_mark_last_busy(netdev->dev.parent); + pm_runtime_put_autosuspend(netdev->dev.parent); + + CLR_FLAG(adpt, ADPT_STATE_RESETTING); + return ret; +} + +void emac_task_schedule(struct emac_adapter *adpt) +{ + if (!TEST_FLAG(adpt, ADPT_STATE_DOWN) && + !TEST_FLAG(adpt, ADPT_STATE_WATCH_DOG)) { + SET_FLAG(adpt, ADPT_STATE_WATCH_DOG); + schedule_work(&adpt->work_thread); + } +} + +void emac_check_lsc(struct emac_adapter *adpt) +{ + SET_FLAG(adpt, ADPT_TASK_LSC_REQ); + if (!TEST_FLAG(adpt, ADPT_STATE_DOWN)) + emac_task_schedule(adpt); +} + +/* Respond to a TX hang */ +static void emac_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + + if (!TEST_FLAG(adpt, ADPT_STATE_DOWN)) { + SET_FLAG(adpt, ADPT_TASK_REINIT_REQ); + emac_task_schedule(adpt); + } +} + +/* Configure Multicast and Promiscuous modes */ +static void emac_set_rx_mode(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw *hw = &adpt->hw; + struct netdev_hw_addr *ha; + + if (pm_runtime_status_suspended(adpt->netdev->dev.parent)) + return; + if (TEST_FLAG(adpt, ADPT_STATE_DOWN)) + return; + + /* Check for Promiscuous and All Multicast modes */ + if (netdev->flags & IFF_PROMISC) { + SET_FLAG(hw, HW_PROMISC_EN); + } else if (netdev->flags & IFF_ALLMULTI) { + SET_FLAG(hw, HW_MULTIALL_EN); + CLR_FLAG(hw, HW_PROMISC_EN); + } else { + CLR_FLAG(hw, HW_MULTIALL_EN); + CLR_FLAG(hw, HW_PROMISC_EN); + } + emac_hw_config_mac_ctrl(hw); + + /* update multicast address filtering */ + emac_hw_clear_mc_addr(hw); + netdev_for_each_mc_addr(ha, netdev) + emac_hw_set_mc_addr(hw, ha->addr); +} + +/* Change MAC address */ +static int emac_set_mac_address(struct net_device *netdev, void *p) +{ + struct 
emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw *hw = &adpt->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) + return -EBUSY; + + ether_addr_copy((u8 *)netdev->dev_addr, addr->sa_data); + + pm_runtime_get_sync(netdev->dev.parent); + + emac_hw_set_mac_addr(hw, (u8 *)netdev->dev_addr); + + pm_runtime_mark_last_busy(netdev->dev.parent); + pm_runtime_put_autosuspend(netdev->dev.parent); + return 0; +} + +/* Push the received skb to upper layers */ +static void emac_receive_skb(struct emac_rx_queue *rxque, + struct sk_buff *skb, + u16 vlan_tag, bool vlan_flag) +{ + if (vlan_flag) { + u16 vlan; + + vlan = ((((vlan_tag) >> 8) & 0xFF) | (((vlan_tag) & 0xFF) << 8)); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); + } + + napi_gro_receive(&rxque->napi, skb); +} + +/* Consume next received packet descriptor */ +static bool emac_get_rrdesc(struct emac_rx_queue *rxque, + union emac_sw_rrdesc *srrd) +{ + struct emac_adapter *adpt = netdev_priv(rxque->netdev); + u32 *hrrd = EMAC_RRD(rxque, adpt->rrdesc_size, + rxque->rrd.consume_idx); + + /* If time stamping is enabled, it will be added in the beginning of + * the hw rrd (hrrd). In sw rrd (srrd), dwords 4 & 5 are reserved for + * the time stamp; hence the conversion. + * Also, read the rrd word with update flag first; read rest of rrd + * only if update flag is set. 
+ */ + if (adpt->tstamp_en) + srrd->dfmt.dw[3] = *(hrrd + 5); + else + srrd->dfmt.dw[3] = *(hrrd + 3); + rmb(); /* ensure hw receive returned descriptor timestamp is read */ + + if (!srrd->genr.update) + return false; + + if (adpt->tstamp_en) { + srrd->dfmt.dw[4] = *(hrrd++); + srrd->dfmt.dw[5] = *(hrrd++); + } else { + srrd->dfmt.dw[4] = 0; + srrd->dfmt.dw[5] = 0; + } + + srrd->dfmt.dw[0] = *(hrrd++); + srrd->dfmt.dw[1] = *(hrrd++); + srrd->dfmt.dw[2] = *(hrrd++); + mb(); /* ensure descriptor is read */ + + emac_dbg(adpt, rx_status, adpt->netdev, "RX[%d]:SRRD[%x]: %x:%x:%x:%x:%x:%x\n", + rxque->que_idx, rxque->rrd.consume_idx, srrd->dfmt.dw[0], + srrd->dfmt.dw[1], srrd->dfmt.dw[2], srrd->dfmt.dw[3], + srrd->dfmt.dw[4], srrd->dfmt.dw[5]); + + if (unlikely(srrd->genr.nor != 1)) { + /* multiple rfd not supported */ + emac_err(adpt, "Multi-RFD not support yet! nor = %d\n", + srrd->genr.nor); + } + + /* mark rrd as processed */ + srrd->genr.update = 0; + *hrrd = srrd->dfmt.dw[3]; + + if (++rxque->rrd.consume_idx == rxque->rrd.count) + rxque->rrd.consume_idx = 0; + + return true; +} + +/* Produce new receive free descriptor */ +static bool emac_set_rfdesc(struct emac_rx_queue *rxque, + union emac_sw_rfdesc *srfd) +{ + struct emac_adapter *adpt = netdev_priv(rxque->netdev); + u32 *hrfd = EMAC_RFD(rxque, adpt->rfdesc_size, + rxque->rfd.produce_idx); + + *(hrfd++) = srfd->dfmt.dw[0]; + *hrfd = srfd->dfmt.dw[1]; + + if (++rxque->rfd.produce_idx == rxque->rfd.count) + rxque->rfd.produce_idx = 0; + + return true; +} + +/* Produce new transmit descriptor */ +static bool emac_set_tpdesc(struct emac_tx_queue *txque, + union emac_sw_tpdesc *stpd) +{ + struct emac_adapter *adpt = netdev_priv(txque->netdev); + u32 *htpd; + + txque->tpd.last_produce_idx = txque->tpd.produce_idx; + htpd = EMAC_TPD(txque, adpt->tpdesc_size, txque->tpd.produce_idx); + + if (++txque->tpd.produce_idx == txque->tpd.count) + txque->tpd.produce_idx = 0; + + *(htpd++) = stpd->dfmt.dw[0]; + *(htpd++) = 
stpd->dfmt.dw[1]; + *(htpd++) = stpd->dfmt.dw[2]; + *htpd = stpd->dfmt.dw[3]; + + emac_dbg(adpt, tx_done, adpt->netdev, "TX[%d]:STPD[%x]: %x:%x:%x:%x\n", + txque->que_idx, txque->tpd.last_produce_idx, stpd->dfmt.dw[0], + stpd->dfmt.dw[1], stpd->dfmt.dw[2], stpd->dfmt.dw[3]); + + return true; +} + +/* Mark the last transmit descriptor as such (for the transmit packet) */ +static void emac_set_tpdesc_lastfrag(struct emac_tx_queue *txque) +{ + struct emac_adapter *adpt = netdev_priv(txque->netdev); + u32 tmp_tpd; + u32 *htpd = EMAC_TPD(txque, adpt->tpdesc_size, + txque->tpd.last_produce_idx); + + tmp_tpd = *(htpd + 1); + tmp_tpd |= EMAC_TPD_LAST_FRAGMENT; + *(htpd + 1) = tmp_tpd; +} + +void emac_set_tpdesc_tstamp_sav(struct emac_tx_queue *txque) +{ + struct emac_adapter *adpt = netdev_priv(txque->netdev); + u32 tmp_tpd; + u32 *htpd = EMAC_TPD(txque, adpt->tpdesc_size, + txque->tpd.last_produce_idx); + + tmp_tpd = *(htpd + 3); + tmp_tpd |= EMAC_TPD_TSTAMP_SAVE; + *(htpd + 3) = tmp_tpd; +} + +/* Fill up receive queue's RFD with preallocated receive buffers */ +static int emac_refresh_rx_buffer(struct emac_rx_queue *rxque) +{ + struct emac_adapter *adpt = netdev_priv(rxque->netdev); + struct emac_hw *hw = &adpt->hw; + struct emac_buffer *curr_rxbuf; + struct emac_buffer *next_rxbuf; + u32 count = 0; + u32 next_produce_idx; + + next_produce_idx = rxque->rfd.produce_idx; + if (++next_produce_idx == rxque->rfd.count) + next_produce_idx = 0; + curr_rxbuf = GET_RFD_BUFFER(rxque, rxque->rfd.produce_idx); + next_rxbuf = GET_RFD_BUFFER(rxque, next_produce_idx); + + /* this always has a blank rx_buffer*/ + while (next_rxbuf->dma == 0) { + struct sk_buff *skb; + union emac_sw_rfdesc srfd; + int ret; + + skb = dev_alloc_skb(adpt->rxbuf_size + NET_IP_ALIGN); + if (unlikely(!skb)) { + emac_err(adpt, "alloc rx buffer failed\n"); + break; + } + + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is 
removed + */ + skb_reserve(skb, NET_IP_ALIGN); + curr_rxbuf->skb = skb; + curr_rxbuf->length = adpt->rxbuf_size; + curr_rxbuf->dma = dma_map_single(rxque->dev, skb->data, + curr_rxbuf->length, + DMA_FROM_DEVICE); + ret = dma_mapping_error(rxque->dev, curr_rxbuf->dma); + if (ret) { + emac_err(adpt, + "error DMA mapping DMA buffers, err:%lld buf_vrtl:0x%p data_len:%d dma_dir:%s\n", + (u64)curr_rxbuf->dma, skb->data, + curr_rxbuf->length, "DMA_FROM_DEVICE"); + dev_kfree_skb(skb); + break; + } + srfd.genr.addr = curr_rxbuf->dma; + emac_set_rfdesc(rxque, &srfd); + next_produce_idx = rxque->rfd.produce_idx; + if (++next_produce_idx == rxque->rfd.count) + next_produce_idx = 0; + + curr_rxbuf = GET_RFD_BUFFER(rxque, rxque->rfd.produce_idx); + next_rxbuf = GET_RFD_BUFFER(rxque, next_produce_idx); + count++; + } + + if (count) { + u32 prod_idx = (rxque->rfd.produce_idx << rxque->produce_shft) & + rxque->produce_mask; + wmb(); /* ensure that the descriptors are properly set */ + emac_reg_update32(hw, EMAC, rxque->produce_reg, + rxque->produce_mask, prod_idx); + wmb(); /* ensure that the producer's index is flushed to HW */ + emac_dbg(adpt, rx_status, adpt->netdev, "RX[%d]: prod idx 0x%x\n", + rxque->que_idx, rxque->rfd.produce_idx); + } + + return count; +} + +static void emac_clean_rfdesc(struct emac_rx_queue *rxque, + union emac_sw_rrdesc *srrd) +{ + struct emac_buffer *rfbuf = rxque->rfd.rfbuff; + u32 consume_idx = srrd->genr.si; + u16 i; + + for (i = 0; i < srrd->genr.nor; i++) { + rfbuf[consume_idx].skb = NULL; + if (++consume_idx == rxque->rfd.count) + consume_idx = 0; + } + + rxque->rfd.consume_idx = consume_idx; + rxque->rfd.process_idx = consume_idx; +} + +static inline bool emac_skb_cb_expired(struct sk_buff *skb) +{ + if (time_is_after_jiffies(EMAC_SKB_CB(skb)->jiffies + + msecs_to_jiffies(100))) + return false; + return true; +} + +/* proper lock must be acquired before polling */ +static void emac_poll_hwtxtstamp(struct emac_adapter *adpt) +{ + struct 
sk_buff_head *pending_q = &adpt->hwtxtstamp_pending_queue; + struct sk_buff_head *q = &adpt->hwtxtstamp_ready_queue; + struct sk_buff *skb, *skb_tmp; + struct emac_hwtxtstamp hwtxtstamp; + + while (emac_hw_read_tx_tstamp(&adpt->hw, &hwtxtstamp)) { + bool found = false; + + adpt->hwtxtstamp_stats.rx++; + + skb_queue_walk_safe(pending_q, skb, skb_tmp) { + if (EMAC_SKB_CB(skb)->tpd_idx == hwtxtstamp.ts_idx) { + struct sk_buff *pskb; + + EMAC_HWTXTSTAMP_CB(skb)->sec = hwtxtstamp.sec; + EMAC_HWTXTSTAMP_CB(skb)->ns = hwtxtstamp.ns; + /* the tx timestamps for all the pending + * packets before this one are lost + */ + while ((pskb = __skb_dequeue(pending_q)) + != skb) { + if (!pskb) + break; + EMAC_HWTXTSTAMP_CB(pskb)->sec = 0; + EMAC_HWTXTSTAMP_CB(pskb)->ns = 0; + __skb_queue_tail(q, pskb); + adpt->hwtxtstamp_stats.lost++; + } + __skb_queue_tail(q, skb); + found = true; + break; + } + } + + if (!found) { + emac_dbg(adpt, tx_done, adpt->netdev, + "no entry(tpd=%d) found, drop tx timestamp\n", + hwtxtstamp.ts_idx); + adpt->hwtxtstamp_stats.drop++; + } + } + + skb_queue_walk_safe(pending_q, skb, skb_tmp) { + /* No packet after this one expires */ + if (!emac_skb_cb_expired(skb)) + break; + adpt->hwtxtstamp_stats.timeout++; + emac_dbg(adpt, tx_done, adpt->netdev, + "tx timestamp timeout: tpd_idx=%d\n", + EMAC_SKB_CB(skb)->tpd_idx); + + __skb_unlink(skb, pending_q); + EMAC_HWTXTSTAMP_CB(skb)->sec = 0; + EMAC_HWTXTSTAMP_CB(skb)->ns = 0; + __skb_queue_tail(q, skb); + } +} + +static void emac_schedule_hwtxtstamp_task(struct emac_adapter *adpt) +{ + if (TEST_FLAG(adpt, ADPT_STATE_DOWN)) + return; + + if (schedule_work(&adpt->hwtxtstamp_task)) + adpt->hwtxtstamp_stats.sched++; +} + +static void emac_hwtxtstamp_task_routine(struct work_struct *work) +{ + struct emac_adapter *adpt = container_of(work, struct emac_adapter, + hwtxtstamp_task); + struct sk_buff *skb; + struct sk_buff_head q; + unsigned long flags; + + adpt->hwtxtstamp_stats.poll++; + + __skb_queue_head_init(&q); + + 
while (1) { + spin_lock_irqsave(&adpt->hwtxtstamp_lock, flags); + if (adpt->hwtxtstamp_pending_queue.qlen) + emac_poll_hwtxtstamp(adpt); + skb_queue_splice_tail_init(&adpt->hwtxtstamp_ready_queue, &q); + spin_unlock_irqrestore(&adpt->hwtxtstamp_lock, flags); + + if (!q.qlen) + break; + + while ((skb = __skb_dequeue(&q))) { + struct emac_hwtxtstamp_cb *cb = EMAC_HWTXTSTAMP_CB(skb); + + if (cb->sec || cb->ns) { + struct skb_shared_hwtstamps ts; + + ts.hwtstamp = ktime_set(cb->sec, cb->ns); + skb_tstamp_tx(skb, &ts); + adpt->hwtxtstamp_stats.deliver++; + } + dev_kfree_skb_any(skb); + } + } + + if (adpt->hwtxtstamp_pending_queue.qlen) + emac_schedule_hwtxtstamp_task(adpt); +} + +/* Process receive event */ +static void emac_handle_rx(struct emac_adapter *adpt, + struct emac_rx_queue *rxque, + int *num_pkts, int max_pkts) +{ + struct emac_hw *hw = &adpt->hw; + struct net_device *netdev = adpt->netdev; + + union emac_sw_rrdesc srrd; + struct emac_buffer *rfbuf; + struct sk_buff *skb; + + u32 hw_consume_idx, num_consume_pkts; + u32 count = 0; + u32 proc_idx; + + hw_consume_idx = emac_reg_field_r32(hw, EMAC, rxque->consume_reg, + rxque->consume_mask, + rxque->consume_shft); + num_consume_pkts = (hw_consume_idx >= rxque->rrd.consume_idx) ? 
+ (hw_consume_idx - rxque->rrd.consume_idx) : + (hw_consume_idx + rxque->rrd.count - rxque->rrd.consume_idx); + + do { + if (!num_consume_pkts) + break; + + if (!emac_get_rrdesc(rxque, &srrd)) + break; + + if (likely(srrd.genr.nor == 1)) { + /* good receive */ + rfbuf = GET_RFD_BUFFER(rxque, srrd.genr.si); + dma_unmap_single(rxque->dev, rfbuf->dma, rfbuf->length, + DMA_FROM_DEVICE); + rfbuf->dma = 0; + skb = rfbuf->skb; + } else { + /* multi rfd not supported */ + emac_err(adpt, "multi-RFD not support yet!\n"); + break; + } + emac_clean_rfdesc(rxque, &srrd); + num_consume_pkts--; + count++; + + /* Due to a HW issue in L4 check sum detection (UDP/TCP frags + * with DF set are marked as error), drop packets based on the + * error mask rather than the summary bit (ignoring L4F errors) + */ + if (srrd.dfmt.dw[EMAC_RRDES_STATS_DW_IDX] & EMAC_RRDES_ERROR) { + emac_dbg(adpt, rx_status, adpt->netdev, + "Drop error packet[RRD: 0x%x:0x%x:0x%x:0x%x]\n", + srrd.dfmt.dw[0], srrd.dfmt.dw[1], + srrd.dfmt.dw[2], srrd.dfmt.dw[3]); + + dev_kfree_skb(skb); + continue; + } + + skb_put(skb, srrd.genr.pkt_len - ETH_FCS_LEN); + skb->dev = netdev; + skb->protocol = eth_type_trans(skb, skb->dev); + if (netdev->features & NETIF_F_RXCSUM) + skb->ip_summed = ((srrd.genr.l4f) ? 
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY); + else + skb_checksum_none_assert(skb); + + if (TEST_FLAG(hw, HW_TS_RX_EN)) { + struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); + + hwts->hwtstamp = ktime_set(srrd.genr.ts_high, + srrd.genr.ts_low); + } + + emac_receive_skb(rxque, skb, (u16)srrd.genr.cvlan_tag, + (bool)srrd.genr.cvlan_flag); + + //netdev->last_rx = jiffies; removed as not applicable in kernel 6.1 + (*num_pkts)++; + } while (*num_pkts < max_pkts); + + if (count) { + proc_idx = (rxque->rfd.process_idx << rxque->process_shft) & + rxque->process_mask; + wmb(); /* ensure that the descriptors are properly cleared */ + emac_reg_update32(hw, EMAC, rxque->process_reg, + rxque->process_mask, proc_idx); + wmb(); /* ensure that RFD producer index is flushed to HW */ + emac_dbg(adpt, rx_status, adpt->netdev, "RX[%d]: proc idx 0x%x\n", + rxque->que_idx, rxque->rfd.process_idx); + + emac_refresh_rx_buffer(rxque); + } +} + +/* get the number of free transmit descriptors */ +static u32 emac_get_num_free_tpdescs(struct emac_tx_queue *txque) +{ + u32 produce_idx = txque->tpd.produce_idx; + u32 consume_idx = txque->tpd.consume_idx; + + return (consume_idx > produce_idx) ? 
+ (consume_idx - produce_idx - 1) : + (txque->tpd.count + consume_idx - produce_idx - 1); +} + +/* Process transmit event */ +static void emac_handle_tx(struct emac_adapter *adpt, + struct emac_tx_queue *txque) +{ + struct emac_hw *hw = &adpt->hw; + struct emac_buffer *tpbuf; + u32 hw_consume_idx; + u32 pkts_compl = 0, bytes_compl = 0; + + hw_consume_idx = emac_reg_field_r32(hw, EMAC, txque->consume_reg, + txque->consume_mask, + txque->consume_shft); + emac_dbg(adpt, tx_done, adpt->netdev, "TX[%d]: cons idx 0x%x\n", + txque->que_idx, hw_consume_idx); + + while (txque->tpd.consume_idx != hw_consume_idx) { + tpbuf = GET_TPD_BUFFER(txque, txque->tpd.consume_idx); + if (tpbuf->dma) { + dma_unmap_single(txque->dev, tpbuf->dma, tpbuf->length, + DMA_TO_DEVICE); + tpbuf->dma = 0; + } + + if (tpbuf->skb) { + pkts_compl++; + bytes_compl += tpbuf->skb->len; + dev_kfree_skb_irq(tpbuf->skb); + tpbuf->skb = NULL; + } + + if (++txque->tpd.consume_idx == txque->tpd.count) + txque->tpd.consume_idx = 0; + } + + if (pkts_compl || bytes_compl) + netdev_completed_queue(adpt->netdev, pkts_compl, bytes_compl); +} + +/* NAPI */ +static int emac_napi_rtx(struct napi_struct *napi, int budget) +{ + struct emac_rx_queue *rxque = container_of(napi, struct emac_rx_queue, + napi); + struct emac_adapter *adpt = netdev_priv(rxque->netdev); + struct emac_irq_per_dev *irq = rxque->irq; + struct emac_hw *hw = &adpt->hw; + int work_done = 0; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(adpt->netdev)) + goto quit_polling; + + emac_handle_rx(adpt, rxque, &work_done, budget); + + if (work_done < budget) { +quit_polling: + napi_complete(napi); + + irq->mask |= rxque->intr; + emac_reg_w32(hw, EMAC, emac_irq_cmn_tbl[irq->idx].mask_reg, + irq->mask); + wmb(); /* ensure that interrupt enable is flushed to HW */ + } + return work_done; +} + +/* Check if enough transmit descriptors are available */ +static bool emac_check_num_tpdescs(struct emac_tx_queue *txque, + const 
struct sk_buff *skb) +{ + u32 num_required = 1; + u16 i; + u16 proto_hdr_len = 0; + + if (skb_is_gso(skb)) { + proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (proto_hdr_len < skb_headlen(skb)) + num_required++; + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + num_required++; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) + num_required++; + + return num_required < emac_get_num_free_tpdescs(txque); +} + +/* Fill up transmit descriptors with TSO and Checksum offload information */ +static int emac_tso_csum(struct emac_adapter *adpt, + struct emac_tx_queue *txque, + struct sk_buff *skb, + union emac_sw_tpdesc *stpd) +{ + u8 hdr_len; + int retval; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + retval = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (unlikely(retval)) + return retval; + } + + if (skb->protocol == htons(ETH_P_IP)) { + u32 pkt_len = + ((unsigned char *)ip_hdr(skb) - skb->data) + + ntohs(ip_hdr(skb)->tot_len); + if (skb->len > pkt_len) + pskb_trim(skb, pkt_len); + } + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (unlikely(skb->len == hdr_len)) { + /* we only need to do csum */ + emac_warn(adpt, tx_err, adpt->netdev, + "tso not needed for packet with 0 data\n"); + goto do_csum; + } + + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + stpd->genr.ipv4 = 1; + } + + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { + /* ipv6 tso need an extra tpd */ + union emac_sw_tpdesc extra_tpd; + + memset(stpd, 0, sizeof(union emac_sw_tpdesc)); + memset(&extra_tpd, 0, sizeof(union emac_sw_tpdesc)); + + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + extra_tpd.tso.pkt_len = skb->len; + extra_tpd.tso.lso = 0x1; + extra_tpd.tso.lso_v2 = 0x1; + emac_set_tpdesc(txque, &extra_tpd); + 
stpd->tso.lso_v2 = 0x1; + } + + stpd->tso.lso = 0x1; + stpd->tso.tcphdr_offset = skb_transport_offset(skb); + stpd->tso.mss = skb_shinfo(skb)->gso_size; + return 0; + } + +do_csum: + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + u8 css, cso; + + cso = skb_transport_offset(skb); + if (unlikely(cso & 0x1)) { + emac_err(adpt, "payload offset should be even\n"); + return -EINVAL; + } + css = cso + skb->csum_offset; + + stpd->csum.payld_offset = cso >> 1; + stpd->csum.cxsum_offset = css >> 1; + stpd->csum.c_csum = 0x1; + } + + return 0; +} + +/* Fill up transmit descriptors */ +static void emac_tx_map(struct emac_adapter *adpt, + struct emac_tx_queue *txque, + struct sk_buff *skb, + union emac_sw_tpdesc *stpd) +{ + struct emac_hw *hw = &adpt->hw; + struct emac_buffer *tpbuf = NULL; + u16 nr_frags = skb_shinfo(skb)->nr_frags; + u32 len = skb_headlen(skb); + u16 map_len = 0; + u16 mapped_len = 0; + u16 hdr_len = 0; + u16 i; + u32 tso = stpd->tso.lso; + + if (tso) { + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + map_len = hdr_len; + + tpbuf = GET_TPD_BUFFER(txque, txque->tpd.produce_idx); + tpbuf->length = map_len; + tpbuf->dma = dma_map_single(txque->dev, skb->data, + hdr_len, DMA_TO_DEVICE); + mapped_len += map_len; + stpd->genr.addr_lo = EMAC_DMA_ADDR_LO(tpbuf->dma); + stpd->genr.addr_hi = EMAC_DMA_ADDR_HI(tpbuf->dma); + stpd->genr.buffer_len = tpbuf->length; + emac_set_tpdesc(txque, stpd); + } + + if (mapped_len < len) { + tpbuf = GET_TPD_BUFFER(txque, txque->tpd.produce_idx); + tpbuf->length = len - mapped_len; + tpbuf->dma = dma_map_single(txque->dev, skb->data + mapped_len, + tpbuf->length, DMA_TO_DEVICE); + stpd->genr.addr_lo = EMAC_DMA_ADDR_LO(tpbuf->dma); + stpd->genr.addr_hi = EMAC_DMA_ADDR_HI(tpbuf->dma); + stpd->genr.buffer_len = tpbuf->length; + emac_set_tpdesc(txque, stpd); + } + + for (i = 0; i < nr_frags; i++) { + skb_frag_t *frag; + + frag = &skb_shinfo(skb)->frags[i]; + + tpbuf = GET_TPD_BUFFER(txque, txque->tpd.produce_idx); + 
The last buffer info contains the skb address,
This function acquires a spin-lock, so it must not be called from sleeping context
adpt->netdev; + u16 val = 0, i; + u32 ret = 0; + + pm_runtime_get_sync(netdev->dev.parent); + + /* read switch interrupt status reg */ + if (adpt->phydev->phy_id == QCA8337_PHY_ID) + ret = qca8337_read(adpt->phydev->priv, QCA8337_GLOBAL_INT1); + + for (i = 0; i < QCA8337_NUM_PHYS ; i++) { + ret = mdiobus_read(adpt->phydev->mdio.bus, i, MII_INT_STATUS); + + if ((ret & LINK_SUCCESS_INTERRUPT) || (ret & LINK_SUCCESS_BX)) + val |= 1 << i; + if (adpt->phydev->phy_id != QCA8337_PHY_ID) + break; + } + + pm_runtime_mark_last_busy(netdev->dev.parent); + pm_runtime_put_autosuspend(netdev->dev.parent); + + if (!pm_runtime_status_suspended(adpt->netdev->dev.parent)) { + if (val) + emac_wol_gpio_irq(adpt, false); + if (ret & WOL_INT) + __pm_stay_awake(adpt->link_wlock); + } + return IRQ_HANDLED; +} + +static irqreturn_t emac_isr(int _irq, void *data) +{ + struct emac_irq_per_dev *irq = data; + const struct emac_irq_common *irq_cmn = &emac_irq_cmn_tbl[irq->idx]; + struct emac_adapter *adpt = emac_irq_get_adpt(data); + struct emac_rx_queue *rxque = &adpt->rx_queue[irq->idx]; + struct emac_hw *hw = &adpt->hw; + int max_ints = 1; + u32 isr, status; + + emac_dbg(emac_irq_get_adpt(data), wol, adpt->netdev, "EMAC wol interrupt received\n"); + /* disable the interrupt */ + emac_reg_w32(hw, EMAC, irq_cmn->mask_reg, 0); + wmb(); /* ensure that interrupt disable is flushed to HW */ + + do { + isr = emac_reg_r32(hw, EMAC, irq_cmn->status_reg); + status = isr & irq->mask; + + if (status == 0) + break; + + if (status & ISR_ERROR) { + emac_warn(adpt, intr, adpt->netdev, "isr error status 0x%lx\n", + status & ISR_ERROR); + /* reset MAC */ + SET_FLAG(adpt, ADPT_TASK_REINIT_REQ); + emac_task_schedule(adpt); + } + + /* Schedule the napi for receive queue with interrupt + * status bit set + */ + if ((status & rxque->intr)) { + if (napi_schedule_prep(&rxque->napi)) { + irq->mask &= ~rxque->intr; + __napi_schedule(&rxque->napi); + } + } + + if (status & ISR_TX_PKT) { + if (status & TX_PKT_INT) + 
emac_handle_tx(adpt, &adpt->tx_queue[0]); + if (status & TX_PKT_INT1) + emac_handle_tx(adpt, &adpt->tx_queue[1]); + if (status & TX_PKT_INT2) + emac_handle_tx(adpt, &adpt->tx_queue[2]); + if (status & TX_PKT_INT3) + emac_handle_tx(adpt, &adpt->tx_queue[3]); + } + + if (status & ISR_OVER) + emac_warn(adpt, intr, adpt->netdev, "TX/RX overflow status 0x%lx\n", + status & ISR_OVER); + + /* link event */ + if (status & (ISR_GPHY_LINK | SW_MAN_INT)) { + adpt->irq_status = ISR_GPHY_LINK; + emac_check_lsc(adpt); + break; + } + + if (status & PTP_INT) + emac_ptp_intr(hw); + } while (--max_ints > 0); + + /* enable the interrupt */ + emac_reg_w32(hw, EMAC, irq_cmn->mask_reg, irq->mask); + wmb(); /* ensure that interrupt enable is flushed to HW */ + return IRQ_HANDLED; +} + +/* Enable interrupts */ +static inline void emac_enable_intr(struct emac_adapter *adpt) +{ + struct emac_hw *hw = &adpt->hw; + + emac_hw_enable_intr(hw); +} + +/* Disable interrupts */ +static inline void emac_disable_intr(struct emac_adapter *adpt) +{ + struct emac_hw *hw = &adpt->hw; + int i; + + emac_hw_disable_intr(hw); + for (i = 0; i < EMAC_NUM_CORE_IRQ; i++) + if (adpt->irq[i].irq) + synchronize_irq(adpt->irq[i].irq); +} + +/* Configure VLAN tag strip/insert feature */ +static int emac_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw *hw = &adpt->hw; + netdev_features_t changed = features ^ netdev->features; + + if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))) + return 0; + + if (!netif_running(netdev)) + return 0; + + netdev->features = features; + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + SET_FLAG(hw, HW_VLANSTRIP_EN); + else + CLR_FLAG(hw, HW_VLANSTRIP_EN); + + return emac_reinit_locked(adpt); +} + +static void emac_napi_enable_all(struct emac_adapter *adpt) +{ + u8 i; + + for (i = 0; i < adpt->num_rxques; i++) + napi_enable(&adpt->rx_queue[i].napi); +} + +static void 
emac_napi_disable_all(struct emac_adapter *adpt) +{ + u8 i; + + for (i = 0; i < adpt->num_rxques; i++) + napi_disable(&adpt->rx_queue[i].napi); +} + +/* Free all descriptors of given transmit queue */ +static void emac_clean_tx_queue(struct emac_tx_queue *txque) +{ + struct device *dev = txque->dev; + unsigned long size; + u32 i; + + /* ring already cleared, nothing to do */ + if (!txque->tpd.tpbuff) + return; + + for (i = 0; i < txque->tpd.count; i++) { + struct emac_buffer *tpbuf = GET_TPD_BUFFER(txque, i); + + if (tpbuf->dma) { + dma_unmap_single(dev, tpbuf->dma, tpbuf->length, + DMA_TO_DEVICE); + tpbuf->dma = 0; + } + if (tpbuf->skb) { + dev_kfree_skb_any(tpbuf->skb); + tpbuf->skb = NULL; + } + } + + size = sizeof(struct emac_buffer) * txque->tpd.count; + memset(txque->tpd.tpbuff, 0, size); + + /* clear the descriptor ring */ + memset(txque->tpd.tpdesc, 0, txque->tpd.size); + + txque->tpd.consume_idx = 0; + txque->tpd.produce_idx = 0; +} + +static void emac_clean_all_tx_queues(struct emac_adapter *adpt) +{ + u8 i; + + for (i = 0; i < adpt->num_txques; i++) + emac_clean_tx_queue(&adpt->tx_queue[i]); + netdev_reset_queue(adpt->netdev); +} + +/* Free all descriptors of given receive queue */ +static void emac_clean_rx_queue(struct emac_rx_queue *rxque) +{ + struct device *dev = rxque->dev; + unsigned long size; + u32 i; + + /* ring already cleared, nothing to do */ + if (!rxque->rfd.rfbuff) + return; + + for (i = 0; i < rxque->rfd.count; i++) { + struct emac_buffer *rfbuf = GET_RFD_BUFFER(rxque, i); + + if (rfbuf->dma) { + dma_unmap_single(dev, rfbuf->dma, rfbuf->length, + DMA_FROM_DEVICE); + rfbuf->dma = 0; + } + if (rfbuf->skb) { + dev_kfree_skb(rfbuf->skb); + rfbuf->skb = NULL; + } + } + + size = sizeof(struct emac_buffer) * rxque->rfd.count; + memset(rxque->rfd.rfbuff, 0, size); + + /* clear the descriptor rings */ + memset(rxque->rrd.rrdesc, 0, rxque->rrd.size); + rxque->rrd.produce_idx = 0; + rxque->rrd.consume_idx = 0; + + memset(rxque->rfd.rfdesc, 0, 
rxque->rfd.size); + rxque->rfd.produce_idx = 0; + rxque->rfd.consume_idx = 0; +} + +static void emac_clean_all_rx_queues(struct emac_adapter *adpt) +{ + u8 i; + + for (i = 0; i < adpt->num_rxques; i++) + emac_clean_rx_queue(&adpt->rx_queue[i]); +} + +/* Free all buffers associated with given transmit queue */ +static void emac_free_tx_descriptor(struct emac_tx_queue *txque) +{ + emac_clean_tx_queue(txque); + + kfree(txque->tpd.tpbuff); + txque->tpd.tpbuff = NULL; + txque->tpd.tpdesc = NULL; + txque->tpd.tpdma = 0; + txque->tpd.size = 0; +} + +static void emac_free_all_tx_descriptor(struct emac_adapter *adpt) +{ + u8 i; + + for (i = 0; i < adpt->num_txques; i++) + emac_free_tx_descriptor(&adpt->tx_queue[i]); +} + +/* Allocate TX descriptor ring for the given transmit queue */ +static int emac_alloc_tx_descriptor(struct emac_adapter *adpt, + struct emac_tx_queue *txque) +{ + struct emac_ring_header *ring_header = &adpt->ring_header; + unsigned long size; + + size = sizeof(struct emac_buffer) * txque->tpd.count; + txque->tpd.tpbuff = kzalloc(size, GFP_KERNEL); + if (!txque->tpd.tpbuff) + goto err_alloc_tpq_buffer; + + txque->tpd.size = txque->tpd.count * (adpt->tpdesc_size * 4); + txque->tpd.tpdma = ring_header->dma + ring_header->used; + txque->tpd.tpdesc = ring_header->desc + ring_header->used; + ring_header->used += ALIGN(txque->tpd.size, 8); + txque->tpd.produce_idx = 0; + txque->tpd.consume_idx = 0; + return 0; + +err_alloc_tpq_buffer: + emac_err(adpt, "Unable to allocate memory for the Tx descriptor\n"); + return -ENOMEM; +} + +static int emac_alloc_all_tx_descriptor(struct emac_adapter *adpt) +{ + int retval = 0; + u8 i; + + for (i = 0; i < adpt->num_txques; i++) { + retval = emac_alloc_tx_descriptor(adpt, &adpt->tx_queue[i]); + if (retval) + break; + } + + if (retval) { + emac_err(adpt, "Allocation for Tx Queue %u failed\n", i); + for (i--; i > 0; i--) + emac_free_tx_descriptor(&adpt->tx_queue[i]); + } + + return retval; +} + +/* Free all buffers associated 
with given receive queue */
emac_free_rx_descriptor(&adpt->rx_queue[i]); + } + + return retval; +} + +/* Allocate all TX and RX descriptor rings */ +static int emac_alloc_all_rtx_descriptor(struct emac_adapter *adpt) +{ + struct emac_ring_header *ring_header = &adpt->ring_header; + int num_tques = adpt->num_txques; + int num_rques = adpt->num_rxques; + unsigned int num_tx_descs = adpt->num_txdescs; + unsigned int num_rx_descs = adpt->num_rxdescs; + struct device *dev = adpt->rx_queue[0].dev; + int retval, que_idx; + + for (que_idx = 0; que_idx < adpt->num_txques; que_idx++) + adpt->tx_queue[que_idx].tpd.count = adpt->num_txdescs; + + for (que_idx = 0; que_idx < adpt->num_rxques; que_idx++) { + adpt->rx_queue[que_idx].rrd.count = adpt->num_rxdescs; + adpt->rx_queue[que_idx].rfd.count = adpt->num_rxdescs; + } + + /* Ring DMA buffer. Each ring may need up to 8 bytes for alignment, + * hence the additional padding bytes are allocated. + */ + ring_header->size = + num_tques * num_tx_descs * (adpt->tpdesc_size * 4) + + num_rques * num_rx_descs * (adpt->rfdesc_size * 4) + + num_rques * num_rx_descs * (adpt->rrdesc_size * 4) + + num_tques * 8 + num_rques * 2 * 8; + + emac_info(adpt, ifup, adpt->netdev, "TX queues %d, TX descriptors %d\n", + num_tques, num_tx_descs); + emac_info(adpt, ifup, adpt->netdev, "RX queues %d, Rx descriptors %d\n", + num_rques, num_rx_descs); + + ring_header->used = 0; + ring_header->desc = dma_zalloc_coherent(dev, ring_header->size, + &ring_header->dma, GFP_KERNEL); + if (!ring_header->desc) { + retval = -ENOMEM; + goto err_alloc_dma; + } + /* dma_zalloc_coherent should be used for ring_header -> desc, + * instead of dma_alloc_coherent/memset + */ + //memset(ring_header->desc, 0, ring_header->size); + ring_header->used = ALIGN(ring_header->dma, 8) - ring_header->dma; + + retval = emac_alloc_all_tx_descriptor(adpt); + if (retval) + goto err_alloc_tx; + + retval = emac_alloc_all_rx_descriptor(adpt); + if (retval) + goto err_alloc_rx; + + return 0; + +err_alloc_rx: + 
emac_free_all_tx_descriptor(adpt); +err_alloc_tx: + dma_free_coherent(dev, ring_header->size, + ring_header->desc, ring_header->dma); + + ring_header->desc = NULL; + ring_header->dma = 0; + ring_header->size = 0; + ring_header->used = 0; +err_alloc_dma: + return retval; +} + +/* Free all TX and RX descriptor rings */ +static void emac_free_all_rtx_descriptor(struct emac_adapter *adpt) +{ + struct emac_ring_header *ring_header = &adpt->ring_header; + struct device *dev = adpt->rx_queue[0].dev; + + emac_free_all_tx_descriptor(adpt); + emac_free_all_rx_descriptor(adpt); + + dma_free_coherent(dev, ring_header->size, + ring_header->desc, ring_header->dma); + + ring_header->desc = NULL; + ring_header->dma = 0; + ring_header->size = 0; + ring_header->used = 0; +} + +/* Initialize descriptor rings */ +static void emac_init_ring_ptrs(struct emac_adapter *adpt) +{ + int i, j; + + for (i = 0; i < adpt->num_txques; i++) { + struct emac_tx_queue *txque = &adpt->tx_queue[i]; + struct emac_buffer *tpbuf = txque->tpd.tpbuff; + + txque->tpd.produce_idx = 0; + txque->tpd.consume_idx = 0; + for (j = 0; j < txque->tpd.count; j++) + tpbuf[j].dma = 0; + } + + for (i = 0; i < adpt->num_rxques; i++) { + struct emac_rx_queue *rxque = &adpt->rx_queue[i]; + struct emac_buffer *rfbuf = rxque->rfd.rfbuff; + + rxque->rrd.produce_idx = 0; + rxque->rrd.consume_idx = 0; + rxque->rfd.produce_idx = 0; + rxque->rfd.consume_idx = 0; + for (j = 0; j < rxque->rfd.count; j++) + rfbuf[j].dma = 0; + } +} + +/* Configure Receive Side Scaling (RSS) */ +static void emac_config_rss(struct emac_adapter *adpt) +{ + static const u8 key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA}; + + struct emac_hw *hw = &adpt->hw; + u32 reta = 0; + u16 i, j; + + if (adpt->num_rxques == 1) + return; + + if 
(!hw->rss_initialized) { + hw->rss_initialized = true; + /* initialize rss hash type and idt table size */ + hw->rss_hstype = EMAC_RSS_HSTYP_ALL_EN; + hw->rss_idt_size = EMAC_RSS_IDT_SIZE; + + /* Fill out RSS key */ + memcpy(hw->rss_key, key, sizeof(hw->rss_key)); + + /* Fill out redirection table */ + memset(hw->rss_idt, 0x0, sizeof(hw->rss_idt)); + for (i = 0, j = 0; i < EMAC_RSS_IDT_SIZE; i++, j++) { + if (j == adpt->num_rxques) + j = 0; + if (j > 1) + reta |= (j << ((i & 7) * 4)); + if ((i & 7) == 7) { + hw->rss_idt[i >> 3] = reta; + reta = 0; + } + } + } + + emac_hw_config_rss(hw); +} + +/* Change the Maximum Transfer Unit (MTU) */ +static int emac_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if (max_frame < EMAC_MIN_ETH_FRAME_SIZE || + max_frame > EMAC_MAX_ETH_FRAME_SIZE) { + emac_err(adpt, "invalid MTU setting\n"); + return -EINVAL; + } + + if (old_mtu != new_mtu && netif_running(netdev)) { + emac_info(adpt, hw, adpt->netdev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + adpt->rxbuf_size = new_mtu > EMAC_DEF_RX_BUF_SIZE ? 
+ ALIGN(max_frame, 8) : EMAC_DEF_RX_BUF_SIZE; + if (netif_running(netdev)) + return emac_reinit_locked(adpt); + } + return 0; +} + +static inline int msm_emac_request_pinctrl_on(struct emac_adapter *adpt, + bool mdio, bool ephy) +{ + int result = 0; + int ret = 0; + struct emac_phy *phy = &adpt->phy; + + if (phy->external) { + if (mdio) { + result = pinctrl_select_state(adpt->pinctrl, + adpt->mdio_pins_clk_active); + if (result) + emac_err(adpt, + "error:%d Can not switch on %s pins\n", + result, + EMAC_PINCTRL_STATE_MDIO_CLK_ACTIVE); + ret = result; + + result = pinctrl_select_state(adpt->pinctrl, + adpt->mdio_pins_data_active); + if (result) + emac_err(adpt, + "error:%d Can not switch on %s pins\n", + result, + EMAC_PINCTRL_STATE_MDIO_DATA_ACTIVE); + ret = result; + } + + if (ephy) { + result = pinctrl_select_state(adpt->pinctrl, + adpt->ephy_pins_active); + if (result) + emac_err(adpt, + "error:%d Can not switch on %s pins\n", + result, + EMAC_PINCTRL_STATE_EPHY_ACTIVE); + if (!ret) + ret = result; + } + } + return ret; +} + +static inline int msm_emac_request_pinctrl_off(struct emac_adapter *adpt, + bool mdio, bool ephy) +{ + int result = 0; + int ret = 0; + struct emac_phy *phy = &adpt->phy; + + if (phy->external) { + if (mdio) { + result = pinctrl_select_state(adpt->pinctrl, + adpt->mdio_pins_clk_sleep); + if (result) + emac_err(adpt, + "error:%d Can not switch off %s pins\n", + result, EMAC_PINCTRL_STATE_MDIO_CLK_SLEEP); + ret = result; + + result = pinctrl_select_state(adpt->pinctrl, + adpt->mdio_pins_data_sleep); + if (result) + emac_err(adpt, + "error:%d Can not switch off %s pins\n", + result, EMAC_PINCTRL_STATE_MDIO_DATA_SLEEP); + ret = result; + } + + if (ephy) { + result = pinctrl_select_state(adpt->pinctrl, + adpt->ephy_pins_sleep); + if (result) + emac_err(adpt, + "error:%d Can not switch off %s pins\n", + result, EMAC_PINCTRL_STATE_EPHY_SLEEP); + if (!ret) + ret = result; + } + } + return ret; +} + +/* Check link status and handle link state 
changes */ +static void emac_adjust_link(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + struct emac_phy *phy = &adpt->phy; + struct emac_hw *hw = &adpt->hw; + bool status_changed = false; + + if (!TEST_FLAG(adpt, ADPT_TASK_LSC_REQ)) + return; + CLR_FLAG(adpt, ADPT_TASK_LSC_REQ); + + /* ensure that no reset is in progress while link task is running */ + while (TEST_N_SET_FLAG(adpt, ADPT_STATE_RESETTING)) + /* Reset might take few 10s of ms */ + msleep(EMAC_ADPT_RESET_WAIT_TIME); + + if (TEST_FLAG(adpt, ADPT_STATE_DOWN)) + goto link_task_done; + + if (!phy->external) + phy->ops.link_check_no_ephy(adpt, phydev); + + if (phy->link_up != phydev->link) { + status_changed = true; + phy->link_up = phydev->link; + } + + if (phydev->link) { + /* check speed/duplex/pause changes */ + if (phy->link_speed != phydev->speed || + phy->link_duplex != phydev->duplex || + phy->link_pause != phydev->pause) { + phy->link_speed = phydev->speed; + phy->link_duplex = phydev->duplex; + phy->link_pause = phydev->pause; + status_changed = true; + } + + /* done if nothing has changed */ + if (!status_changed) + goto link_task_done; + + /* Acquire resources */ + pm_runtime_get_sync(netdev->dev.parent); + + /* Acquire wake lock if link is detected to avoid device going + * into suspend + */ + __pm_stay_awake(adpt->link_wlock); + + phy->ops.tx_clk_set_rate(adpt); + + emac_hw_start_mac(hw); + } else { + /* done if nothing has changed */ + if (!status_changed) + goto link_task_done; + + emac_hw_stop_mac(hw); + + /* Release wake lock if link is disconnected */ + __pm_relax(adpt->link_wlock); + + pm_runtime_mark_last_busy(netdev->dev.parent); + pm_runtime_put_autosuspend(netdev->dev.parent); + } + + if (status_changed) + phy_print_status(phydev); + +link_task_done: + CLR_FLAG(adpt, ADPT_STATE_RESETTING); +} + +/* Bringup the interface/HW */ +int emac_mac_up(struct emac_adapter *adpt) +{ + struct emac_phy *phy = 
&adpt->phy; + struct emac_hw *hw = &adpt->hw; + struct net_device *netdev = adpt->netdev; + int ret = 0; + int i = 0, irq_cnt = 0; + + if (!TEST_FLAG(adpt, ADPT_STATE_DOWN)) + return 0; + + emac_init_ring_ptrs(adpt); + emac_set_rx_mode(netdev); + + emac_hw_config_mac(hw); + emac_config_rss(adpt); + + ret = phy->ops.up(adpt); + if (ret) + return ret; + + for (irq_cnt = 0; irq_cnt < EMAC_NUM_CORE_IRQ; irq_cnt++) { + struct emac_irq_per_dev *irq = &adpt->irq[irq_cnt]; + const struct emac_irq_common *irq_cmn = + &emac_irq_cmn_tbl[irq_cnt]; + + if (!irq->irq) + continue; + + ret = request_irq(irq->irq, irq_cmn->handler, + irq_cmn->irqflags, irq_cmn->name, irq); + if (ret) { + emac_err(adpt, + "error:%d on request_irq(%d:%s flags:0x%lx)\n", + ret, irq->irq, irq_cmn->name, + irq_cmn->irqflags); + goto err_request_irq; + } + } + + for (i = 0; i < adpt->num_rxques; i++) + emac_refresh_rx_buffer(&adpt->rx_queue[i]); + + if (!adpt->phy.is_ext_phy_connect) { + ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, + phy->phy_interface); + if (ret) { + netdev_err(adpt->netdev, "could not connect phy\n"); + goto err_request_irq; + } + adpt->phy.is_ext_phy_connect = true; + } + + /* enable mac irq */ + emac_enable_intr(adpt); + + /* Reset phy related parameter */ + phy->link_up = 0; + phy->link_duplex = DUPLEX_UNKNOWN; + phy->link_speed = SPEED_UNKNOWN; + phy->link_pause = 0; + + /* Enable pause frames. 
*/ + linkmode_mod_bit(SUPPORTED_Pause, adpt->phydev->supported, 1); + linkmode_mod_bit(SUPPORTED_Asym_Pause, adpt->phydev->supported, 1); + linkmode_mod_bit(SUPPORTED_Pause, adpt->phydev->advertising, 1); + linkmode_mod_bit(SUPPORTED_Asym_Pause, adpt->phydev->advertising, 1); + + adpt->phydev->irq = PHY_POLL; + phy_start(adpt->phydev); + + emac_napi_enable_all(adpt); + netif_start_queue(netdev); + CLR_FLAG(adpt, ADPT_STATE_DOWN); + /* check link status */ + SET_FLAG(adpt, ADPT_TASK_LSC_REQ); + + return ret; + +err_request_irq: + while (--i >= 0) + if (adpt->irq[i].irq) + free_irq(adpt->irq[i].irq, + &adpt->irq[i]); + + adpt->phy.ops.down(adpt); + return ret; +} + +/* Bring down the interface/HW */ +void emac_mac_down(struct emac_adapter *adpt, u32 ctrl) +{ + struct net_device *netdev = adpt->netdev; + struct emac_phy *phy = &adpt->phy; + struct emac_hw *hw = &adpt->hw; + unsigned long flags; + int i; + + if (TEST_FLAG(adpt, ADPT_STATE_DOWN)) + return; + SET_FLAG(adpt, ADPT_STATE_DOWN); + + netif_stop_queue(netdev); + emac_napi_disable_all(adpt); + + phy_stop(adpt->phydev); + + /* Interrupts must be disabled before the PHY is disconnected, to + * avoid a race condition where adjust_link is null when we get + * an interrupt. 
+ */ + emac_disable_intr(adpt); + phy->ops.down(adpt); + + for (i = 0; i < EMAC_NUM_CORE_IRQ; i++) + if (adpt->irq[i].irq) + free_irq(adpt->irq[i].irq, &adpt->irq[i]); + + if ((adpt->phydev->phy_id == ATH8030_PHY_ID || + adpt->phydev->phy_id == ATH8031_PHY_ID || + adpt->phydev->phy_id == ATH8035_PHY_ID) && adpt->phy.is_ext_phy_connect) { + phy_disconnect(adpt->phydev); + adpt->phy.is_ext_phy_connect = false; + } + + CLR_FLAG(adpt, ADPT_TASK_LSC_REQ); + CLR_FLAG(adpt, ADPT_TASK_REINIT_REQ); + CLR_FLAG(adpt, ADPT_TASK_CHK_SGMII_REQ); + + cancel_work_sync(&adpt->hwtxtstamp_task); + spin_lock_irqsave(&adpt->hwtxtstamp_lock, flags); + __skb_queue_purge(&adpt->hwtxtstamp_pending_queue); + __skb_queue_purge(&adpt->hwtxtstamp_ready_queue); + spin_unlock_irqrestore(&adpt->hwtxtstamp_lock, flags); + + if (ctrl & EMAC_HW_CTRL_RESET_MAC) + emac_hw_reset_mac(hw); + + emac_clean_all_tx_queues(adpt); + emac_clean_all_rx_queues(adpt); +} + +/* Called when the network interface is made active */ +static int emac_open(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_phy *phy = &adpt->phy; + struct emac_irq_per_dev *irq = &adpt->irq[EMAC_WOL_IRQ]; + const struct emac_irq_common *irq_cmn = &emac_irq_cmn_tbl[EMAC_WOL_IRQ]; + int retval; + + /* allocate rx/tx dma buffer & descriptors */ + retval = emac_alloc_all_rtx_descriptor(adpt); + if (retval) { + emac_err(adpt, "error allocating rx/tx rings\n"); + goto err_alloc_rtx; + } + + pm_runtime_get_sync(netdev->dev.parent); + retval = emac_mac_up(adpt); + pm_runtime_mark_last_busy(netdev->dev.parent); + pm_runtime_put_autosuspend(netdev->dev.parent); + if (retval) + goto err_up; + + if (irq->irq) { + /* Register for EMAC WOL ISR */ + retval = request_threaded_irq(irq->irq, NULL, irq_cmn->handler, + IRQF_TRIGGER_LOW + | IRQF_ONESHOT, + irq_cmn->name, irq); + enable_irq_wake(irq->irq); + if (retval) { + emac_err(adpt, + "error:%d on request_irq(%d:%s flags:0x%lx)\n", + retval, irq->irq, 
irq_cmn->name, + irq_cmn->irqflags); + goto err_up; + } else { + phy->is_wol_irq_reg = true; + phy->is_wol_enabled = true; + emac_wol_gpio_irq(adpt, false); + } + } + return retval; + +err_up: + emac_free_all_rtx_descriptor(adpt); +err_alloc_rtx: + return retval; +} + +/* Called when the network interface is disabled */ +static int emac_close(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw *hw = &adpt->hw; + struct emac_phy *phy = &adpt->phy; + + pm_runtime_get_sync(netdev->dev.parent); + + if (adpt->irq[EMAC_WOL_IRQ].irq) { + phy->is_wol_enabled = false; + free_irq(adpt->irq[EMAC_WOL_IRQ].irq, &adpt->irq[EMAC_WOL_IRQ]); + phy->is_wol_irq_reg = false; //Use false instead of 0 + disable_irq_wake(adpt->irq[EMAC_WOL_IRQ].irq); + } + + if (!TEST_FLAG(adpt, ADPT_STATE_DOWN)) + emac_mac_down(adpt, EMAC_HW_CTRL_RESET_MAC); + else + emac_hw_reset_mac(hw); + + if (TEST_FLAG(hw, HW_PTP_CAP)) + emac_ptp_stop(hw); + + pm_runtime_mark_last_busy(netdev->dev.parent); + pm_runtime_put_autosuspend(netdev->dev.parent); + + emac_free_all_rtx_descriptor(adpt); + + return 0; +} + +/* Resize the descriptor rings */ +int emac_resize_rings(struct net_device *netdev) +{ + /* close and then re-open interface */ + emac_close(netdev); + return emac_open(netdev); +} + +/* IOCTL support for the interface */ +static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw *hw = &adpt->hw; + + if (!netif_running(netdev)) + return -EINVAL; + + if (!netdev->phydev) + return -ENODEV; + + switch (cmd) { + case SIOCSHWTSTAMP: + if (TEST_FLAG(hw, HW_PTP_CAP)) + return emac_tstamp_ioctl(netdev, ifr, cmd); + fallthrough; + default: + return phy_mii_ioctl(netdev->phydev, ifr, cmd); + } +} + +/* Read statistics information from the HW */ +void emac_update_hw_stats(struct emac_adapter *adpt) +{ + u16 hw_reg_addr = 0; + u64 *stats_item = NULL; + u32 val; + + /* Prevent stats 
update while adapter is being reset, or if the + * connection is down. + */ + if (adpt->phydev->speed <= 0) + return; + + if (TEST_FLAG(adpt, ADPT_STATE_DOWN) || TEST_FLAG(adpt, ADPT_STATE_RESETTING)) + return; + + /* update rx status */ + hw_reg_addr = REG_MAC_RX_STATUS_BIN; + stats_item = &adpt->hw_stats.rx_ok; + + while (hw_reg_addr <= REG_MAC_RX_STATUS_END) { + val = emac_reg_r32(&adpt->hw, EMAC, hw_reg_addr); + *stats_item += val; + stats_item++; + hw_reg_addr += sizeof(u32); + } + + /* additional rx status */ + val = emac_reg_r32(&adpt->hw, EMAC, EMAC_RXMAC_STATC_REG23); + adpt->hw_stats.rx_crc_align += val; + val = emac_reg_r32(&adpt->hw, EMAC, EMAC_RXMAC_STATC_REG24); + adpt->hw_stats.rx_jubbers += val; + + /* update tx status */ + hw_reg_addr = REG_MAC_TX_STATUS_BIN; + stats_item = &adpt->hw_stats.tx_ok; + + while (hw_reg_addr <= REG_MAC_TX_STATUS_END) { + val = emac_reg_r32(&adpt->hw, EMAC, hw_reg_addr); + *stats_item += val; + stats_item++; + hw_reg_addr += sizeof(u32); + } + + /* additional tx status */ + val = emac_reg_r32(&adpt->hw, EMAC, EMAC_TXMAC_STATC_REG25); + adpt->hw_stats.tx_col += val; +} + +/* Provide network statistics info for the interface */ +static void emac_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *net_stats) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw_stats *hw_stats = &adpt->hw_stats; + + spin_lock(&hw_stats->lock); + + memset(net_stats, 0, sizeof(struct rtnl_link_stats64)); + emac_update_hw_stats(adpt); + net_stats->rx_packets = hw_stats->rx_ok; + net_stats->tx_packets = hw_stats->tx_ok; + net_stats->rx_bytes = hw_stats->rx_byte_cnt; + net_stats->tx_bytes = hw_stats->tx_byte_cnt; + net_stats->multicast = hw_stats->rx_mcast; + net_stats->collisions = hw_stats->tx_1_col + + hw_stats->tx_2_col * 2 + + hw_stats->tx_late_col + hw_stats->tx_abort_col; + + net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + hw_stats->rx_sz_ov + + 
hw_stats->rx_align_err; + net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; + net_stats->rx_length_errors = hw_stats->rx_len_err; + net_stats->rx_crc_errors = hw_stats->rx_fcs_err; + net_stats->rx_frame_errors = hw_stats->rx_align_err; + net_stats->rx_over_errors = hw_stats->rx_rxf_ov; + net_stats->rx_missed_errors = hw_stats->rx_rxf_ov; + + net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col + + hw_stats->tx_underrun + hw_stats->tx_trunc; + net_stats->tx_fifo_errors = hw_stats->tx_underrun; + net_stats->tx_aborted_errors = hw_stats->tx_abort_col; + net_stats->tx_window_errors = hw_stats->tx_late_col; + + spin_unlock(&hw_stats->lock); +} + +static const struct net_device_ops emac_netdev_ops = { + .ndo_open = emac_open, + .ndo_stop = emac_close, + .ndo_validate_addr = eth_validate_addr, + .ndo_start_xmit = emac_start_xmit, + .ndo_set_mac_address = emac_set_mac_address, + .ndo_change_mtu = emac_change_mtu, + .ndo_do_ioctl = emac_ioctl, + .ndo_tx_timeout = emac_tx_timeout, + .ndo_get_stats64 = emac_get_stats64, + .ndo_set_features = emac_set_features, + .ndo_set_rx_mode = emac_set_rx_mode, +}; + +/* Reinitialize the interface/HW if required */ +static void emac_reinit_task_routine(struct emac_adapter *adpt) +{ + if (!TEST_FLAG(adpt, ADPT_TASK_REINIT_REQ)) + return; + CLR_FLAG(adpt, ADPT_TASK_REINIT_REQ); + + if (TEST_FLAG(adpt, ADPT_STATE_DOWN) || + TEST_FLAG(adpt, ADPT_STATE_RESETTING)) + return; + + emac_reinit_locked(adpt); +} + +/* Watchdog task routine, called to reinitialize the EMAC */ +static void emac_work_thread(struct work_struct *work) +{ + struct emac_adapter *adpt = + container_of(work, struct emac_adapter, work_thread); + + if (!TEST_FLAG(adpt, ADPT_STATE_WATCH_DOG)) + emac_warn(adpt, timer, adpt->netdev, "flag STATE_WATCH_DOG doesn't set\n"); + + emac_reinit_task_routine(adpt); + + adpt->irq_status &= ~ISR_GPHY_LINK; + + adpt->phy.ops.periodic_task(adpt); + + CLR_FLAG(adpt, ADPT_STATE_WATCH_DOG); +} + +/* Initialize all queue data 
structures */ +static void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev, + struct emac_adapter *adpt) +{ + int que_idx; + + adpt->num_txques = EMAC_DEF_TX_QUEUES; + adpt->num_rxques = EMAC_DEF_RX_QUEUES; + + for (que_idx = 0; que_idx < adpt->num_txques; que_idx++) { + struct emac_tx_queue *txque = &adpt->tx_queue[que_idx]; + + txque->que_idx = que_idx; + txque->netdev = adpt->netdev; + txque->dev = &pdev->dev; + } + + for (que_idx = 0; que_idx < adpt->num_rxques; que_idx++) { + struct emac_rx_queue *rxque = &adpt->rx_queue[que_idx]; + + rxque->que_idx = que_idx; + rxque->netdev = adpt->netdev; + rxque->dev = &pdev->dev; + } + + switch (adpt->num_rxques) { + case 4: + adpt->rx_queue[3].produce_reg = EMAC_MAILBOX_13; + adpt->rx_queue[3].produce_mask = RFD3_PROD_IDX_BMSK; + adpt->rx_queue[3].produce_shft = RFD3_PROD_IDX_SHFT; + + adpt->rx_queue[3].process_reg = EMAC_MAILBOX_13; + adpt->rx_queue[3].process_mask = RFD3_PROC_IDX_BMSK; + adpt->rx_queue[3].process_shft = RFD3_PROC_IDX_SHFT; + + adpt->rx_queue[3].consume_reg = EMAC_MAILBOX_8; + adpt->rx_queue[3].consume_mask = RFD3_CONS_IDX_BMSK; + adpt->rx_queue[3].consume_shft = RFD3_CONS_IDX_SHFT; + + adpt->rx_queue[3].irq = &adpt->irq[3]; + adpt->rx_queue[3].intr = adpt->irq[3].mask & ISR_RX_PKT; + + fallthrough; + case 3: + adpt->rx_queue[2].produce_reg = EMAC_MAILBOX_6; + adpt->rx_queue[2].produce_mask = RFD2_PROD_IDX_BMSK; + adpt->rx_queue[2].produce_shft = RFD2_PROD_IDX_SHFT; + + adpt->rx_queue[2].process_reg = EMAC_MAILBOX_6; + adpt->rx_queue[2].process_mask = RFD2_PROC_IDX_BMSK; + adpt->rx_queue[2].process_shft = RFD2_PROC_IDX_SHFT; + + adpt->rx_queue[2].consume_reg = EMAC_MAILBOX_7; + adpt->rx_queue[2].consume_mask = RFD2_CONS_IDX_BMSK; + adpt->rx_queue[2].consume_shft = RFD2_CONS_IDX_SHFT; + + adpt->rx_queue[2].irq = &adpt->irq[2]; + adpt->rx_queue[2].intr = adpt->irq[2].mask & ISR_RX_PKT; + + fallthrough; + case 2: + adpt->rx_queue[1].produce_reg = EMAC_MAILBOX_5; + adpt->rx_queue[1].produce_mask = 
RFD1_PROD_IDX_BMSK; + adpt->rx_queue[1].produce_shft = RFD1_PROD_IDX_SHFT; + + adpt->rx_queue[1].process_reg = EMAC_MAILBOX_5; + adpt->rx_queue[1].process_mask = RFD1_PROC_IDX_BMSK; + adpt->rx_queue[1].process_shft = RFD1_PROC_IDX_SHFT; + + adpt->rx_queue[1].consume_reg = EMAC_MAILBOX_7; + adpt->rx_queue[1].consume_mask = RFD1_CONS_IDX_BMSK; + adpt->rx_queue[1].consume_shft = RFD1_CONS_IDX_SHFT; + + adpt->rx_queue[1].irq = &adpt->irq[1]; + adpt->rx_queue[1].intr = adpt->irq[1].mask & ISR_RX_PKT; + + fallthrough; + case 1: + adpt->rx_queue[0].produce_reg = EMAC_MAILBOX_0; + adpt->rx_queue[0].produce_mask = RFD0_PROD_IDX_BMSK; + adpt->rx_queue[0].produce_shft = RFD0_PROD_IDX_SHFT; + + adpt->rx_queue[0].process_reg = EMAC_MAILBOX_0; + adpt->rx_queue[0].process_mask = RFD0_PROC_IDX_BMSK; + adpt->rx_queue[0].process_shft = RFD0_PROC_IDX_SHFT; + + adpt->rx_queue[0].consume_reg = EMAC_MAILBOX_3; + adpt->rx_queue[0].consume_mask = RFD0_CONS_IDX_BMSK; + adpt->rx_queue[0].consume_shft = RFD0_CONS_IDX_SHFT; + + adpt->rx_queue[0].irq = &adpt->irq[0]; + adpt->rx_queue[0].intr = adpt->irq[0].mask & ISR_RX_PKT; + break; + } + + switch (adpt->num_txques) { + case 4: + adpt->tx_queue[3].produce_reg = EMAC_MAILBOX_11; + adpt->tx_queue[3].produce_mask = H3TPD_PROD_IDX_BMSK; + adpt->tx_queue[3].produce_shft = H3TPD_PROD_IDX_SHFT; + + adpt->tx_queue[3].consume_reg = EMAC_MAILBOX_12; + adpt->tx_queue[3].consume_mask = H3TPD_CONS_IDX_BMSK; + adpt->tx_queue[3].consume_shft = H3TPD_CONS_IDX_SHFT; + + fallthrough; + case 3: + adpt->tx_queue[2].produce_reg = EMAC_MAILBOX_9; + adpt->tx_queue[2].produce_mask = H2TPD_PROD_IDX_BMSK; + adpt->tx_queue[2].produce_shft = H2TPD_PROD_IDX_SHFT; + + adpt->tx_queue[2].consume_reg = EMAC_MAILBOX_10; + adpt->tx_queue[2].consume_mask = H2TPD_CONS_IDX_BMSK; + adpt->tx_queue[2].consume_shft = H2TPD_CONS_IDX_SHFT; + + fallthrough; + case 2: + adpt->tx_queue[1].produce_reg = EMAC_MAILBOX_16; + adpt->tx_queue[1].produce_mask = H1TPD_PROD_IDX_BMSK; + 
adpt->tx_queue[1].produce_shft = H1TPD_PROD_IDX_SHFT; + + adpt->tx_queue[1].consume_reg = EMAC_MAILBOX_10; + adpt->tx_queue[1].consume_mask = H1TPD_CONS_IDX_BMSK; + adpt->tx_queue[1].consume_shft = H1TPD_CONS_IDX_SHFT; + + fallthrough; + case 1: + adpt->tx_queue[0].produce_reg = EMAC_MAILBOX_15; + adpt->tx_queue[0].produce_mask = NTPD_PROD_IDX_BMSK; + adpt->tx_queue[0].produce_shft = NTPD_PROD_IDX_SHFT; + + adpt->tx_queue[0].consume_reg = EMAC_MAILBOX_2; + adpt->tx_queue[0].consume_mask = NTPD_CONS_IDX_BMSK; + adpt->tx_queue[0].consume_shft = NTPD_CONS_IDX_SHFT; + break; + } +} + +/* Initialize various data structures */ +static void emac_init_adapter(struct emac_adapter *adpt) +{ + struct emac_phy *phy = &adpt->phy; + struct emac_hw *hw = &adpt->hw; + int max_frame; + + /* ids */ + hw->devid = (u16)emac_reg_field_r32(hw, EMAC, EMAC_DMA_MAS_CTRL, + DEV_ID_NUM_BMSK, DEV_ID_NUM_SHFT); + hw->revid = (u16)emac_reg_field_r32(hw, EMAC, EMAC_DMA_MAS_CTRL, + DEV_REV_NUM_BMSK, DEV_REV_NUM_SHFT); + + /* descriptors */ + adpt->num_txdescs = EMAC_DEF_TX_DESCS; + adpt->num_rxdescs = EMAC_DEF_RX_DESCS; + + /* mtu */ + adpt->netdev->mtu = ETH_DATA_LEN; + max_frame = adpt->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + adpt->rxbuf_size = adpt->netdev->mtu > EMAC_DEF_RX_BUF_SIZE ? 
+ ALIGN(max_frame, 8) : EMAC_DEF_RX_BUF_SIZE; + + /* dma */ + hw->dma_order = emac_dma_ord_out; + hw->dmar_block = emac_dma_req_4096; + hw->dmaw_block = emac_dma_req_128; + hw->dmar_dly_cnt = DMAR_DLY_CNT_DEF; + hw->dmaw_dly_cnt = DMAW_DLY_CNT_DEF; + hw->tpd_burst = TXQ0_NUM_TPD_PREF_DEF; + hw->rfd_burst = RXQ0_NUM_RFD_PREF_DEF; + + /* flow control */ + phy->req_fc_mode = EMAC_FC_FULL; + phy->cur_fc_mode = EMAC_FC_FULL; + phy->disable_fc_autoneg = false; + + /* rss */ + hw->rss_initialized = false; + hw->rss_hstype = 0; + hw->rss_idt_size = 0; + hw->rss_base_cpu = 0; + memset(hw->rss_idt, 0x0, sizeof(hw->rss_idt)); + memset(hw->rss_key, 0x0, sizeof(hw->rss_key)); + + /* irq moderator */ + hw->irq_mod = ((EMAC_DEF_RX_IRQ_MOD / 2) << IRQ_MODERATOR2_INIT_SHFT) | + ((EMAC_DEF_TX_IRQ_MOD / 2) << IRQ_MODERATOR_INIT_SHFT); + + /* others */ + hw->preamble = EMAC_PREAMBLE_DEF; + adpt->wol = EMAC_WOL_PHY; + + adpt->phy.is_ext_phy_connect = false; +} + +/* Get the clock */ +static int emac_clks_get(struct platform_device *pdev, + struct emac_adapter *adpt) +{ + unsigned int i; + + for (i = 0; i < EMAC_CLK_CNT; i++) { + struct clk *clk = devm_clk_get(&pdev->dev, emac_clk_name[i]); + + if (IS_ERR(clk)) { + emac_err(adpt, "error:%ld on clk_get(%s)\n", + PTR_ERR(clk), emac_clk_name[i]); + + return PTR_ERR(clk); + } + + adpt->clk[i].clk = clk; + } + + return 0; +} + +/* Initialize clocks */ +static int emac_clks_phase1_init(struct platform_device *pdev, + struct emac_adapter *adpt) +{ + int retval; + + retval = emac_clks_get(pdev, adpt); + if (retval) + return retval; + + retval = emac_clk_prepare_enable(adpt, EMAC_CLK_AXI); + if (retval) + return retval; + + retval = emac_clk_prepare_enable(adpt, EMAC_CLK_CFG_AHB); + if (retval) + return retval; + + retval = emac_clk_set_rate(adpt, EMAC_CLK_HIGH_SPEED, + EMC_CLK_RATE_19_2MHZ); + if (retval) + return retval; + + return emac_clk_prepare_enable(adpt, EMAC_CLK_HIGH_SPEED); +} + +/* Enable clocks; needs emac_init_clks to be called 
before */ +static int emac_clks_phase2_init(struct emac_adapter *adpt) +{ + int retval; + + retval = emac_clk_set_rate(adpt, EMAC_CLK_TX, EMC_CLK_RATE_125MHZ); + if (retval) + return retval; + + retval = emac_clk_prepare_enable(adpt, EMAC_CLK_TX); + if (retval) + return retval; + + retval = emac_clk_set_rate(adpt, EMAC_CLK_HIGH_SPEED, + EMC_CLK_RATE_125MHZ); + if (retval) + return retval; + + retval = emac_clk_set_rate(adpt, EMAC_CLK_MDIO, EMC_CLK_RATE_25MHZ); + if (retval) + return retval; + + retval = emac_clk_prepare_enable(adpt, EMAC_CLK_MDIO); + if (retval) + return retval; + + retval = emac_clk_prepare_enable(adpt, EMAC_CLK_RX); + if (retval) + return retval; + + retval = emac_clk_prepare_enable(adpt, EMAC_CLK_SYS); + + return retval; +} + +/* Disable clocks */ +static void emac_disable_clks(struct emac_adapter *adpt) +{ + u8 i; + + for (i = 0; i < EMAC_CLK_CNT; i++) { + struct emac_clk *clk = &adpt->clk[EMAC_CLK_CNT - i - 1]; + + if (clk->enabled) { + clk_disable_unprepare(clk->clk); + clk->enabled = false; + } + } +} + +static int msm_emac_pinctrl_init(struct emac_adapter *adpt, struct device *dev) +{ + adpt->pinctrl = devm_pinctrl_get(dev); + if (IS_ERR_OR_NULL(adpt->pinctrl)) { + emac_dbg(adpt, probe, adpt->netdev, "error:%ld Failed to get pin ctrl\n", + PTR_ERR(adpt->pinctrl)); + return PTR_ERR(adpt->pinctrl); + } + adpt->mdio_pins_clk_active = pinctrl_lookup_state(adpt->pinctrl, + EMAC_PINCTRL_STATE_MDIO_CLK_ACTIVE); + if (IS_ERR_OR_NULL(adpt->mdio_pins_clk_active)) { + emac_dbg(adpt, probe, adpt->netdev, "error:%ld Failed to lookup mdio clk pinctrl active state\n", + PTR_ERR(adpt->mdio_pins_clk_active)); + return PTR_ERR(adpt->mdio_pins_clk_active); + } + + adpt->mdio_pins_clk_sleep = pinctrl_lookup_state(adpt->pinctrl, + EMAC_PINCTRL_STATE_MDIO_CLK_SLEEP); + if (IS_ERR_OR_NULL(adpt->mdio_pins_clk_sleep)) { + emac_dbg(adpt, probe, adpt->netdev, "error:%ld Failed to lookup mdio pinctrl sleep state\n", + PTR_ERR(adpt->mdio_pins_clk_sleep)); + return 
PTR_ERR(adpt->mdio_pins_clk_sleep); + } + + adpt->mdio_pins_data_active = pinctrl_lookup_state(adpt->pinctrl, + EMAC_PINCTRL_STATE_MDIO_DATA_ACTIVE); + if (IS_ERR_OR_NULL(adpt->mdio_pins_data_active)) { + emac_dbg(adpt, probe, adpt->netdev, "error:%ld Failed to lookup mdio data pinctrl active state\n", + PTR_ERR(adpt->mdio_pins_data_active)); + return PTR_ERR(adpt->mdio_pins_data_active); + } + + adpt->mdio_pins_data_sleep = pinctrl_lookup_state(adpt->pinctrl, + EMAC_PINCTRL_STATE_MDIO_DATA_SLEEP); + if (IS_ERR_OR_NULL(adpt->mdio_pins_data_sleep)) { + emac_dbg(adpt, probe, adpt->netdev, "error:%ld Failed to lookup mdio pinctrl sleep state\n", + PTR_ERR(adpt->mdio_pins_data_sleep)); + return PTR_ERR(adpt->mdio_pins_data_sleep); + } + + adpt->ephy_pins_active = pinctrl_lookup_state(adpt->pinctrl, + EMAC_PINCTRL_STATE_EPHY_ACTIVE); + if (IS_ERR_OR_NULL(adpt->ephy_pins_active)) { + emac_dbg(adpt, probe, adpt->netdev, "error:%ld Failed to lookup ephy pinctrl active state\n", + PTR_ERR(adpt->ephy_pins_active)); + return PTR_ERR(adpt->ephy_pins_active); + } + + adpt->ephy_pins_sleep = pinctrl_lookup_state(adpt->pinctrl, + EMAC_PINCTRL_STATE_EPHY_SLEEP); + if (IS_ERR_OR_NULL(adpt->ephy_pins_sleep)) { + emac_dbg(adpt, probe, adpt->netdev, "error:%ld Failed to lookup ephy pinctrl sleep state\n", + PTR_ERR(adpt->ephy_pins_sleep)); + return PTR_ERR(adpt->ephy_pins_sleep); + } + + return 0; +} + +static void msm_emac_clk_path_vote(struct emac_adapter *adpt, + enum emac_bus_vote vote) +{ + if (adpt->bus_cl_hdl) + if (msm_bus_scale_client_update_request(adpt->bus_cl_hdl, vote)) + emac_err(adpt, "Failed to vote for bus bw\n"); +} + +static void msm_emac_clk_path_teardown(struct emac_adapter *adpt) +{ + if (adpt->bus_cl_hdl) { + msm_emac_clk_path_vote(adpt, EMAC_NO_PERF_VOTE); + msm_bus_scale_unregister_client(adpt->bus_cl_hdl); + adpt->bus_cl_hdl = 0; + } +} + +static void msm_emac_clk_path_init(struct platform_device *pdev, + struct emac_adapter *adpt) +{ + /* Get bus scalling 
data */ + adpt->bus_scale_table = msm_bus_cl_get_pdata(pdev); + if (IS_ERR_OR_NULL(adpt->bus_scale_table)) { + emac_err(adpt, "bus scaling is disabled\n"); + return; + } + + adpt->bus_cl_hdl = msm_bus_scale_register_client(adpt->bus_scale_table); + if (!adpt->bus_cl_hdl) + emac_err(adpt, "Failed to register BUS scaling client!!\n"); +} + +/* Get the resources */ +static int emac_get_resources(struct platform_device *pdev, + struct emac_adapter *adpt) +{ + struct resource *res; + struct net_device *netdev = adpt->netdev; + struct device_node *node = pdev->dev.of_node; + static const char * const res_name[] = {"emac", "emac_csr", + "emac_1588"}; + int retval = 0, bus_id = 0; + u8 mac_addr[ETH_ALEN] = {0}; + u8 i; + + if (!node) + return -ENODEV; + + /* get bus id */ + bus_id = of_alias_get_id(node, "emac"); + if (bus_id >= 0) + pdev->id = bus_id; + + /* get time stamp enable flag */ + if (ACPI_COMPANION(&pdev->dev)) + adpt->tstamp_en = device_property_read_bool(&pdev->dev, + "qcom,emac-tstamp-en"); + else + adpt->tstamp_en = of_property_read_bool(node, "qcom,emac-tstamp-en"); + + /* get mac address */ + if (ACPI_COMPANION(&pdev->dev)) { + retval = device_property_read_u8_array(&pdev->dev, + "mac-address", + mac_addr, ETH_ALEN); + if (!retval) + ether_addr_copy((u8 *)netdev->dev_addr, mac_addr); + } else { + retval = of_get_mac_address(node, mac_addr); + if (!retval) + ether_addr_copy((u8 *)netdev->dev_addr, mac_addr); + } + + /* Get pinctrl */ + retval = msm_emac_pinctrl_init(adpt, &pdev->dev); + if (retval) + return retval; + + adpt->gpio_on = msm_emac_request_pinctrl_on; + adpt->gpio_off = msm_emac_request_pinctrl_off; + + /* get irqs */ + for (i = 0; i < EMAC_IRQ_CNT; i++) { + retval = platform_get_irq_byname(pdev, + emac_irq_cmn_tbl[i].name); + adpt->irq[i].irq = (retval > 0) ? 
retval : 0; + } + + /* get register addresses */ + retval = 0; + for (i = 0; i < NUM_EMAC_REG_BASES; i++) { + /* 1588 is required only if tstamp is enabled */ + if (i == EMAC_1588 && !adpt->tstamp_en) + continue; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + res_name[i]); + adpt->hw.reg_addr[i] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(adpt->hw.reg_addr[i])) { + emac_err(adpt, "can't remap %s\n", res_name[i]); + retval = PTR_ERR(adpt->hw.reg_addr[i]); + break; + } + } + + netdev->base_addr = (unsigned long)adpt->hw.reg_addr[EMAC]; + + if (ACPI_HANDLE(adpt->dev)) + retval = emac_acpi_get_resources(pdev, adpt); + + msm_emac_clk_path_init(pdev, adpt); + return retval; +} + +/* Get the regulator */ +static int emac_get_regulator(struct platform_device *pdev, + struct emac_adapter *adpt) +{ + struct regulator *vreg; + u8 i; + int len = 0; + u32 tmp[EMAC_VREG_CNT]; + + for (i = 0; i < EMAC_VREG_CNT; i++) { + vreg = devm_regulator_get(&pdev->dev, emac_regulator_name[i]); + + if (IS_ERR(vreg)) { + emac_err(adpt, "error:%ld unable to get emac %s\n", + PTR_ERR(vreg), emac_regulator_name[i]); + return PTR_ERR(vreg); + } + adpt->vreg[i].vreg = vreg; + } + + if (of_get_property(pdev->dev.of_node, + "qcom,vdd-voltage-level", &len)) { + if (len == sizeof(tmp)) { + of_property_read_u32_array(pdev->dev.of_node, + "qcom,vdd-voltage-level", + tmp, len / sizeof(*tmp)); + + for (i = 0; i < EMAC_VREG_CNT; i++) + adpt->vreg[i].voltage_uv = tmp[i]; + } else { + emac_err(adpt, "unable to read voltage values for emac LDOs\n"); + return -EINVAL; + } + } else { + emac_err(adpt, "unable to read qcom,vdd-voltage-level emac dt property\n"); + return -EINVAL; + } + return 0; +} + +/* Set the Voltage */ +static int emac_set_voltage(struct emac_adapter *adpt, enum emac_vreg_id id, + int min_uV, int max_uV) +{ + int retval = regulator_set_voltage(adpt->vreg[id].vreg, min_uV, max_uV); + + if (retval) + emac_err(adpt, + "error:%d set voltage for %s\n", + retval, 
emac_regulator_name[id]); + return retval; +} + +/* Enable the emac core, internal/external phy regulator */ +static int emac_enable_regulator(struct emac_adapter *adpt, u8 start, u8 end) +{ + int retval = 0; + u8 i; + + for (i = start; i <= end; i++) { + if (adpt->vreg[i].enabled) + continue; + + if (adpt->vreg[i].voltage_uv) { + retval = emac_set_voltage(adpt, i, + adpt->vreg[i].voltage_uv, + adpt->vreg[i].voltage_uv); + if (retval) + goto err; + } + + retval = regulator_enable(adpt->vreg[i].vreg); + if (retval) { + emac_err(adpt, "error:%d enable regulator %s\n", + retval, emac_regulator_name[EMAC_VREG3]); + goto err; + } else { + adpt->vreg[i].enabled = true; + } + } +err: + return retval; +} + +/* Disable the emac core, internal/external phy regulator */ +static void emac_disable_regulator(struct emac_adapter *adpt, u8 start, u8 end) +{ + u8 i; + + for (i = start; i <= end; i++) { + struct emac_regulator *vreg = &adpt->vreg[i]; + + if (!vreg->enabled) + continue; + + regulator_disable(vreg->vreg); + vreg->enabled = false; + + if (adpt->vreg[i].voltage_uv) { + emac_set_voltage(adpt, i, + 0, adpt->vreg[i].voltage_uv); + } + } +} + +/* LDO init */ +static int msm_emac_ldo_init(struct platform_device *pdev, + struct emac_adapter *adpt) +{ + int retval = 0; + + retval = emac_get_regulator(pdev, adpt); + if (retval) + return retval; + + retval = emac_enable_regulator(adpt, EMAC_VREG1, EMAC_VREG5); + if (retval) + return retval; + return 0; +} + +static int emac_pm_suspend(struct device *device, bool wol_enable) +{ + struct platform_device *pdev = to_platform_device(device); + struct net_device *netdev = dev_get_drvdata(&pdev->dev); + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw *hw = &adpt->hw; + struct emac_phy *phy = &adpt->phy; + u32 wufc = adpt->wol; + + /* Check link state. 
Don't suspend if link is up */ + if (netif_carrier_ok(adpt->netdev) && !(adpt->wol & EMAC_WOL_MAGIC)) + return -EPERM; + + /* cannot suspend if WOL interrupt is not enabled */ + if (!adpt->irq[EMAC_WOL_IRQ].irq) + return -EPERM; + + if (netif_running(netdev)) { + /* ensure no task is running and no reset is in progress */ + while (TEST_N_SET_FLAG(adpt, ADPT_STATE_RESETTING)) + /* Reset might take few 10s of ms */ + msleep(EMAC_ADPT_RESET_WAIT_TIME); + + emac_mac_down(adpt, 0); + + CLR_FLAG(adpt, ADPT_STATE_RESETTING); + } + + phy_suspend(adpt->phydev); + flush_delayed_work(&adpt->phydev->state_queue); + if (adpt->phydev->phy_id != QCA8337_PHY_ID) + emac_hw_config_pow_save(hw, adpt->phydev->speed, !!wufc, + !!(wufc & EMAC_WOL_PHY)); + + if (!adpt->phydev->link && phy->is_wol_irq_reg) { + int value, i; + + for (i = 0; i < QCA8337_NUM_PHYS ; i++) { + /* ePHY driver keep external phy into power down mode + * if WOL is not enabled. This change is to make sure + * to keep ePHY in active state for LINK UP to work + */ + value = mdiobus_read(adpt->phydev->mdio.bus, i, MII_BMCR); + value &= ~BMCR_PDOWN; + mdiobus_write(adpt->phydev->mdio.bus, i, MII_BMCR, value); + + /* Enable EPHY Link UP interrupt */ + mdiobus_write(adpt->phydev->mdio.bus, i, MII_INT_ENABLE, + LINK_SUCCESS_INTERRUPT | + LINK_SUCCESS_BX); + } + + /* enable switch interrupts */ + if (adpt->phydev->phy_id == QCA8337_PHY_ID) { + qca8337_write(adpt->phydev->priv, + QCA8337_GLOBAL_INT1_MASK, 0x8000); + } + + if (wol_enable && phy->is_wol_irq_reg) + emac_wol_gpio_irq(adpt, true); + } + + adpt->gpio_off(adpt, true, false); + msm_emac_clk_path_vote(adpt, EMAC_NO_PERF_VOTE); + return 0; +} + +static int emac_pm_resume(struct device *device) +{ + struct platform_device *pdev = to_platform_device(device); + struct net_device *netdev = dev_get_drvdata(&pdev->dev); + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw *hw = &adpt->hw; + struct emac_phy *phy = &adpt->phy; + int retval = 0, i; + + 
adpt->gpio_on(adpt, true, false); + msm_emac_clk_path_vote(adpt, EMAC_MAX_PERF_VOTE); + emac_hw_reset_mac(hw); + + /* Disable EPHY Link UP interrupt */ + if (phy->is_wol_irq_reg) { + for (i = 0; i < QCA8337_NUM_PHYS ; i++) + mdiobus_write(adpt->phydev->mdio.bus, i, MII_INT_ENABLE, 0); + } + + /* disable switch interrupts */ + if (adpt->phydev->phy_id == QCA8337_PHY_ID) + qca8337_write(adpt->phydev->priv, QCA8337_GLOBAL_INT1, 0x8000); + + phy_resume(adpt->phydev); + + if (netif_running(netdev)) { + retval = emac_mac_up(adpt); + if (retval) + goto error; + } + return 0; +error: + return retval; +} + +#ifdef CONFIG_PM +static int emac_pm_runtime_suspend(struct device *device) +{ + return emac_pm_suspend(device, true); +} + +static int emac_pm_runtime_resume(struct device *device) +{ + return emac_pm_resume(device); +} + +static int emac_pm_runtime_idle(struct device *device) +{ + return 0; +} +#else +#define emac_pm_runtime_suspend NULL +#define emac_pm_runtime_resume NULL +#define emac_pm_runtime_idle NULL +#endif /* CONFIG_PM */ + +#ifdef CONFIG_PM_SLEEP +static int emac_pm_sys_suspend(struct device *device) +{ + struct platform_device *pdev = to_platform_device(device); + struct net_device *netdev = dev_get_drvdata(&pdev->dev); + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_phy *phy = &adpt->phy; + + /* Disable EPHY WOL interrupt*/ + if (phy->is_wol_irq_reg) + emac_wol_gpio_irq(adpt, false); + + if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) { + emac_pm_suspend(device, false); + + /* Synchronize runtime-pm and system-pm states: + * at this point we are already suspended. However, the + * runtime-PM framework still thinks that we are active. 
+ * The three calls below let the runtime-PM know that we are + * suspended already without re-invoking the suspend callback + */ + if (adpt->wol & EMAC_WOL_MAGIC) { + pm_runtime_mark_last_busy(netdev->dev.parent); + pm_runtime_put_autosuspend(netdev->dev.parent); + } + pm_runtime_disable(netdev->dev.parent); + pm_runtime_set_suspended(netdev->dev.parent); + pm_runtime_enable(netdev->dev.parent); + + /* Clear the Magic packet flag */ + adpt->wol &= ~EMAC_WOL_MAGIC; + } + netif_device_detach(netdev); + emac_disable_clks(adpt); + emac_disable_regulator(adpt, EMAC_VREG1, EMAC_VREG2); + return 0; +} + +static int emac_pm_sys_resume(struct device *device) +{ + struct platform_device *pdev = to_platform_device(device); + struct net_device *netdev = dev_get_drvdata(&pdev->dev); + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_phy *phy = &adpt->phy; + + emac_enable_regulator(adpt, EMAC_VREG1, EMAC_VREG2); + emac_clks_phase1_init(pdev, adpt); + emac_clks_phase2_init(adpt); + netif_device_attach(netdev); + + if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) { + /* if runtime PM callback was not invoked (when both runtime-pm + * and systme-pm are in transition concurrently) + */ + emac_pm_resume(device); + pm_runtime_mark_last_busy(netdev->dev.parent); + pm_request_autosuspend(netdev->dev.parent); + } + /* Enable EPHY WOL interrupt*/ + if (phy->is_wol_irq_reg) + emac_wol_gpio_irq(adpt, true); + return 0; +} +#endif + +/* Probe function */ +static int emac_probe(struct platform_device *pdev) +{ + struct net_device *netdev; + struct emac_adapter *adpt; + struct emac_phy *phy; + struct emac_hw *hw; + int ret; + u8 i; + u32 hw_ver; + + /* The EMAC itself is capable of 64-bit DMA, so try that first. */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + /* Some platforms may restrict the EMAC's address bus to less + * then the size of DDR. In this case, we need to try a + * smaller mask. 
We could try every possible smaller mask, + * but that's overkill. Instead, just fall to 46-bit, which + * should always work. + */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46)); + if (ret) { + dev_err(&pdev->dev, "could not set DMA mask\n"); + return ret; + } + } + + netdev = alloc_etherdev(sizeof(struct emac_adapter)); + if (!netdev) { + dev_err(&pdev->dev, "etherdev alloc failed\n"); + return -ENOMEM; + } + + dev_set_drvdata(&pdev->dev, netdev); + SET_NETDEV_DEV(netdev, &pdev->dev); + + adpt = netdev_priv(netdev); + adpt->netdev = netdev; + phy = &adpt->phy; + hw = &adpt->hw; + adpt->msg_enable = netif_msg_init(msm_emac_msglvl, EMAC_MSG_DEFAULT); + + for (i = 0; i < EMAC_IRQ_CNT; i++) { + adpt->irq[i].idx = i; + adpt->irq[i].mask = emac_irq_cmn_tbl[i].init_mask; + } + adpt->irq[0].mask |= (msm_emac_intr_ext ? IMR_EXTENDED_MASK : + IMR_NORMAL_MASK); + + ret = emac_get_resources(pdev, adpt); + if (ret) + goto err_get_resource; + + ret = msm_emac_ldo_init(pdev, adpt); + if (ret) + goto err_ldo_init; + + /* initialize clocks */ + ret = emac_clks_phase1_init(pdev, adpt); + if (ret) + goto err_clk_init; + + netdev->watchdog_timeo = EMAC_WATCHDOG_TIME; + netdev->irq = adpt->irq[0].irq; + + if (adpt->tstamp_en) + adpt->rrdesc_size = EMAC_TS_RRDESC_SIZE; + else + adpt->rrdesc_size = EMAC_RRDESC_SIZE; + + adpt->tpdesc_size = EMAC_TPDESC_SIZE; + adpt->rfdesc_size = EMAC_RFDESC_SIZE; + + if (adpt->tstamp_en) + SET_FLAG(hw, HW_PTP_CAP); + + /* init netdev */ + netdev->netdev_ops = &emac_netdev_ops; + + emac_set_ethtool_ops(netdev); + + /* init internal phy */ + ret = emac_phy_config_internal(pdev, adpt); + if (ret) + goto err_clk_init; + + /* enable clocks */ + ret = emac_clks_phase2_init(adpt); + if (ret) + goto err_clk_init; + + hw_ver = emac_reg_r32(hw, EMAC, EMAC_CORE_HW_VERSION); + + /* init adapter */ + emac_init_adapter(adpt); + + /* Configure MDIO lines */ + ret = adpt->gpio_on(adpt, true, true); + if (ret) + goto err_clk_init; + + /* init external 
phy */ + ret = emac_phy_config_external(pdev, adpt); + if (ret) + goto err_init_mdio_gpio; + + /* reset mac */ + emac_hw_reset_mac(hw); + + /* set hw features */ + netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + netdev->hw_features = netdev->features; + + netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; + + INIT_WORK(&adpt->work_thread, emac_work_thread); + + /* Initialize queues */ + emac_mac_rx_tx_ring_init_all(pdev, adpt); + + for (i = 0; i < adpt->num_rxques; i++) + netif_napi_add(netdev, &adpt->rx_queue[i].napi, + emac_napi_rtx); + + spin_lock_init(&adpt->hw_stats.lock); + spin_lock_init(&adpt->hwtxtstamp_lock); + spin_lock_init(&phy->wol_irq_lock); + skb_queue_head_init(&adpt->hwtxtstamp_pending_queue); + skb_queue_head_init(&adpt->hwtxtstamp_ready_queue); + INIT_WORK(&adpt->hwtxtstamp_task, emac_hwtxtstamp_task_routine); + adpt->link_wlock = wakeup_source_register(&pdev->dev, dev_name(&pdev->dev)); + + SET_FLAG(hw, HW_VLANSTRIP_EN); + SET_FLAG(adpt, ADPT_STATE_DOWN); + strscpy(netdev->name, "eth%d", sizeof(netdev->name)); + + pm_runtime_set_autosuspend_delay(&pdev->dev, EMAC_TRY_LINK_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + /* if !CONFIG_PM_RUNTIME then enable all the resources here and mange + * resources from system suspend/resume callbacks + */ + if (!pm_runtime_enabled(&pdev->dev)) + emac_pm_resume(&pdev->dev); + + /* libphy will determine the link state */ + netif_carrier_off(netdev); + + ret = register_netdev(netdev); + if (ret) { + emac_err(adpt, "register netdevice failed\n"); + goto err_undo_napi; + } + + if (TEST_FLAG(hw, HW_PTP_CAP)) { + pm_runtime_get_sync(&pdev->dev); + emac_ptp_init(adpt->netdev); + pm_runtime_mark_last_busy(&pdev->dev); + pm_runtime_put_autosuspend(&pdev->dev); + } + + emac_dbg(adpt, probe, adpt->netdev, "HW ID %d.%d, HW version 
%d.%d.%d\n", + hw->devid, hw->revid, + (hw_ver & MAJOR_BMSK) >> MAJOR_SHFT, + (hw_ver & MINOR_BMSK) >> MINOR_SHFT, + (hw_ver & STEP_BMSK) >> STEP_SHFT); + + return 0; + +err_undo_napi: + for (i = 0; i < adpt->num_rxques; i++) + netif_napi_del(&adpt->rx_queue[i].napi); + if (!ACPI_COMPANION(&pdev->dev)) + put_device(&adpt->phydev->mdio.dev); + mdiobus_unregister(adpt->mii_bus); +err_init_mdio_gpio: + adpt->gpio_off(adpt, true, true); +err_clk_init: + emac_disable_clks(adpt); +err_ldo_init: + emac_disable_regulator(adpt, EMAC_VREG1, EMAC_VREG5); +err_get_resource: + free_netdev(netdev); + + return ret; +} + +static int emac_remove(struct platform_device *pdev) +{ + struct net_device *netdev = dev_get_drvdata(&pdev->dev); + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw *hw = &adpt->hw; + struct emac_sgmii *sgmii = adpt->phy.private; + struct emac_phy *phy = &adpt->phy; + u32 i; + + if (!pm_runtime_enabled(&pdev->dev) || + !pm_runtime_suspended(&pdev->dev)) { + if (netif_running(netdev)) + emac_mac_down(adpt, 0); + + pm_runtime_disable(netdev->dev.parent); + pm_runtime_set_suspended(netdev->dev.parent); + pm_runtime_enable(netdev->dev.parent); + } + + pm_runtime_disable(netdev->dev.parent); + + /* Disable EPHY WOL interrupt in suspend */ + if (phy->is_wol_irq_reg) + emac_wol_gpio_irq(adpt, false); + + mdiobus_unregister(adpt->mii_bus); + unregister_netdev(netdev); + + for (i = 0; i < adpt->num_rxques; i++) + netif_napi_del(&adpt->rx_queue[i].napi); + + wakeup_source_unregister(adpt->link_wlock); + + if (TEST_FLAG(hw, HW_PTP_CAP)) + emac_ptp_remove(netdev); + + adpt->gpio_off(adpt, true, true); + emac_disable_clks(adpt); + emac_disable_regulator(adpt, EMAC_VREG1, EMAC_VREG5); + msm_emac_clk_path_teardown(adpt); + + if (!ACPI_COMPANION(&pdev->dev)) + put_device(&adpt->phydev->mdio.dev); + + if (sgmii->digital) + iounmap(sgmii->digital); + if (sgmii->base) + iounmap(sgmii->base); + + free_netdev(netdev); + dev_set_drvdata(&pdev->dev, NULL); + return 0; 
+} + +static const struct dev_pm_ops emac_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(emac_pm_sys_suspend, + emac_pm_sys_resume + ) + SET_RUNTIME_PM_OPS(emac_pm_runtime_suspend, + emac_pm_runtime_resume, + emac_pm_runtime_idle + ) +}; + +static const struct of_device_id emac_dt_match[] = { + { + .compatible = "qcom,emac", + }, + { + .compatible = "qcom,mdm9607-emac", + }, + {} +}; +MODULE_DEVICE_TABLE(of, emac_dt_match); + +static struct platform_driver emac_platform_driver = { + .probe = emac_probe, + .remove = emac_remove, + .driver = { + .name = "qcom-emac", + .pm = &emac_pm_ops, + .of_match_table = emac_dt_match, + .acpi_match_table = ACPI_PTR(emac_acpi_match), + }, +}; + +static int __init emac_init_module(void) +{ + return platform_driver_register(&emac_platform_driver); +} + +static void __exit emac_exit_module(void) +{ + platform_driver_unregister(&emac_platform_driver); +} + +module_init(emac_init_module); +module_exit(emac_exit_module); + +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/qualcomm/emac/emac_main.h b/drivers/net/ethernet/qualcomm/emac/emac_main.h new file mode 100644 index 000000000000..57f74ec4bbb2 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_main.h @@ -0,0 +1,787 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _QCOM_EMAC_MAIN_H_ +#define _QCOM_EMAC_MAIN_H_ + +#include +#include +#include +#include +#include +//#include + +#include "emac_phy.h" + +/* Device IDs */ +#define EMAC_DEV_ID 0x0040 + +/* DMA address */ +#define DMA_ADDR_HI_MASK 0xffffffff00000000ULL +#define DMA_ADDR_LO_MASK 0x00000000ffffffffULL + +#define EMAC_DMA_ADDR_HI(_addr) \ + ((u32)(((u64)(_addr) & DMA_ADDR_HI_MASK) >> 32)) +#define EMAC_DMA_ADDR_LO(_addr) \ + ((u32)((u64)(_addr) & DMA_ADDR_LO_MASK)) + +/* 4 emac core irq and 1 wol irq */ +#define EMAC_NUM_CORE_IRQ 4 +#define EMAC_CORE0_IRQ 0 +#define EMAC_CORE1_IRQ 1 +#define EMAC_CORE2_IRQ 2 +#define EMAC_CORE3_IRQ 3 +#define EMAC_WOL_IRQ 4 +#define EMAC_IRQ_CNT 5 +/* mdio/mdc gpios */ +#define EMAC_GPIO_CNT 2 + +#define EMAC_ADPT_RESET_WAIT_TIME 20 + +/** + * Requested EMAC votes for BUS bandwidth + * + * EMAC_NO_PERF_VOTE BUS Vote for inactive EMAC session or disconnect + * EMAC_MAX_PERF_VOTE Maximum BUS bandwidth vote + * + */ +enum emac_bus_vote { + EMAC_NO_PERF_VOTE = 0, + EMAC_MAX_PERF_VOTE +}; + +enum emac_vreg_id { + EMAC_VREG1, + EMAC_VREG2, + EMAC_VREG3, + EMAC_VREG4, + EMAC_VREG5, + EMAC_VREG_CNT +}; + +enum emac_clk_id { + EMAC_CLK_AXI, + EMAC_CLK_CFG_AHB, + EMAC_CLK_HIGH_SPEED, + EMAC_CLK_MDIO, + EMAC_CLK_TX, + EMAC_CLK_RX, + EMAC_CLK_SYS, + EMAC_CLK_CNT +}; + +#define KHz(RATE) ((RATE) * 1000) +#define MHz(RATE) (KHz(RATE) * 1000) + +enum emac_clk_rate { + EMC_CLK_RATE_2_5MHZ = KHz(2500), + EMC_CLK_RATE_19_2MHZ = KHz(19200), + EMC_CLK_RATE_25MHZ = MHz(25), + EMC_CLK_RATE_125MHZ = MHz(125), +}; + +#define EMAC_LINK_SPEED_UNKNOWN 0x0 +#define EMAC_LINK_SPEED_10_HALF 0x0001 +#define EMAC_LINK_SPEED_10_FULL 0x0002 +#define EMAC_LINK_SPEED_100_HALF 0x0004 +#define EMAC_LINK_SPEED_100_FULL 0x0008 +#define EMAC_LINK_SPEED_1GB_FULL 0x0020 + +#define EMAC_MAX_SETUP_LNK_CYCLE 100 + +/* Wake On Lan */ +#define EMAC_WOL_PHY 0x00000001 /* PHY Status Change */ +#define EMAC_WOL_MAGIC 0x00000002 /* Magic Packet */ + +enum emac_reg_bases 
{ + EMAC, + EMAC_CSR, + EMAC_1588, + NUM_EMAC_REG_BASES +}; + +/* DMA Order Settings */ +enum emac_dma_order { + emac_dma_ord_in = 1, + emac_dma_ord_enh = 2, + emac_dma_ord_out = 4 +}; + +enum emac_dma_req_block { + emac_dma_req_128 = 0, + emac_dma_req_256 = 1, + emac_dma_req_512 = 2, + emac_dma_req_1024 = 3, + emac_dma_req_2048 = 4, + emac_dma_req_4096 = 5 +}; + +/* IEEE1588 */ +enum emac_ptp_clk_mode { + emac_ptp_clk_mode_oc_two_step, + emac_ptp_clk_mode_oc_one_step +}; + +enum emac_ptp_mode { + emac_ptp_mode_slave, + emac_ptp_mode_master +}; + +struct emac_hw_stats { + /* rx */ + u64 rx_ok; /* good packets */ + u64 rx_bcast; /* good broadcast packets */ + u64 rx_mcast; /* good multicast packets */ + u64 rx_pause; /* pause packet */ + u64 rx_ctrl; /* control packets other than pause frame. */ + u64 rx_fcs_err; /* packets with bad FCS. */ + u64 rx_len_err; /* packets with length mismatch */ + u64 rx_byte_cnt; /* good bytes count (without FCS) */ + u64 rx_runt; /* runt packets */ + u64 rx_frag; /* fragment count */ + u64 rx_sz_64; /* packets that are 64 bytes */ + u64 rx_sz_65_127; /* packets that are 65-127 bytes */ + u64 rx_sz_128_255; /* packets that are 128-255 bytes */ + u64 rx_sz_256_511; /* packets that are 256-511 bytes */ + u64 rx_sz_512_1023; /* packets that are 512-1023 bytes */ + u64 rx_sz_1024_1518; /* packets that are 1024-1518 bytes */ + u64 rx_sz_1519_max; /* packets that are 1519-MTU bytes*/ + u64 rx_sz_ov; /* packets that are >MTU bytes (truncated) */ + u64 rx_rxf_ov; /* packets dropped due to RX FIFO overflow */ + u64 rx_align_err; /* alignment errors */ + u64 rx_bcast_byte_cnt; /* broadcast packets byte count (without FCS) */ + u64 rx_mcast_byte_cnt; /* multicast packets byte count (without FCS) */ + u64 rx_err_addr; /* packets dropped due to address filtering */ + u64 rx_crc_align; /* CRC align errors */ + u64 rx_jubbers; /* jubbers */ + + /* tx */ + u64 tx_ok; /* good packets */ + u64 tx_bcast; /* good broadcast packets */ + u64 tx_mcast; /* 
good multicast packets */ + u64 tx_pause; /* pause packets */ + u64 tx_exc_defer; /* packets with excessive deferral */ + u64 tx_ctrl; /* control packets other than pause frame */ + u64 tx_defer; /* packets that are deferred. */ + u64 tx_byte_cnt; /* good bytes count (without FCS) */ + u64 tx_sz_64; /* packets that are 64 bytes */ + u64 tx_sz_65_127; /* packets that are 65-127 bytes */ + u64 tx_sz_128_255; /* packets that are 128-255 bytes */ + u64 tx_sz_256_511; /* packets that are 256-511 bytes */ + u64 tx_sz_512_1023; /* packets that are 512-1023 bytes */ + u64 tx_sz_1024_1518; /* packets that are 1024-1518 bytes */ + u64 tx_sz_1519_max; /* packets that are 1519-MTU bytes */ + u64 tx_1_col; /* packets single prior collision */ + u64 tx_2_col; /* packets with multiple prior collisions */ + u64 tx_late_col; /* packets with late collisions */ + u64 tx_abort_col; /* packets aborted due to excess collisions */ + u64 tx_underrun; /* packets aborted due to FIFO underrun */ + u64 tx_rd_eop; /* count of reads beyond EOP */ + u64 tx_len_err; /* packets with length mismatch */ + u64 tx_trunc; /* packets truncated due to size >MTU */ + u64 tx_bcast_byte; /* broadcast packets byte count (without FCS) */ + u64 tx_mcast_byte; /* multicast packets byte count (without FCS) */ + u64 tx_col; /* collisions */ + + spinlock_t lock; /* prevent multiple simultaneous readers */ +}; + +enum emac_hw_flags { + EMAC_FLAG_HW_PROMISC_EN, + EMAC_FLAG_HW_VLANSTRIP_EN, + EMAC_FLAG_HW_MULTIALL_EN, + EMAC_FLAG_HW_LOOPBACK_EN, + EMAC_FLAG_HW_PTP_CAP, + EMAC_FLAG_HW_PTP_EN, + EMAC_FLAG_HW_TS_RX_EN, + EMAC_FLAG_HW_TS_TX_EN, +}; + +enum emac_adapter_flags { + EMAC_FLAG_ADPT_STATE_RESETTING, + EMAC_FLAG_ADPT_STATE_DOWN, + EMAC_FLAG_ADPT_STATE_WATCH_DOG, + EMAC_FLAG_ADPT_TASK_REINIT_REQ, + EMAC_FLAG_ADPT_TASK_LSC_REQ, + EMAC_FLAG_ADPT_TASK_CHK_SGMII_REQ, +}; + +/* emac shorthand bitops macros */ +#define TEST_FLAG(OBJ, FLAG) test_bit(EMAC_FLAG_ ## FLAG, &((OBJ)->flags)) +#define SET_FLAG(OBJ, FLAG) 
set_bit(EMAC_FLAG_ ## FLAG, &((OBJ)->flags)) +#define CLR_FLAG(OBJ, FLAG) clear_bit(EMAC_FLAG_ ## FLAG, &((OBJ)->flags)) +#define TEST_N_SET_FLAG(OBJ, FLAG) \ + test_and_set_bit(EMAC_FLAG_ ## FLAG, &((OBJ)->flags)) + +struct emac_hw { + void __iomem *reg_addr[NUM_EMAC_REG_BASES]; + + u16 devid; + u16 revid; + + /* ring parameter */ + u8 tpd_burst; + u8 rfd_burst; + u8 dmaw_dly_cnt; + u8 dmar_dly_cnt; + enum emac_dma_req_block dmar_block; + enum emac_dma_req_block dmaw_block; + enum emac_dma_order dma_order; + + /* RSS parameter */ + u8 rss_hstype; + u8 rss_base_cpu; + u16 rss_idt_size; + u32 rss_idt[32]; + u8 rss_key[40]; + bool rss_initialized; + + /* 1588 parameter */ + enum emac_ptp_clk_mode ptp_clk_mode; + enum emac_ptp_mode ptp_mode; + u32 ptp_intr_mask; + spinlock_t ptp_lock; /* sync access to ptp hw */ + u32 tstamp_rx_offset; + u32 tstamp_tx_offset; + void *frac_ns_adj_tbl; + u32 frac_ns_adj_tbl_sz; + s32 frac_ns_adj; + + u32 irq_mod; + u32 preamble; + unsigned long flags; +}; + +/* RSS hstype Definitions */ +#define EMAC_RSS_HSTYP_IPV4_EN 0x00000001 +#define EMAC_RSS_HSTYP_TCP4_EN 0x00000002 +#define EMAC_RSS_HSTYP_IPV6_EN 0x00000004 +#define EMAC_RSS_HSTYP_TCP6_EN 0x00000008 +#define EMAC_RSS_HSTYP_ALL_EN (\ + EMAC_RSS_HSTYP_IPV4_EN |\ + EMAC_RSS_HSTYP_TCP4_EN |\ + EMAC_RSS_HSTYP_IPV6_EN |\ + EMAC_RSS_HSTYP_TCP6_EN) + +/******************************************************************************/ +/* Logging functions and macros */ +#define emac_err(_adpt, _format, ...) \ + netdev_err((_adpt)->netdev, _format, ##__VA_ARGS__) + +#define emac_info(_adpt, _mlevel, _netdev, _format, ...) \ + netif_info(_adpt, _mlevel, _netdev, _format, ##__VA_ARGS__) + +#define emac_warn(_adpt, _mlevel, _netdev, _format, ...) \ + netif_warn(_adpt, _mlevel, _netdev, _format, ##__VA_ARGS__) + +#define emac_dbg(_adpt, _mlevel, _netdev, _format, ...) 
\ + netif_dbg(_adpt, _mlevel, _netdev, _format, ##__VA_ARGS__) + +#define EMAC_DEF_RX_BUF_SIZE 1536 +#define EMAC_MAX_JUMBO_PKT_SIZE (9 * 1024) +#define EMAC_MAX_TX_OFFLOAD_THRESH (9 * 1024) + +#define EMAC_MAX_ETH_FRAME_SIZE EMAC_MAX_JUMBO_PKT_SIZE +#define EMAC_MIN_ETH_FRAME_SIZE 68 + +#define EMAC_MAX_TX_QUEUES 4 +#define EMAC_DEF_TX_QUEUES 1 +#define EMAC_ACTIVE_TXQ 0 + +#define EMAC_MAX_RX_QUEUES 4 +#define EMAC_DEF_RX_QUEUES 1 + +#define EMAC_MIN_TX_DESCS 128 +#define EMAC_MIN_RX_DESCS 128 + +#define EMAC_MAX_TX_DESCS 16383 +#define EMAC_MAX_RX_DESCS 2047 + +#define EMAC_DEF_TX_DESCS 512 +#define EMAC_DEF_RX_DESCS 256 + +#define EMAC_DEF_RX_IRQ_MOD 250 +#define EMAC_DEF_TX_IRQ_MOD 250 + +#define EMAC_WATCHDOG_TIME (5 * HZ) + +/* RRD */ +/* general parameter format of rrd */ +struct emac_sw_rrdes_general { + /* dword 0 */ + u32 xsum:16; + u32 nor:4; /* number of RFD */ + u32 si:12; /* start index of rfd-ring */ + /* dword 1 */ + u32 hash; + /* dword 2 */ + u32 cvlan_tag:16; /* vlan-tag */ + u32 reserved:8; + u32 ptp_timestamp:1; + u32 rss_cpu:3; /* CPU number used by RSS */ + u32 rss_flag:4; /* rss_flag 0, TCP(IPv6) flag for RSS hash algrithm + * rss_flag 1, IPv6 flag for RSS hash algrithm + * rss_flag 2, TCP(IPv4) flag for RSS hash algrithm + * rss_flag 3, IPv4 flag for RSS hash algrithm + */ + /* dword 3 */ + u32 pkt_len:14; /* length of the packet */ + u32 l4f:1; /* L4(TCP/UDP) checksum failed */ + u32 ipf:1; /* IP checksum failed */ + u32 cvlan_flag:1; /* vlan tagged */ + u32 pid:3; + u32 res:1; /* received error summary */ + u32 crc:1; /* crc error */ + u32 fae:1; /* frame alignment error */ + u32 trunc:1; /* truncated packet, larger than MTU */ + u32 runt:1; /* runt packet */ + u32 icmp:1; /* incomplete packet due to insufficient rx-desc*/ + u32 bar:1; /* broadcast address received */ + u32 mar:1; /* multicast address received */ + u32 type:1; /* ethernet type */ + u32 fov:1; /* fifo overflow */ + u32 lene:1; /* length error */ + u32 update:1; /* update 
*/ + + /* dword 4 */ + u32 ts_low:30; + u32 __unused__:2; + /* dword 5 */ + u32 ts_high; +}; + +/* EMAC Errors in emac_sw_rrdesc.dfmt.dw[3] */ +#define EMAC_RRDES_L4F BIT(14) +#define EMAC_RRDES_IPF BIT(15) +#define EMAC_RRDES_CRC BIT(21) +#define EMAC_RRDES_FAE BIT(22) +#define EMAC_RRDES_TRN BIT(23) +#define EMAC_RRDES_RNT BIT(24) +#define EMAC_RRDES_INC BIT(25) +#define EMAC_RRDES_FOV BIT(29) +#define EMAC_RRDES_LEN BIT(30) + +union emac_sw_rrdesc { + struct emac_sw_rrdes_general genr; + + /* dword flat format */ + struct { + u32 dw[6]; + } dfmt; +}; + +/* RFD */ +/* general parameter format of rfd */ +struct emac_sw_rfdes_general { + u64 addr; +}; + +union emac_sw_rfdesc { + struct emac_sw_rfdes_general genr; + + /* dword flat format */ + struct { + u32 dw[2]; + } dfmt; +}; + +/* TPD */ +/* general parameter format of tpd */ +struct emac_sw_tpdes_general { + /* dword 0 */ + u32 buffer_len:16; /* include 4-byte CRC */ + u32 svlan_tag:16; + /* dword 1 */ + u32 l4hdr_offset:8; /* l4 header offset to the 1st byte of packet */ + u32 c_csum:1; + u32 ip_csum:1; + u32 tcp_csum:1; + u32 udp_csum:1; + u32 lso:1; + u32 lso_v2:1; + u32 svtagged:1; /* vlan-id tagged already */ + u32 ins_svtag:1; /* insert vlan tag */ + u32 ipv4:1; /* ipv4 packet */ + u32 type:1; /* type of packet (ethernet_ii(0) or snap(1)) */ + u32 reserve:12; + u32 epad:1; /* even byte padding when this packet */ + u32 last_frag:1; /* last fragment(buffer) of the packet */ + /* dword 2 */ + u32 addr_lo; + /* dword 3 */ + u32 cvlan_tag:16; + u32 cvtagged:1; + u32 ins_cvtag:1; + u32 addr_hi:13; + u32 tstmp_sav:1; +}; + +/* custom checksum parameter format of tpd */ +struct emac_sw_tpdes_checksum { + /* dword 0 */ + u32 buffer_len:16; + u32 svlan_tag:16; + /* dword 1 */ + u32 payld_offset:8; /* payload offset to the 1st byte of packet */ + u32 c_csum:1; /* do custom checksum offload */ + u32 ip_csum:1; /* do ip(v4) header checksum offload */ + u32 tcp_csum:1; /* do tcp checksum offload, both ipv4 and ipv6 */ 
+ u32 udp_csum:1; /* do udp checksum offload, both ipv4 and ipv6 */ + u32 lso:1; + u32 lso_v2:1; + u32 svtagged:1; /* vlan-id tagged already */ + u32 ins_svtag:1; /* insert vlan tag */ + u32 ipv4:1; /* ipv4 packet */ + u32 type:1; /* type of packet (ethernet_ii(0) or snap(1)) */ + u32 cxsum_offset:8; /* checksum offset to the 1st byte of packet */ + u32 reserve:4; + u32 epad:1; /* even byte padding when this packet */ + u32 last_frag:1; /* last fragment(buffer) of the packet */ + /* dword 2 */ + u32 addr_lo; + /* dword 3 */ + u32 cvlan_tag:16; + u32 cvtagged:1; + u32 ins_cvtag:1; + u32 addr_hi:14; +}; + +/* tcp large send format (v1/v2) of tpd */ +struct emac_sw_tpdes_tso { + /* dword 0 */ + u32 buffer_len:16; /* include 4-byte CRC */ + u32 svlan_tag:16; + /* dword 1 */ + u32 tcphdr_offset:8; /* tcp hdr offset to the 1st byte of packet */ + u32 c_csum:1; + u32 ip_csum:1; + u32 tcp_csum:1; + u32 udp_csum:1; + u32 lso:1; /* do tcp large send (ipv4 only) */ + u32 lso_v2:1; /* must be 0 in this format */ + u32 svtagged:1; /* vlan-id tagged already */ + u32 ins_svtag:1; /* insert vlan tag */ + u32 ipv4:1; /* ipv4 packet */ + u32 type:1; /* type of packet (ethernet_ii(1) or snap(0)) */ + u32 mss:13; /* mss if do tcp large send */ + u32 last_frag:1; /* last fragment(buffer) of the packet */ + /* dword 2 & 3 */ + u64 pkt_len:32; /* packet length in ext tpd */ + u64 reserve:32; +}; + +union emac_sw_tpdesc { + struct emac_sw_tpdes_general genr; + struct emac_sw_tpdes_checksum csum; + struct emac_sw_tpdes_tso tso; + + /* dword flat format */ + struct { + u32 dw[4]; + } dfmt; +}; + +#define EMAC_RRD(_que, _size, _i) \ + ((_que)->rrd.rrdesc + ((_size) * (_i))) + +#define EMAC_RFD(_que, _size, _i) \ + ((_que)->rfd.rfdesc + ((_size) * (_i))) + +#define EMAC_TPD(_que, _size, _i) \ + ((_que)->tpd.tpdesc + ((_size) * (_i))) + +#define EMAC_TPD_LAST_FRAGMENT 0x80000000 +#define EMAC_TPD_TSTAMP_SAVE 0x80000000 + +/* emac_irq_per_dev per-device (per-adapter) irq properties. 
+ * @idx: index of this irq entry in the adapter irq array. + * @irq: irq number. + * @mask mask to use over status register. + */ +struct emac_irq_per_dev { + int idx; + unsigned int irq; + u32 mask; +}; + +/* emac_irq_common irq properties which are common to all devices of this driver + * @name name in configuration (devicetree). + * @handler ISR. + * @status_reg status register offset. + * @mask_reg mask register offset. + * @init_mask initial value for mask to use over status register. + * @irqflags request_irq() flags. + */ +struct emac_irq_common { + char *name; + irq_handler_t handler; + + u32 status_reg; + u32 mask_reg; + u32 init_mask; + + unsigned long irqflags; +}; + +/* emac_irq_cmn_tbl a table of common irq properties to all devices of this + * driver. + */ +extern const struct emac_irq_common emac_irq_cmn_tbl[]; + +struct emac_clk { + struct clk *clk; + bool enabled; +}; + +struct emac_regulator { + struct regulator *vreg; + int voltage_uv; + bool enabled; +}; + +/* emac_ring_header represents a single, contiguous block of DMA space + * mapped for the three descriptor rings (tpd, rfd, rrd) + */ +struct emac_ring_header { + void *desc; /* virtual address */ + dma_addr_t dma; /* physical address */ + unsigned int size; /* length in bytes */ + unsigned int used; +}; + +/* emac_buffer is wrapper around a pointer to a socket buffer + * so a DMA handle can be stored along with the skb + */ +struct emac_buffer { + struct sk_buff *skb; /* socket buffer */ + u16 length; /* rx buffer length */ + dma_addr_t dma; +}; + +/* receive free descriptor (rfd) ring */ +struct emac_rfd_ring { + struct emac_buffer *rfbuff; + u32 __iomem *rfdesc; /* virtual address */ + dma_addr_t rfdma; /* physical address */ + u64 size; /* length in bytes */ + u32 count; /* number of descriptors in the ring */ + u32 produce_idx; + u32 process_idx; + u32 consume_idx; /* unused */ +}; + +/* receive return descriptor (rrd) ring */ +struct emac_rrd_ring { + u32 __iomem *rrdesc; /* virtual 
address */ + dma_addr_t rrdma; /* physical address */ + u64 size; /* length in bytes */ + u32 count; /* number of descriptors in the ring */ + u32 produce_idx; /* unused */ + u32 consume_idx; +}; + +/* rx queue */ +struct emac_rx_queue { + struct device *dev; /* device for dma mapping */ + struct net_device *netdev; /* netdev ring belongs to */ + struct emac_rrd_ring rrd; + struct emac_rfd_ring rfd; + struct napi_struct napi; + + u16 que_idx; /* index in multi rx queues*/ + u16 produce_reg; + u32 produce_mask; + u8 produce_shft; + + u16 process_reg; + u32 process_mask; + u8 process_shft; + + u16 consume_reg; + u32 consume_mask; + u8 consume_shft; + + u32 intr; + struct emac_irq_per_dev *irq; +}; + +#define GET_RFD_BUFFER(_rque, _i) (&((_rque)->rfd.rfbuff[(_i)])) + +/* transimit packet descriptor (tpd) ring */ +struct emac_tpd_ring { + struct emac_buffer *tpbuff; + u32 __iomem *tpdesc; /* virtual address */ + dma_addr_t tpdma; /* physical address */ + + u64 size; /* length in bytes */ + u32 count; /* number of descriptors in the ring */ + u32 produce_idx; + u32 consume_idx; + u32 last_produce_idx; +}; + +#define EMAC_HWTXTSTAMP_FIFO_DEPTH 8 +#define EMAC_TX_POLL_HWTXTSTAMP_THRESHOLD EMAC_HWTXTSTAMP_FIFO_DEPTH + +/* HW tx timestamp */ +struct emac_hwtxtstamp { + u32 ts_idx; + u32 sec; + u32 ns; +}; + +struct emac_tx_tstamp_stats { + u32 tx; + u32 rx; + u32 deliver; + u32 drop; + u32 lost; + u32 timeout; + u32 sched; + u32 poll; + u32 tx_poll; +}; + +/* tx queue */ +struct emac_tx_queue { + struct device *dev; /* device for dma mapping */ + struct net_device *netdev; /* netdev ring belongs to */ + struct emac_tpd_ring tpd; + + u16 que_idx; /* needed for multiqueue queue management */ + u16 max_packets; /* max packets per interrupt */ + u16 produce_reg; + u32 produce_mask; + u8 produce_shft; + + u16 consume_reg; + u32 consume_mask; + u8 consume_shft; +}; + +#define GET_TPD_BUFFER(_tque, _i) (&((_tque)->tpd.tpbuff[(_i)])) + +/* driver private data structure */ +struct 
emac_adapter { + struct net_device *netdev; + struct mii_bus *mii_bus; + struct phy_device *phydev; + struct emac_phy phy; + struct emac_hw hw; + struct emac_hw_stats hw_stats; + int irq_status; + + struct emac_irq_per_dev irq[EMAC_IRQ_CNT]; + unsigned int gpio[EMAC_GPIO_CNT]; + struct emac_clk clk[EMAC_CLK_CNT]; + struct emac_regulator vreg[EMAC_VREG_CNT]; + + /* dma parameters */ + u64 dma_mask; + struct device_dma_parameters dma_parms; + + /* All Descriptor memory */ + struct emac_ring_header ring_header; + struct emac_tx_queue tx_queue[EMAC_MAX_TX_QUEUES]; + struct emac_rx_queue rx_queue[EMAC_MAX_RX_QUEUES]; + u16 num_txques; + u16 num_rxques; + + u32 num_txdescs; + u32 num_rxdescs; + u8 rrdesc_size; /* in quad words */ + u8 rfdesc_size; /* in quad words */ + u8 tpdesc_size; /* in quad words */ + + u32 rxbuf_size; + + /* True == use single-pause-frame mode. */ + bool single_pause_mode; + + /* tx timestamping queue */ + struct sk_buff_head hwtxtstamp_pending_queue; + struct sk_buff_head hwtxtstamp_ready_queue; + struct work_struct hwtxtstamp_task; + spinlock_t hwtxtstamp_lock; /* lock for hwtxtstamp */ + struct emac_tx_tstamp_stats hwtxtstamp_stats; + + struct work_struct work_thread; + struct timer_list emac_timer; + unsigned long link_jiffies; + + bool tstamp_en; + u32 wol; + u16 msg_enable; + unsigned long flags; + struct pinctrl *pinctrl; + struct pinctrl_state *mdio_pins_clk_active; + struct pinctrl_state *mdio_pins_clk_sleep; + struct pinctrl_state *mdio_pins_data_active; + struct pinctrl_state *mdio_pins_data_sleep; + struct pinctrl_state *ephy_pins_active; + struct pinctrl_state *ephy_pins_sleep; + int (*gpio_on)(struct emac_adapter *adpt, bool mdio, bool ephy); + int (*gpio_off)(struct emac_adapter *adpt, bool mdio, bool ephy); + struct wakeup_source *link_wlock; + + u32 bus_cl_hdl; + struct msm_bus_scale_pdata *bus_scale_table; +}; + +static inline struct emac_adapter *emac_hw_get_adap(struct emac_hw *hw) +{ + return container_of(hw, struct 
emac_adapter, hw); +} + +static inline +struct emac_adapter *emac_irq_get_adpt(struct emac_irq_per_dev *irq) +{ + struct emac_irq_per_dev *irq_0 = irq - irq->idx; + /* why using __builtin_offsetof() and not container_of() ? + * container_of(irq_0, struct emac_adapter, irq) fails to compile + * because emac->irq is of array type. + */ + return (struct emac_adapter *) + ((char *)irq_0 - __builtin_offsetof(struct emac_adapter, irq)); +} + +/* default to trying for four seconds */ +#define EMAC_TRY_LINK_TIMEOUT (4 * 1000) + +#define EMAC_HW_CTRL_RESET_MAC 0x00000001 + +void emac_set_ethtool_ops(struct net_device *netdev); +int emac_reinit_locked(struct emac_adapter *adpt); +void emac_update_hw_stats(struct emac_adapter *adpt); +int emac_resize_rings(struct net_device *netdev); +int emac_mac_up(struct emac_adapter *adpt); +void emac_mac_down(struct emac_adapter *adpt, u32 ctrl); +int emac_clk_set_rate(struct emac_adapter *adpt, enum emac_clk_id id, + enum emac_clk_rate rate); +void emac_task_schedule(struct emac_adapter *adpt); +void emac_check_lsc(struct emac_adapter *adpt); +void emac_wol_gpio_irq(struct emac_adapter *adpt, bool enable); + +static inline void *dma_zalloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) +{ + void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); + + if (ret) + memset(ret, 0, size); + return ret; +} + +#endif /* _QCOM_EMAC_MAIN_H_ */ diff --git a/drivers/net/ethernet/qualcomm/emac/emac_phy.c b/drivers/net/ethernet/qualcomm/emac/emac_phy.c new file mode 100644 index 000000000000..01f895dbf2f0 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_phy.c @@ -0,0 +1,270 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* MSM EMAC PHY Controller driver.
 */

/* NOTE(review): the angle-bracket include targets below were lost in
 * extraction of this patch; restore them from the original file.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include "emac_hw.h"
#include "emac_defines.h"
#include "emac_regs.h"
#include "emac_phy.h"
#include "emac_rgmii.h"
#include "emac_sgmii.h"

/* emac_mdio_read() - mii_bus read callback: read @regnum from phy @addr.
 *
 * Programs the PHY address and MDIO control register, then polls
 * EMAC_MDIO_CTRL until the START/BUSY bits clear. Returns the 16-bit
 * register value on success, -EIO on timeout.
 *
 * NOTE(review): while runtime-suspended this returns 0 (ret's initial
 * value) rather than an error — confirm callers tolerate a fake 0 read.
 */
static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct emac_adapter *adpt = bus->priv;
	struct emac_hw *hw = &adpt->hw;
	u32 reg = 0;
	int ret = 0;

	if (pm_runtime_enabled(adpt->netdev->dev.parent) &&
	    pm_runtime_status_suspended(adpt->netdev->dev.parent)) {
		emac_dbg(adpt, hw, adpt->netdev, "EMAC in suspended state\n");
		return ret;
	}

	emac_reg_update32(hw, EMAC, EMAC_PHY_STS, PHY_ADDR_BMSK,
			  (addr << PHY_ADDR_SHFT));
	wmb(); /* ensure PHY address is set before we proceed */
	/* NOTE(review): reg is 0 here, so this mask-out is a no-op; it is
	 * immediately overwritten by the assignment below.
	 */
	reg = reg & ~(MDIO_REG_ADDR_BMSK | MDIO_CLK_SEL_BMSK |
		      MDIO_MODE | MDIO_PR);
	reg = SUP_PREAMBLE |
	      ((MDIO_CLK_25_4 << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) |
	      ((regnum << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) |
	      MDIO_START | MDIO_RD_NWR;

	emac_reg_w32(hw, EMAC, EMAC_MDIO_CTRL, reg);
	mb(); /* ensure hw starts the operation before we check for result */

	/* poll every 100us, up to MDIO_WAIT_TIMES * 100us total */
	if (readl_poll_timeout(hw->reg_addr[EMAC] + EMAC_MDIO_CTRL, reg,
			       !(reg & (MDIO_START | MDIO_BUSY)),
			       100, MDIO_WAIT_TIMES * 100)) {
		emac_err(adpt, "error reading phy addr %d phy reg 0x%02x\n",
			 addr, regnum);
		ret = -EIO;
	} else {
		ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;

		emac_dbg(adpt, hw, adpt->netdev, "EMAC PHY ADDR %d PHY RD 0x%02x -> 0x%04x\n",
			 addr, regnum, ret);
	}
	return ret;
}

/* emac_mdio_write() - mii_bus write callback: write @val to @regnum of
 * phy @addr. Same register protocol as emac_mdio_read() but with the
 * data field populated and RD_NWR clear. Returns 0 on success, -EIO on
 * poll timeout. While runtime-suspended the write is silently skipped
 * and 0 is returned.
 */
static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	struct emac_adapter *adpt = bus->priv;
	struct emac_hw *hw = &adpt->hw;
	u32 reg = 0;
	int ret = 0;

	if (pm_runtime_enabled(adpt->netdev->dev.parent) &&
	    pm_runtime_status_suspended(adpt->netdev->dev.parent)) {
		emac_dbg(adpt, hw, adpt->netdev, "EMAC in suspended state\n");
		return ret;
	}

	emac_reg_update32(hw, EMAC, EMAC_PHY_STS, PHY_ADDR_BMSK,
			  (addr << PHY_ADDR_SHFT));
	wmb(); /* ensure PHY address is set before we proceed */

	/* NOTE(review): as in the read path, reg is 0 so this is a no-op */
	reg = reg & ~(MDIO_REG_ADDR_BMSK | MDIO_CLK_SEL_BMSK |
		      MDIO_DATA_BMSK | MDIO_MODE | MDIO_PR);
	reg = SUP_PREAMBLE |
	      ((MDIO_CLK_25_4 << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) |
	      ((regnum << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) |
	      ((val << MDIO_DATA_SHFT) & MDIO_DATA_BMSK) |
	      MDIO_START;

	emac_reg_w32(hw, EMAC, EMAC_MDIO_CTRL, reg);
	mb(); /* ensure hw starts the operation before we check for result */

	if (readl_poll_timeout(hw->reg_addr[EMAC] + EMAC_MDIO_CTRL, reg,
			       !(reg & (MDIO_START | MDIO_BUSY)), 100,
			       MDIO_WAIT_TIMES * 100)) {
		emac_err(adpt, "error writing phy addr %d phy reg 0x%02x data 0x%02x\n",
			 addr, regnum, val);
		ret = -EIO;
	} else {
		emac_dbg(adpt, hw, adpt->netdev, "EMAC PHY Addr %d PHY WR 0x%02x <- 0x%04x\n",
			 addr, regnum, val);
	}

	return ret;
}

/* emac_phy_config_fc() - program the MAC RX/TX flow-control enable bits
 * from the current flow-control mode.
 */
int emac_phy_config_fc(struct emac_adapter *adpt)
{
	struct emac_phy *phy = &adpt->phy;
	struct emac_hw *hw = &adpt->hw;
	u32 mac;

	/* without autoneg (or with no external phy) force the requested mode */
	if (phy->disable_fc_autoneg || !phy->external)
		phy->cur_fc_mode = phy->req_fc_mode;

	mac = emac_reg_r32(hw, EMAC, EMAC_MAC_CTRL);

	switch (phy->cur_fc_mode) {
	case EMAC_FC_NONE:
		mac &= ~(RXFC | TXFC);
		break;
	case EMAC_FC_RX_PAUSE:
		mac &= ~TXFC;
		mac |= RXFC;
		break;
	case EMAC_FC_TX_PAUSE:
		mac |= TXFC;
		mac &= ~RXFC;
		break;
	case EMAC_FC_FULL:
	case EMAC_FC_DEFAULT:
		mac |= (TXFC | RXFC);
		break;
default: + emac_err(adpt, "flow control param set incorrectly\n"); + return -EINVAL; + } + + emac_reg_w32(hw, EMAC, EMAC_MAC_CTRL, mac); + /* ensure flow control config is slushed to hw */ + wmb(); + return 0; +} + +/* Configure the MDIO bus and connect the external PHY */ +int emac_phy_config_external(struct platform_device *pdev, + struct emac_adapter *adpt) +{ + struct device_node *np = pdev->dev.of_node; + struct mii_bus *mii_bus; + int ret; + u32 phy_id = 0; + + /* Create the mii_bus object for talking to the MDIO bus */ + mii_bus = devm_mdiobus_alloc(&pdev->dev); + adpt->mii_bus = mii_bus; + + if (!mii_bus) + return -ENOMEM; + + mii_bus->name = "emac-mdio"; + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + mii_bus->read = emac_mdio_read; + mii_bus->write = emac_mdio_write; + mii_bus->parent = &pdev->dev; + mii_bus->priv = adpt; + + if (ACPI_COMPANION(&pdev->dev)) { + u32 phy_addr; + + ret = mdiobus_register(mii_bus); + if (ret) { + emac_err(adpt, "could not register mdio bus\n"); + return ret; + } + ret = device_property_read_u32(&pdev->dev, "phy-channel", + &phy_addr); + if (ret) { + /* If we can't read a valid phy address, then assume + * that there is only one phy on this mdio bus. + */ + adpt->phydev = phy_find_first(mii_bus); + } else { + emac_err(adpt, "could not get external phy dev\n"); + adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr); + } + } else { + struct device_node *phy_np; + //struct module *at803x_module = NULL; + //at803x_module = find_module("Qualcomm Technologies, Inc. 
Atheros AR8031/AR8033"); + ret = of_mdiobus_register(mii_bus, np); + //ret = __of_mdiobus_register(mii_bus, np, at803x_module); + if (ret) { + emac_err(adpt, "could not register mdio bus\n"); + return ret; + } + + phy_np = of_parse_phandle(np, "phy-handle", 0); + adpt->phydev = of_phy_find_device(phy_np); + of_node_put(phy_np); + } + if (!adpt->phydev) { + emac_err(adpt, "could not find external phy\n"); + mdiobus_unregister(mii_bus); + return -ENODEV; + } + phy_id = adpt->phydev->phy_id; + /*if (adpt->phydev->phy_id == (u32)0) { + * emac_err(adpt, "External phy is not up\n"); + * mdiobus_unregister(mii_bus); + * return -EPROBE_DEFER; + * } + */ + + if (adpt->phydev->drv) { + emac_dbg(adpt, probe, adpt->netdev, "attached PHY driver [%s] ", + adpt->phydev->drv->name); + emac_dbg(adpt, probe, adpt->netdev, "(mii_bus:phy_addr=%s, irq=%d)\n", + dev_name(&adpt->phydev->mdio.dev), adpt->phydev->irq); + } + /* Set initial link status to false */ + adpt->phydev->link = 0; + return 0; +} + +int emac_phy_config_internal(struct platform_device *pdev, + struct emac_adapter *adpt) +{ + struct emac_phy *phy = &adpt->phy; + struct device_node *dt = pdev->dev.of_node; + int ret; + + phy->external = !of_property_read_bool(dt, "qcom,no-external-phy"); + + /* Get the link mode */ + ret = of_get_phy_mode(dt, &phy->phy_interface); + if (ret < 0) { + emac_err(adpt, "unknown phy mode: %s\n", phy_modes(ret)); + return ret; + } + + switch (phy->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + phy->ops = emac_rgmii_ops; + break; + case PHY_INTERFACE_MODE_SGMII: + phy->ops = emac_sgmii_ops; + break; + default: + emac_err(adpt, "unsupported phy mode: %s\n", phy_modes(ret)); + return -EINVAL; + } + + ret = phy->ops.config(pdev, adpt); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/net/ethernet/qualcomm/emac/emac_phy.h b/drivers/net/ethernet/qualcomm/emac/emac_phy.h new file mode 100644 index 000000000000..53853142114c --- /dev/null +++ 
b/drivers/net/ethernet/qualcomm/emac/emac_phy.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __EMAC_PHY_H__ +#define __EMAC_PHY_H__ + +#include +#include + +struct emac_adapter; + +struct emac_phy_ops { + int (*config)(struct platform_device *pdev, struct emac_adapter *adpt); + void (*reset)(struct emac_adapter *adpt); + int (*up)(struct emac_adapter *adpt); + void (*down)(struct emac_adapter *adpt); + int (*link_setup_no_ephy)(struct emac_adapter *adpt); + int (*link_check_no_ephy)(struct emac_adapter *adpt, + struct phy_device *phydev); + void (*tx_clk_set_rate)(struct emac_adapter *adpt); + void (*periodic_task)(struct emac_adapter *adpt); +}; + +enum emac_flow_ctrl { + EMAC_FC_NONE, + EMAC_FC_RX_PAUSE, + EMAC_FC_TX_PAUSE, + EMAC_FC_FULL, + EMAC_FC_DEFAULT +}; + +enum emac_phy_map_type { + EMAC_PHY_MAP_DEFAULT = 0, + EMAC_PHY_MAP_MDM9607, + EMAC_PHY_MAP_V2, + EMAC_PHY_MAP_NUM, +}; + +/* emac_phy - internal emac phy + * @addr mii address + * @id vendor id + * @cur_fc_mode flow control mode in effect + * @req_fc_mode flow control mode requested by caller + * @disable_fc_autoneg Do not auto-negotiate flow control + */ +struct emac_phy { + phy_interface_t phy_interface; + u32 phy_version; + bool external; + struct emac_phy_ops ops; + + void *private; + + /* flow control configuration */ + enum emac_flow_ctrl cur_fc_mode; + 
enum emac_flow_ctrl req_fc_mode; + bool disable_fc_autoneg; + enum emac_phy_map_type board_id; + + int link_up; + int link_speed; + int link_duplex; + int link_pause; + + bool is_wol_irq_reg; + bool is_wol_enabled; + spinlock_t wol_irq_lock; /* lock for wol irq gpio enablement */ + bool is_ext_phy_connect; +}; + +int emac_phy_config_internal(struct platform_device *pdev, + struct emac_adapter *adpt); +int emac_phy_config_external(struct platform_device *pdev, + struct emac_adapter *adpt); +int emac_phy_setup_link(struct emac_adapter *adpt, u32 speed, bool autoneg, + bool fc); +int emac_phy_setup_link_speed(struct emac_adapter *adpt, u32 speed, + bool autoneg, bool fc); +int emac_phy_check_link(struct emac_adapter *adpt, u32 *speed, bool *link_up); +int emac_phy_get_lpa_speed(struct emac_adapter *adpt, u32 *speed); +int emac_phy_config_fc(struct emac_adapter *adpt); +void emac_phy_reset_external(struct emac_adapter *adpt); +#endif /* __EMAC_PHY_H__ */ diff --git a/drivers/net/ethernet/qualcomm/emac/emac_ptp.c b/drivers/net/ethernet/qualcomm/emac/emac_ptp.c new file mode 100644 index 000000000000..4a96aff20079 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_ptp.c @@ -0,0 +1,901 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* Qualcomm Technologies, Inc. EMAC Ethernet Controller driver. 
+ */ + +#include +#include +#include +#include "emac_main.h" +#include "emac_hw.h" +#include "emac_ptp.h" + +#define RTC_INC_FRAC_NS_BMSK 0x03ffffff +#define RTC_INC_FRAC_NS_SHFT 0 +#define RTC_INC_NS_BMSK 0xfc000000 +#define RTC_INC_NS_SHFT 26 +#define RTC_NUM_FRAC_NS_PER_NS BIT(RTC_INC_NS_SHFT) + +#define TS_TX_FIFO_SYNC_RST (TX_INDX_FIFO_SYNC_RST | TX_TS_FIFO_SYNC_RST) +#define TS_RX_FIFO_SYNC_RST (RX_TS_FIFO1_SYNC_RST | RX_TS_FIFO2_SYNC_RST) +#define TS_FIFO_SYNC_RST (TS_TX_FIFO_SYNC_RST | TS_RX_FIFO_SYNC_RST) + +struct emac_tstamp_hw_delay { + int phy_mode; + u32 speed; + u32 tx; + u32 rx; +}; + +struct emac_ptp_frac_ns_adj { + u32 ref_clk_rate; + s32 adj_val; +}; + +static const struct emac_tstamp_hw_delay emac_ptp_hw_delay[] = { + { PHY_INTERFACE_MODE_SGMII, SPEED_1000, 16, 60 }, + { PHY_INTERFACE_MODE_SGMII, SPEED_100, 280, 100 }, + { PHY_INTERFACE_MODE_SGMII, SPEED_10, 2400, 400 }, + { 0 } +}; + +static inline u32 get_rtc_ref_clkrate(struct emac_hw *hw) +{ + struct emac_adapter *adpt = emac_hw_get_adap(hw); + + return clk_get_rate(adpt->clk[EMAC_CLK_HIGH_SPEED].clk); +} + +static inline bool is_valid_frac_ns_adj(s32 val) +{ + if (val >= RTC_NUM_FRAC_NS_PER_NS || (val <= -RTC_NUM_FRAC_NS_PER_NS)) + return false; + + return true; +} + +static s32 get_frac_ns_adj_from_tbl(struct emac_hw *hw) +{ + const struct emac_ptp_frac_ns_adj *tbl = hw->frac_ns_adj_tbl; + u32 clk = get_rtc_ref_clkrate(hw); + s32 val = 0; + int i; + + for (i = 0; tbl && i < hw->frac_ns_adj_tbl_sz; i++) { + if (tbl[i].ref_clk_rate == clk) { + if (is_valid_frac_ns_adj(tbl[i].adj_val)) + val = tbl[i].adj_val; + break; + } + } + + return val; +} + +static int emac_hw_set_rtc_inc_value(struct emac_hw *hw, s32 adj) +{ + u32 clk = get_rtc_ref_clkrate(hw); + u32 ns, frac, rem, inc; + u64 v; + + ns = div_u64_rem(1000000000LL, clk, &rem); + v = (u64)rem << RTC_INC_NS_SHFT; + frac = div_u64(v, clk); + + if (adj) { + s32 res; + + res = (s32)frac + adj; + if (res < 0) { + ns--; + res += 
RTC_NUM_FRAC_NS_PER_NS; + } else if (res >= RTC_NUM_FRAC_NS_PER_NS) { + ns++; + res -= RTC_NUM_FRAC_NS_PER_NS; + } + frac = (u32)res; + } + + inc = (ns << RTC_INC_NS_SHFT) | frac; + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_INC_VALUE_2, + (inc >> 16) & INC_VALUE_2_BMSK); + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_INC_VALUE_1, + inc & INC_VALUE_1_BMSK); + wmb(); /* ensure P1588_INC_VALUE is set before we proceed */ + + return 0; +} + +static const struct emac_tstamp_hw_delay *emac_get_ptp_hw_delay(u32 link_speed, + int phy_mode) +{ + const struct emac_tstamp_hw_delay *info = emac_ptp_hw_delay; + + for (info = emac_ptp_hw_delay; info->phy_mode; info++) { + if (info->phy_mode == phy_mode && info->speed == link_speed) + return info; + } + + return NULL; +} + +static int emac_hw_adjust_tstamp_offset(struct emac_hw *hw, + enum emac_ptp_clk_mode clk_mode, + u32 link_speed) +{ + const struct emac_tstamp_hw_delay *delay_info; + struct emac_phy *phy = &emac_hw_get_adap(hw)->phy; + + delay_info = emac_get_ptp_hw_delay(link_speed, phy->phy_interface); + + if (clk_mode == emac_ptp_clk_mode_oc_one_step) { + u32 latency = (delay_info) ? 
delay_info->tx : 0; + + emac_reg_update32(hw, EMAC_1588, EMAC_P1588_TX_LATENCY, + TX_LATENCY_BMSK, latency << TX_LATENCY_SHFT); + wmb(); /* ensure that the latency time is flushed to HW */ + } + + if (delay_info) { + hw->tstamp_rx_offset = delay_info->rx; + hw->tstamp_tx_offset = delay_info->tx; + } else { + hw->tstamp_rx_offset = 0; + hw->tstamp_tx_offset = 0; + } + + return 0; +} + +static int emac_hw_config_tx_tstamp(struct emac_hw *hw, bool enable) +{ + if (enable) { + /* Reset the TX timestamp FIFO */ + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, + TS_TX_FIFO_SYNC_RST, TS_TX_FIFO_SYNC_RST); + wmb(); /* ensure that the Tx timestamp reset is flushed to HW */ + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, + TS_TX_FIFO_SYNC_RST, 0); + wmb(); /* ensure that the Tx timestamp is out of reset */ + + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, + TX_TS_ENABLE, TX_TS_ENABLE); + wmb(); /* ensure enabling the Tx timestamp is flushed to HW */ + SET_FLAG(hw, HW_TS_TX_EN); + } else { + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, + TX_TS_ENABLE, 0); + wmb(); /* ensure disabling the Tx timestamp is flushed to HW */ + CLR_FLAG(hw, HW_TS_TX_EN); + } + + return 0; +} + +static int emac_hw_config_rx_tstamp(struct emac_hw *hw, bool enable) +{ + if (enable) { + /* Reset the RX timestamp FIFO */ + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, + TS_RX_FIFO_SYNC_RST, TS_RX_FIFO_SYNC_RST); + wmb(); /* ensure that the Rx timestamp reset is flushed to HW */ + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, + TS_RX_FIFO_SYNC_RST, 0); + wmb(); /* ensure that the Rx timestamp is out of reset */ + + SET_FLAG(hw, HW_TS_RX_EN); + } else { + CLR_FLAG(hw, HW_TS_RX_EN); + } + + return 0; +} + +static int emac_hw_1588_core_disable(struct emac_hw *hw) +{ + if (TEST_FLAG(hw, HW_TS_RX_EN)) + emac_hw_config_rx_tstamp(hw, false); + if (TEST_FLAG(hw, HW_TS_TX_EN)) + emac_hw_config_tx_tstamp(hw, false); + + emac_reg_update32(hw, EMAC_CSR, 
EMAC_EMAC_WRAPPER_CSR1, + DIS_1588_CLKS, DIS_1588_CLKS); + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR10, + DIS_1588, DIS_1588); + emac_reg_update32(hw, EMAC_1588, EMAC_P1588_CTRL_REG, + BYPASS_O, BYPASS_O); + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_MASK, 0); + wmb(); /* ensure that disabling PTP is flushed to HW */ + + CLR_FLAG(hw, HW_PTP_EN); + return 0; +} + +static int emac_hw_1588_core_enable(struct emac_hw *hw, + enum emac_ptp_mode mode, + enum emac_ptp_clk_mode clk_mode, + u32 link_speed, + s32 frac_ns_adj) +{ + if (clk_mode != emac_ptp_clk_mode_oc_one_step && + clk_mode != emac_ptp_clk_mode_oc_two_step) { + struct emac_adapter *adpt = emac_hw_get_adap(hw); + + emac_dbg(emac_hw_get_adap(hw), hw, adpt->netdev, "invalid ptp clk mode %d\n", + clk_mode); + return -EINVAL; + } + + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, + DIS_1588_CLKS, 0); + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR10, DIS_1588, 0); + emac_reg_update32(hw, EMAC_1588, EMAC_P1588_CTRL_REG, BYPASS_O, 0); + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_MASK, 0); + emac_reg_update32(hw, EMAC_1588, EMAC_P1588_RTC_EXPANDED_CONFIG, + RTC_READ_MODE, RTC_READ_MODE); + emac_reg_update32(hw, EMAC_1588, EMAC_P1588_CTRL_REG, ATTACH_EN, 0); + wmb(); /* ensure P1588_CTRL_REG is set before we proceed */ + + emac_hw_adjust_tstamp_offset(hw, clk_mode, link_speed); + + emac_reg_update32(hw, EMAC_1588, EMAC_P1588_CTRL_REG, CLOCK_MODE_BMSK, + (clk_mode << CLOCK_MODE_SHFT)); + emac_reg_update32(hw, EMAC_1588, EMAC_P1588_CTRL_REG, ETH_MODE_SW, + (link_speed == EMAC_LINK_SPEED_1GB_FULL) ? 
+ 0 : ETH_MODE_SW); + + /* set RTC increment every 8ns to fit 125MHz clock */ + emac_hw_set_rtc_inc_value(hw, frac_ns_adj); + + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR10, + RD_CLR_1588, RD_CLR_1588); + wmb(); /* ensure clear-on-read is enabled on PTP config registers */ + + emac_reg_r32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_STATUS); + + /* Reset the timestamp FIFO */ + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, + TS_FIFO_SYNC_RST, TS_FIFO_SYNC_RST); + wmb(); /* ensure timestamp reset is complete */ + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, + TS_FIFO_SYNC_RST, 0); + wmb(); /* ensure timestamp is out of reset */ + + if (mode == emac_ptp_mode_master) + emac_reg_update32(hw, EMAC_1588, + EMAC_P1588_GRAND_MASTER_CONFIG_0, + GRANDMASTER_MODE | GM_PPS_SYNC, + GRANDMASTER_MODE); + else + emac_reg_update32(hw, EMAC_1588, + EMAC_P1588_GRAND_MASTER_CONFIG_0, + GRANDMASTER_MODE | GM_PPS_SYNC, 0); + wmb(); /* ensure grandmaster mode setting is flushed to HW */ + + SET_FLAG(hw, HW_PTP_EN); + return 0; +} + +static void rtc_settime(struct emac_hw *hw, const struct timespec64 *ts) +{ + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_RTC_PRELOADED_5, 0); + + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_RTC_PRELOADED_4, + (ts->tv_sec >> 16) & RTC_PRELOADED_4_BMSK); + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_RTC_PRELOADED_3, + ts->tv_sec & RTC_PRELOADED_3_BMSK); + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_RTC_PRELOADED_2, + (ts->tv_nsec >> 16) & RTC_PRELOADED_2_BMSK); + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_RTC_PRELOADED_1, + ts->tv_nsec & RTC_PRELOADED_1_BMSK); + + emac_reg_update32(hw, EMAC_1588, EMAC_P1588_RTC_EXPANDED_CONFIG, + LOAD_RTC, LOAD_RTC); + wmb(); /* ensure RTC setting is flushed to HW */ +} + +static void rtc_gettime(struct emac_hw *hw, struct timespec64 *ts) +{ + emac_reg_update32(hw, EMAC_1588, EMAC_P1588_RTC_EXPANDED_CONFIG, + RTC_SNAPSHOT, RTC_SNAPSHOT); + wmb(); /* ensure snapshot is saved before reading it back */ + + ts->tv_sec =
emac_reg_field_r32(hw, EMAC_1588, EMAC_P1588_REAL_TIME_5, + REAL_TIME_5_BMSK, REAL_TIME_5_SHFT); + ts->tv_sec = (u64)ts->tv_sec << 32; + ts->tv_sec |= emac_reg_field_r32(hw, EMAC_1588, EMAC_P1588_REAL_TIME_4, + REAL_TIME_4_BMSK, REAL_TIME_4_SHFT); + ts->tv_sec <<= 16; + ts->tv_sec |= emac_reg_field_r32(hw, EMAC_1588, EMAC_P1588_REAL_TIME_3, + REAL_TIME_3_BMSK, REAL_TIME_3_SHFT); + + ts->tv_nsec = emac_reg_field_r32(hw, EMAC_1588, EMAC_P1588_REAL_TIME_2, + REAL_TIME_2_BMSK, REAL_TIME_2_SHFT); + ts->tv_nsec <<= 16; + ts->tv_nsec |= emac_reg_field_r32(hw, EMAC_1588, EMAC_P1588_REAL_TIME_1, + REAL_TIME_1_BMSK, REAL_TIME_1_SHFT); +} + +static void rtc_adjtime(struct emac_hw *hw, s64 delta) +{ + s32 delta_ns; + s32 delta_sec; + + delta_sec = div_s64_rem(delta, 1000000000LL, &delta_ns); + + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_SEC_OFFSET_3, 0); + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_SEC_OFFSET_2, + (delta_sec >> 16) & SEC_OFFSET_2_BMSK); + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_SEC_OFFSET_1, + delta_sec & SEC_OFFSET_1_BMSK); + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_NANO_OFFSET_2, + (delta_ns >> 16) & NANO_OFFSET_2_BMSK); + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_NANO_OFFSET_1, + (delta_ns & NANO_OFFSET_1_BMSK)); + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_ADJUST_RTC, 1); + wmb(); /* ensure that RTC adjustment is flushed to HW */ +} + +static void rtc_ns_sync_pps_in(struct emac_hw *hw) +{ + u32 ts; + s64 delta = 0; + + ts = emac_reg_r32(hw, EMAC_1588, EMAC_P1588_GM_PPS_TIMESTAMP_2); + ts <<= 16; + + ts |= emac_reg_r32(hw, EMAC_1588, EMAC_P1588_GM_PPS_TIMESTAMP_1); + + if (ts < 500000000) + delta = 0LL - (s64)ts; + else + delta = 1000000000LL - (s64)ts; + + if (delta) { + struct emac_adapter *adpt = emac_hw_get_adap(hw); + + rtc_adjtime(hw, delta); + emac_dbg(emac_hw_get_adap(hw), intr, adpt->netdev, + "RTC_SYNC: gm_pps_tstamp_ns 0x%08x, adjust %lldns\n", + ts, delta); + } +} + +static void emac_ptp_rtc_ns_sync(struct emac_hw *hw) +{ + unsigned long flag = 0; + + 
spin_lock_irqsave(&hw->ptp_lock, flag); + rtc_ns_sync_pps_in(hw); + spin_unlock_irqrestore(&hw->ptp_lock, flag); +} + +int emac_ptp_config(struct emac_hw *hw) +{ + struct timespec64 ts; + int ret = 0; + unsigned long flag = 0; + + spin_lock_irqsave(&hw->ptp_lock, flag); + + if (TEST_FLAG(hw, HW_PTP_EN)) + goto unlock_out; + + hw->frac_ns_adj = get_frac_ns_adj_from_tbl(hw); + ret = emac_hw_1588_core_enable(hw, + hw->ptp_mode, + hw->ptp_clk_mode, + SPEED_1000, + hw->frac_ns_adj); + if (ret) + goto unlock_out; + + ktime_get_real_ts64(&ts); + rtc_settime(hw, &ts); + + emac_hw_get_adap(hw)->irq[0].mask |= PTP_INT; + hw->ptp_intr_mask = PPS_IN; + +unlock_out: + spin_unlock_irqrestore(&hw->ptp_lock, flag); + + return ret; +} + +int emac_ptp_stop(struct emac_hw *hw) +{ + int ret = 0; + unsigned long flag = 0; + + spin_lock_irqsave(&hw->ptp_lock, flag); + + if (TEST_FLAG(hw, HW_PTP_EN)) + ret = emac_hw_1588_core_disable(hw); + + hw->ptp_intr_mask = 0; + emac_hw_get_adap(hw)->irq[0].mask &= ~PTP_INT; + + spin_unlock_irqrestore(&hw->ptp_lock, flag); + + return ret; +} + +int emac_ptp_set_linkspeed(struct emac_hw *hw, u32 link_speed) +{ + unsigned long flag = 0; + + spin_lock_irqsave(&hw->ptp_lock, flag); + emac_reg_update32(hw, EMAC_1588, EMAC_P1588_CTRL_REG, ETH_MODE_SW, + (link_speed == SPEED_1000) ? 
0 : + ETH_MODE_SW); + wmb(); /* ensure ETH_MODE_SW is set before we proceed */ + emac_hw_adjust_tstamp_offset(hw, hw->ptp_clk_mode, link_speed); + spin_unlock_irqrestore(&hw->ptp_lock, flag); + + return 0; +} + +void emac_ptp_intr(struct emac_hw *hw) +{ + u32 isr, status; + struct emac_adapter *adpt = emac_hw_get_adap(hw); + + isr = emac_reg_r32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_STATUS); + status = isr & hw->ptp_intr_mask; + + emac_dbg(emac_hw_get_adap(hw), intr, adpt->netdev, + "receive ptp interrupt: isr 0x%x\n", isr); + + if (status & PPS_IN) + emac_ptp_rtc_ns_sync(hw); +} + +static int emac_ptp_settime(struct emac_hw *hw, const struct timespec64 *ts) +{ + int ret = 0; + unsigned long flag = 0; + + spin_lock_irqsave(&hw->ptp_lock, flag); + if (!TEST_FLAG(hw, HW_PTP_EN)) + ret = -EPERM; + else + rtc_settime(hw, ts); + spin_unlock_irqrestore(&hw->ptp_lock, flag); + + return ret; +} + +static int emac_ptp_gettime(struct emac_hw *hw, struct timespec64 *ts) +{ + int ret = 0; + unsigned long flag = 0; + + spin_lock_irqsave(&hw->ptp_lock, flag); + if (!TEST_FLAG(hw, HW_PTP_EN)) + ret = -EPERM; + else + rtc_gettime(hw, ts); + spin_unlock_irqrestore(&hw->ptp_lock, flag); + + return ret; +} + +int emac_ptp_adjtime(struct emac_hw *hw, s64 delta) +{ + int ret = 0; + unsigned long flag = 0; + + spin_lock_irqsave(&hw->ptp_lock, flag); + if (!TEST_FLAG(hw, HW_PTP_EN)) + ret = -EPERM; + else + rtc_adjtime(hw, delta); + spin_unlock_irqrestore(&hw->ptp_lock, flag); + + return ret; +} + +int emac_tstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw *hw = &adpt->hw; + struct hwtstamp_config cfg; + + if (!TEST_FLAG(hw, HW_PTP_EN)) + return -EPERM; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + switch (cfg.tx_type) { + case HWTSTAMP_TX_OFF: + emac_hw_config_tx_tstamp(hw, false); + break; + case HWTSTAMP_TX_ON: + if (TEST_FLAG(hw, HW_TS_TX_EN)) + break; + + 
emac_hw_config_tx_tstamp(hw, true); + break; + default: + return -ERANGE; + } + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + emac_hw_config_rx_tstamp(hw, false); + break; + default: + cfg.rx_filter = HWTSTAMP_FILTER_ALL; + if (TEST_FLAG(hw, HW_TS_RX_EN)) + break; + + emac_hw_config_rx_tstamp(hw, true); + break; + } + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? + -EFAULT : 0; +} + +static ssize_t emac_ptp_sysfs_tstamp_set(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct emac_adapter *adpt = netdev_priv(to_net_dev(dev)); + struct timespec64 ts; + int ret; + + ktime_get_real_ts64(&ts); + ret = emac_ptp_settime(&adpt->hw, &ts); + if (!ret) + ret = count; + + return ret; +} + +static ssize_t emac_ptp_sysfs_tstamp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct emac_adapter *adpt = netdev_priv(to_net_dev(dev)); + struct timespec64 ts = { 0 }; + struct timespec64 ts_now = { 0 }; + int count = PAGE_SIZE; + ssize_t retval; + + retval = emac_ptp_gettime(&adpt->hw, &ts); + if (retval) + return retval; + + ktime_get_real_ts64(&ts_now); + retval = scnprintf(buf, count, + "%12u.%09u tstamp %12u.%08u time-of-day\n", + (int)ts.tv_sec, (int)ts.tv_nsec, + (int)ts_now.tv_sec, (int)ts_now.tv_nsec); + + return retval; +} + +/* display ethernet mac time as well as the time of the next mac pps pulse */ +static ssize_t emac_ptp_sysfs_mtnp_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct emac_adapter *adpt = netdev_priv(to_net_dev(dev)); + int count = PAGE_SIZE; + struct timespec64 ts; + ssize_t ret; + + ret = emac_ptp_gettime(&adpt->hw, &ts); + if (ret) + return ret; + + return scnprintf(buf, count, "%ld %ld %d %ld\n", + ts.tv_sec, + ts.tv_nsec, + (ts.tv_nsec == 0) ? 1 : 0, + (ts.tv_nsec == 0) ? 0 : (NSEC_PER_SEC - ts.tv_nsec)); +} + +/* Do a "slam" of a very particular time into the time registers... 
*/ +static ssize_t emac_ptp_sysfs_slam(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct emac_adapter *adpt = netdev_priv(to_net_dev(dev)); + u32 sec = 0; + u32 nsec = 0; + ssize_t ret = -EINVAL; + + if (sscanf(buf, "%u %u", &sec, &nsec) == 2) { + struct timespec64 ts = {sec, nsec}; + + ret = emac_ptp_settime(&adpt->hw, &ts); + if (ret) { + pr_err("%s: emac_ptp_settime failed.\n", __func__); + return ret; + } + ret = count; + } else { + pr_err("%s: sscanf failed.\n", __func__); + } + + return ret; +} + +/* Do a coarse time adjustment (ie. coarsely adjust (+/-) the time + * registers by the passed offset) + */ +static ssize_t emac_ptp_sysfs_cadj(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct emac_adapter *adpt = netdev_priv(to_net_dev(dev)); + s64 offset = 0; + ssize_t ret = -EINVAL; + + if (!kstrtos64(buf, 10, &offset)) { + struct timespec64 ts; + u64 new_offset; + u32 sec; + u32 nsec; + + ret = emac_ptp_gettime(&adpt->hw, &ts); + if (ret) { + pr_err("%s: emac_ptp_gettime failed.\n", __func__); + return ret; + } + + sec = ts.tv_sec; + nsec = ts.tv_nsec; + + new_offset = (((uint64_t)sec * NSEC_PER_SEC) + + (uint64_t)nsec) + offset; + + nsec = do_div(new_offset, NSEC_PER_SEC); + sec = new_offset; + + ts.tv_sec = sec; + ts.tv_nsec = nsec; + + ret = emac_ptp_settime(&adpt->hw, &ts); + if (ret) { + pr_err("%s: emac_ptp_settime failed.\n", __func__); + return ret; + } + ret = count; + } else { + pr_err("%s: sscanf failed.\n", __func__); + } + + return ret; +} + +/* Do a fine time adjustment (ie. have the timestamp registers adjust + * themselves by the passed amount).
+ */ +static ssize_t emac_ptp_sysfs_fadj(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct emac_adapter *adpt = netdev_priv(to_net_dev(dev)); + s64 offset = 0; + ssize_t ret = -EINVAL; + + if (!kstrtos64(buf, 10, &offset)) { + ret = emac_ptp_adjtime(&adpt->hw, offset); + if (ret) { + pr_err("%s: emac_ptp_adjtime failed.\n", __func__); + return ret; + } + ret = count; + } else { + pr_err("%s: sscanf failed.\n", __func__); + } + + return ret; +} + +static ssize_t emac_ptp_sysfs_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct emac_adapter *adpt = netdev_priv(to_net_dev(dev)); + int count = PAGE_SIZE; + ssize_t ret; + + ret = scnprintf(buf, count, "%s\n", + (adpt->hw.ptp_mode == emac_ptp_mode_master) ? + "master" : "slave"); + + return ret; +} + +static ssize_t emac_ptp_sysfs_mode_set(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct emac_adapter *adpt = netdev_priv(to_net_dev(dev)); + struct emac_hw *hw = &adpt->hw; + struct phy_device *phydev = adpt->phydev; + enum emac_ptp_mode mode; + + if (!strcmp(buf, "master")) + mode = emac_ptp_mode_master; + else if (!strcmp(buf, "slave")) + mode = emac_ptp_mode_slave; + else + return -EINVAL; + + if (mode == hw->ptp_mode) + goto out; + + if (TEST_FLAG(hw, HW_PTP_EN)) { + bool rx_tstamp_enable = TEST_FLAG(hw, HW_TS_RX_EN); + bool tx_tstamp_enable = TEST_FLAG(hw, HW_TS_TX_EN); + + emac_hw_1588_core_disable(hw); + emac_hw_1588_core_enable(hw, mode, hw->ptp_clk_mode, + phydev->speed, hw->frac_ns_adj); + if (rx_tstamp_enable) + emac_hw_config_rx_tstamp(hw, true); + if (tx_tstamp_enable) + emac_hw_config_tx_tstamp(hw, true); + + emac_reg_w32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_MASK, + hw->ptp_intr_mask); + wmb(); /* ensure PTP_EXPANDED_INT_MASK is set */ + } + + hw->ptp_mode = mode; + +out: + return count; +} + +static ssize_t emac_ptp_sysfs_frac_ns_adj_show(struct device *dev, + struct 
device_attribute *attr, + char *buf) +{ + struct emac_adapter *adpt = netdev_priv(to_net_dev(dev)); + struct emac_hw *hw = &adpt->hw; + int count = PAGE_SIZE; + ssize_t ret; + + if (!TEST_FLAG(hw, HW_PTP_EN)) + return -EPERM; + + ret = scnprintf(buf, count, "%d\n", adpt->hw.frac_ns_adj); + + return ret; +} + +static ssize_t emac_ptp_sysfs_frac_ns_adj_set(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct emac_adapter *adpt = netdev_priv(to_net_dev(dev)); + struct emac_hw *hw = &adpt->hw; + s32 adj; + + if (!TEST_FLAG(hw, HW_PTP_EN)) + return -EPERM; + + if (kstrtos32(buf, 0, &adj)) + return -EINVAL; + + if (!is_valid_frac_ns_adj(adj)) + return -EINVAL; + + emac_hw_set_rtc_inc_value(hw, adj); + hw->frac_ns_adj = adj; + + return count; +} + +static struct device_attribute ptp_sysfs_devattr[] = { + __ATTR(tstamp, 0660, + emac_ptp_sysfs_tstamp_show, emac_ptp_sysfs_tstamp_set), + __ATTR(mtnp, 0440, emac_ptp_sysfs_mtnp_show, NULL), + __ATTR(slam, 0220, NULL, emac_ptp_sysfs_slam), + __ATTR(cadj, 0220, NULL, emac_ptp_sysfs_cadj), + __ATTR(fadj, 0220, NULL, emac_ptp_sysfs_fadj), + __ATTR(frac_ns_adj, 0660, + emac_ptp_sysfs_frac_ns_adj_show, emac_ptp_sysfs_frac_ns_adj_set), + __ATTR(ptp_mode, 0660, + emac_ptp_sysfs_mode_show, emac_ptp_sysfs_mode_set), + __ATTR_NULL +}; + +static void emac_ptp_sysfs_create(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct device_attribute *devattr; + + for (devattr = ptp_sysfs_devattr; devattr->attr.name; devattr++) { + if (device_create_file(&netdev->dev, devattr)) { + emac_err(adpt, + "emac_ptp: failed to create sysfs files\n"); + break; + } + } +} + +static void emac_ptp_of_get_property(struct emac_adapter *adpt) +{ + struct emac_hw *hw = &adpt->hw; + struct device *parent = adpt->netdev->dev.parent; + struct device_node *node = parent->of_node; + const int *tbl; + struct emac_ptp_frac_ns_adj *adj_tbl = NULL; + int size, tbl_size; + + if 
(of_property_read_bool(node, "qcom,emac-ptp-grandmaster")) + hw->ptp_mode = emac_ptp_mode_master; + else + hw->ptp_mode = emac_ptp_mode_slave; + + hw->frac_ns_adj_tbl = NULL; + hw->frac_ns_adj_tbl_sz = 0; + + tbl = of_get_property(node, "qcom,emac-ptp-frac-ns-adj", &size); + if (!tbl) + return; + + if ((size % sizeof(struct emac_ptp_frac_ns_adj))) { + emac_err(adpt, "emac_ptp: invalid frac-ns-adj tbl size(%d)\n", + size); + return; + } + tbl_size = size / sizeof(struct emac_ptp_frac_ns_adj); + + adj_tbl = kzalloc(size, GFP_KERNEL); + if (!adj_tbl) + return; + + if (of_property_read_u32_array(node, "qcom,emac-ptp-frac-ns-adj", + (u32 *)adj_tbl, size / sizeof(u32))) { + emac_err(adpt, "emac_ptp: failed to read frac-ns-adj tbl\n"); + kfree(adj_tbl); + return; + } + + hw->frac_ns_adj_tbl = adj_tbl; + hw->frac_ns_adj_tbl_sz = tbl_size; +} + +int emac_ptp_init(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw *hw = &adpt->hw; + int ret = 0; + + emac_ptp_of_get_property(adpt); + spin_lock_init(&hw->ptp_lock); + emac_ptp_sysfs_create(netdev); + ret = emac_hw_1588_core_disable(hw); + + return ret; +} + +void emac_ptp_remove(struct net_device *netdev) +{ + struct emac_adapter *adpt = netdev_priv(netdev); + struct emac_hw *hw = &adpt->hw; + + kfree(hw->frac_ns_adj_tbl); +} diff --git a/drivers/net/ethernet/qualcomm/emac/emac_ptp.h b/drivers/net/ethernet/qualcomm/emac/emac_ptp.h new file mode 100644 index 000000000000..a9c347155010 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_ptp.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _EMAC_PTP_H_ +#define _EMAC_PTP_H_ + +int emac_ptp_init(struct net_device *netdev); +void emac_ptp_remove(struct net_device *netdev); +int emac_ptp_config(struct emac_hw *hw); +int emac_ptp_stop(struct emac_hw *hw); +int emac_ptp_set_linkspeed(struct emac_hw *hw, u32 speed); +int emac_tstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +void emac_ptp_intr(struct emac_hw *hw); + +#endif /* _EMAC_PTP_H_ */ diff --git a/drivers/net/ethernet/qualcomm/emac/emac_regs.h b/drivers/net/ethernet/qualcomm/emac/emac_regs.h new file mode 100644 index 000000000000..6faebb20f9cc --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_regs.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __EMAC_REGS_H__ +#define __EMAC_REGS_H__ + +#define SGMII_PHY_VERSION_1 1 +#define SGMII_PHY_VERSION_2 2 + +/* EMAC register offsets */ +#define EMAC_DMA_MAS_CTRL 0x001400 +#define EMAC_TIMER_INIT_VALUE 0x001404 +#define EMAC_IRQ_MOD_TIM_INIT 0x001408 +#define EMAC_BLK_IDLE_STS 0x00140c +#define EMAC_MDIO_CTRL 0x001414 +#define EMAC_PHY_STS 0x001418 +#define EMAC_PHY_LINK_DELAY 0x00141c +#define EMAC_SYS_ALIV_CTRL 0x001434 +#define EMAC_MDIO_EX_CTRL 0x001440 +#define EMAC_MAC_CTRL 0x001480 +#define EMAC_MAC_IPGIFG_CTRL 0x001484 +#define EMAC_MAC_STA_ADDR0 0x001488 +#define EMAC_MAC_STA_ADDR1 0x00148c +#define EMAC_HASH_TAB_REG0 0x001490 +#define EMAC_HASH_TAB_REG1 0x001494 +#define EMAC_MAC_HALF_DPLX_CTRL 0x001498 +#define EMAC_MAX_FRAM_LEN_CTRL 0x00149c +#define EMAC_WOL_CTRL0 0x0014a0 +#define EMAC_WOL_CTRL1 0x0014a4 +#define EMAC_WOL_CTRL2 0x0014a8 +#define EMAC_RSS_KEY0 0x0014b0 +#define EMAC_RSS_KEY1 0x0014b4 +#define EMAC_RSS_KEY2 0x0014b8 +#define EMAC_RSS_KEY3 0x0014bc +#define EMAC_RSS_KEY4 0x0014c0 +#define EMAC_RSS_KEY5 0x0014c4 +#define EMAC_RSS_KEY6 0x0014c8 +#define EMAC_RSS_KEY7 0x0014cc +#define EMAC_RSS_KEY8 0x0014d0 +#define EMAC_RSS_KEY9 0x0014d4 +#define EMAC_H1TPD_BASE_ADDR_LO 0x0014e0 +#define EMAC_H2TPD_BASE_ADDR_LO 0x0014e4 +#define EMAC_H3TPD_BASE_ADDR_LO 0x0014e8 +#define EMAC_INTER_SRAM_PART9 0x001534 +#define EMAC_DESC_CTRL_0 0x001540 +#define EMAC_DESC_CTRL_1 0x001544 +#define EMAC_DESC_CTRL_2 0x001550 +#define EMAC_DESC_CTRL_10 0x001554 +#define EMAC_DESC_CTRL_12 0x001558 +#define EMAC_DESC_CTRL_13 0x00155c +#define EMAC_DESC_CTRL_3 0x001560 +#define EMAC_DESC_CTRL_4 0x001564 +#define EMAC_DESC_CTRL_5 0x001568 +#define EMAC_DESC_CTRL_14 0x00156c +#define EMAC_DESC_CTRL_15 0x001570 +#define EMAC_DESC_CTRL_16 0x001574 +#define EMAC_DESC_CTRL_6 0x001578 +#define EMAC_DESC_CTRL_8 0x001580 +#define EMAC_DESC_CTRL_9 0x001584 +#define EMAC_DESC_CTRL_11 0x001588 +#define EMAC_TXQ_CTRL_0 0x001590 +#define EMAC_TXQ_CTRL_1 
0x001594 +#define EMAC_TXQ_CTRL_2 0x001598 +#define EMAC_RXQ_CTRL_0 0x0015a0 +#define EMAC_RXQ_CTRL_1 0x0015a4 +#define EMAC_RXQ_CTRL_2 0x0015a8 +#define EMAC_RXQ_CTRL_3 0x0015ac +#define EMAC_BASE_CPU_NUMBER 0x0015b8 +#define EMAC_DMA_CTRL 0x0015c0 +#define EMAC_MAILBOX_0 0x0015e0 +#define EMAC_MAILBOX_5 0x0015e4 +#define EMAC_MAILBOX_6 0x0015e8 +#define EMAC_MAILBOX_13 0x0015ec +#define EMAC_MAILBOX_2 0x0015f4 +#define EMAC_MAILBOX_3 0x0015f8 +#define EMAC_INT_STATUS 0x001600 +#define EMAC_INT_MASK 0x001604 +#define EMAC_INT_RETRIG_INIT 0x001608 +#define EMAC_MAILBOX_11 0x00160c +#define EMAC_AXI_MAST_CTRL 0x001610 +#define EMAC_MAILBOX_12 0x001614 +#define EMAC_MAILBOX_9 0x001618 +#define EMAC_MAILBOX_10 0x00161c +#define EMAC_ATHR_HEADER_CTRL 0x001620 +#define EMAC_RXMAC_STATC_REG0 0x001700 +#define EMAC_RXMAC_STATC_REG22 0x001758 +#define EMAC_TXMAC_STATC_REG0 0x001760 +#define EMAC_TXMAC_STATC_REG24 0x0017c0 +#define EMAC_CLK_GATE_CTRL 0x001814 +#define EMAC_CORE_HW_VERSION 0x001974 +#define EMAC_MISC_CTRL 0x001990 +#define EMAC_MAILBOX_7 0x0019e0 +#define EMAC_MAILBOX_8 0x0019e4 +#define EMAC_IDT_TABLE0 0x001b00 +#define EMAC_RXMAC_STATC_REG23 0x001bc8 +#define EMAC_RXMAC_STATC_REG24 0x001bcc +#define EMAC_TXMAC_STATC_REG25 0x001bd0 +#define EMAC_MAILBOX_15 0x001bd4 +#define EMAC_MAILBOX_16 0x001bd8 +#define EMAC_INT1_MASK 0x001bf0 +#define EMAC_INT1_STATUS 0x001bf4 +#define EMAC_INT2_MASK 0x001bf8 +#define EMAC_INT2_STATUS 0x001bfc +#define EMAC_INT3_MASK 0x001c00 +#define EMAC_INT3_STATUS 0x001c04 + +/* EMAC_CSR register offsets */ +#define EMAC_EMAC_WRAPPER_CSR1 0x000000 +#define EMAC_EMAC_WRAPPER_CSR2 0x000004 +#define EMAC_EMAC_WRAPPER_CSR3 0x000008 +#define EMAC_EMAC_WRAPPER_CSR5 0x000010 +#define EMAC_EMAC_WRAPPER_CSR10 0x000024 +#define EMAC_EMAC_WRAPPER_CSR18 0x000044 +#define EMAC_EMAC_WRAPPER_STATUS 0x000100 +#define EMAC_EMAC_WRAPPER_TX_TS_LO 0x000104 +#define EMAC_EMAC_WRAPPER_TX_TS_HI 0x000108 +#define EMAC_EMAC_WRAPPER_TX_TS_INX 0x00010c + +/* 
EMAC_1588 register offsets */ +#define EMAC_P1588_CTRL_REG 0x000048 +#define EMAC_P1588_TX_LATENCY 0x0000d4 +#define EMAC_P1588_INC_VALUE_2 0x0000d8 +#define EMAC_P1588_INC_VALUE_1 0x0000dc +#define EMAC_P1588_NANO_OFFSET_2 0x0000e0 +#define EMAC_P1588_NANO_OFFSET_1 0x0000e4 +#define EMAC_P1588_SEC_OFFSET_3 0x0000e8 +#define EMAC_P1588_SEC_OFFSET_2 0x0000ec +#define EMAC_P1588_SEC_OFFSET_1 0x0000f0 +#define EMAC_P1588_REAL_TIME_5 0x0000f4 +#define EMAC_P1588_REAL_TIME_4 0x0000f8 +#define EMAC_P1588_REAL_TIME_3 0x0000fc +#define EMAC_P1588_REAL_TIME_2 0x000100 +#define EMAC_P1588_REAL_TIME_1 0x000104 +#define EMAC_P1588_ADJUST_RTC 0x000110 +#define EMAC_P1588_PTP_EXPANDED_INT_MASK 0x0003c4 +#define EMAC_P1588_PTP_EXPANDED_INT_STATUS 0x0003c8 +#define EMAC_P1588_RTC_EXPANDED_CONFIG 0x000400 +#define EMAC_P1588_RTC_PRELOADED_5 0x000404 +#define EMAC_P1588_RTC_PRELOADED_4 0x000408 +#define EMAC_P1588_RTC_PRELOADED_3 0x00040c +#define EMAC_P1588_RTC_PRELOADED_2 0x000410 +#define EMAC_P1588_RTC_PRELOADED_1 0x000414 +#define EMAC_P1588_GRAND_MASTER_CONFIG_0 0x000800 +#define EMAC_P1588_GM_PPS_TIMESTAMP_2 0x000814 +#define EMAC_P1588_GM_PPS_TIMESTAMP_1 0x000818 + +#endif /* __EMAC_REGS_H__ */ diff --git a/drivers/net/ethernet/qualcomm/emac/emac_rgmii.c b/drivers/net/ethernet/qualcomm/emac/emac_rgmii.c new file mode 100644 index 000000000000..acdb3fc9e32c --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_rgmii.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* Qualcomm Technologies, Inc. EMAC RGMII Controller driver. + */ + +#include "emac_main.h" +#include "emac_hw.h" + +/* RGMII specific macros */ +#define EMAC_RGMII_PLL_LOCK_TIMEOUT (HZ / 1000) /* 1ms */ +#define EMAC_RGMII_CORE_IE_C 0x2001 +#define EMAC_RGMII_PLL_L_VAL 0x14 +#define EMAC_RGMII_PHY_MODE 0 + +static int emac_rgmii_init(struct emac_adapter *adpt) +{ + u32 val; + unsigned long timeout; + struct emac_hw *hw = &adpt->hw; + + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, 0, FREQ_MODE); + emac_reg_w32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR18, + EMAC_RGMII_CORE_IE_C); + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, + RGMII_PHY_MODE_BMSK, + (EMAC_RGMII_PHY_MODE << RGMII_PHY_MODE_SHFT)); + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, PHY_RESET, 0); + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR3, + PLL_L_VAL_5_0_BMSK, + (EMAC_RGMII_PLL_L_VAL << PLL_L_VAL_5_0_SHFT)); + + /* Reset PHY PLL */ + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR3, 0, PLL_RESET); + /* Ensure PLL is in reset */ + wmb(); + usleep_range(10, 15); + + /* power down analog sections of PLL and ensure the same */ + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR3, 0, BYPASSNL); + /* Ensure power down is complete before setting configuration */ + wmb(); + usleep_range(10, 15); + + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, 0, CKEDGE_SEL); + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, + TX_ID_EN_L, RX_ID_EN_L); + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, + HDRIVE_BMSK, (0x0 << HDRIVE_SHFT)); + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, WOL_EN, 0); + + /* Reset PHY */ + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, 0, 
PHY_RESET); + /* Ensure reset is complete before pulling out of reset */ + wmb(); + usleep_range(10, 15); + + /* Pull PHY out of reset */ + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, PHY_RESET, 0); + /* Ensure that pulling PHY out of reset is complete before enabling the + * PLL + */ + wmb(); + usleep_range(1000, 1500); + + /* Pull PHY PLL out of reset */ + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR3, PLL_RESET, 0); + /* Ensure PLL is enabled before enabling the AHB clock */ + wmb(); + usleep_range(10, 15); + + emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR5, + 0, RMII_125_CLK_EN); + /* Ensure AHB clock enable is written to HW before the loop waiting for + * it to complete + */ + wmb(); + + /* wait for PLL to lock */ + timeout = jiffies + EMAC_RGMII_PLL_LOCK_TIMEOUT; + do { + val = emac_reg_r32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_STATUS); + if (val & PLL_LOCK_DET) + break; + usleep_range(100, 150); + } while (time_after_eq(timeout, jiffies)); + + if (time_after(jiffies, timeout)) { + emac_err(adpt, "PHY PLL lock failed\n"); + return -EIO; + } + + return 0; +} + +static int emac_rgmii_config(struct platform_device *pdev, + struct emac_adapter *adpt) +{ + /* For rgmii phy, the mdio lines are dedicated pins */ + return emac_rgmii_init(adpt); +} + +static void emac_rgmii_reset_nop(struct emac_adapter *adpt) +{ +} + +static int emac_rgmii_link_setup_no_ephy(struct emac_adapter *adpt) +{ + emac_err(adpt, "error rgmii can't setup phy link without ephy\n"); + return -EOPNOTSUPP; +} + +static int emac_rgmii_link_check_no_ephy(struct emac_adapter *adpt, + struct phy_device *phydev) +{ + emac_err(adpt, "error rgmii can't check phy link without ephy\n"); + return -EOPNOTSUPP; +} + +static int emac_rgmii_up_nop(struct emac_adapter *adpt) +{ + return 0; +} + +static void emac_rgmii_down_nop(struct emac_adapter *adpt) +{ +} + +static void emac_rgmii_tx_clk_set_rate(struct emac_adapter *adpt) +{ + struct phy_device *phydev = adpt->phydev; + + switch
(phydev->speed) { + case SPEED_1000: + clk_set_rate(adpt->clk[EMAC_CLK_TX].clk, EMC_CLK_RATE_125MHZ); + break; + case SPEED_100: + clk_set_rate(adpt->clk[EMAC_CLK_TX].clk, EMC_CLK_RATE_25MHZ); + break; + case SPEED_10: + clk_set_rate(adpt->clk[EMAC_CLK_TX].clk, EMC_CLK_RATE_2_5MHZ); + break; + default: + emac_err(adpt, "error tx clk set rate because of unknown speed\n"); + } +} + +static void emac_rgmii_periodic_nop(struct emac_adapter *adpt) +{ +} + +struct emac_phy_ops emac_rgmii_ops = { + .config = emac_rgmii_config, + .up = emac_rgmii_up_nop, + .down = emac_rgmii_down_nop, + .reset = emac_rgmii_reset_nop, + .link_setup_no_ephy = emac_rgmii_link_setup_no_ephy, + .link_check_no_ephy = emac_rgmii_link_check_no_ephy, + .tx_clk_set_rate = emac_rgmii_tx_clk_set_rate, + .periodic_task = emac_rgmii_periodic_nop, +}; diff --git a/drivers/net/ethernet/qualcomm/emac/emac_rgmii.h b/drivers/net/ethernet/qualcomm/emac/emac_rgmii.h new file mode 100644 index 000000000000..3fc2a1ec7f0f --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_rgmii.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (c) 2015, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _EMAC_RGMII_H_ +#define _EMAC_RGMII_H_ + +extern struct emac_phy_ops emac_rgmii_ops; + +#endif /*_EMAC_RGMII_H_*/ diff --git a/drivers/net/ethernet/qualcomm/emac/emac_sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac_sgmii.c new file mode 100644 index 000000000000..4f2890960797 --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_sgmii.c @@ -0,0 +1,852 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +/* Qualcomm Technologies, Inc. EMAC SGMII Controller driver. 
+ */ +#include <linux/acpi.h> +#include <linux/iopoll.h> +#include <linux/of_device.h> + +#include "emac_sgmii.h" +#include "emac_hw.h" + +#define PCS_MAX_REG_CNT 10 +#define PLL_MAX_REG_CNT 18 + +void emac_reg_write_all(void __iomem *base, const struct emac_reg_write *itr) +{ + for (; itr->offset != END_MARKER; ++itr) + writel_relaxed(itr->val, base + itr->offset); +} + +static const struct emac_reg_write + physical_coding_sublayer_programming[][PCS_MAX_REG_CNT] = { + /* EMAC_PHY_MAP_DEFAULT */ + { + {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)}, + {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B}, + {EMAC_SGMII_PHY_CMN_PWR_CTRL, + BIAS_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN + | PLL_RXCLK_EN}, + {EMAC_SGMII_PHY_TX_PWR_CTRL, L0_TX_EN | L0_CLKBUF_EN + | L0_TRAN_BIAS_EN}, + {EMAC_SGMII_PHY_RX_PWR_CTRL, + L0_RX_SIGDET_EN | L0_RX_TERM_MODE(1) | L0_RX_I_EN}, + {EMAC_SGMII_PHY_CMN_PWR_CTRL, + BIAS_EN | PLL_EN | SYSCLK_EN | CLKBUF_L_EN + | PLL_TXCLK_EN | PLL_RXCLK_EN}, + {EMAC_SGMII_PHY_LANE_CTRL1, + L0_RX_EQUALIZE_ENABLE | L0_RESET_TSYNC_EN + | L0_DRV_LVL(15)}, + {END_MARKER, END_MARKER}, + }, + /* EMAC_PHY_MAP_MDM9607 */ + { + {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)}, + {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B}, + {EMAC_SGMII_PHY_CMN_PWR_CTRL, + BIAS_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN + | PLL_RXCLK_EN}, + {EMAC_SGMII_PHY_TX_PWR_CTRL, L0_TX_EN | L0_CLKBUF_EN + | L0_TRAN_BIAS_EN}, + {EMAC_SGMII_PHY_RX_PWR_CTRL, + L0_RX_SIGDET_EN | L0_RX_TERM_MODE(1) | L0_RX_I_EN}, + {EMAC_SGMII_PHY_CMN_PWR_CTRL, + BIAS_EN | PLL_EN | SYSCLK_EN | CLKBUF_L_EN + | PLL_TXCLK_EN | PLL_RXCLK_EN}, + {EMAC_QSERDES_COM_PLL_VCOTAIL_EN, PLL_VCO_TAIL_MUX | + PLL_VCO_TAIL(124) | PLL_EN_VCOTAIL_EN}, + {EMAC_QSERDES_COM_PLL_CNTRL, OCP_EN | PLL_DIV_FFEN + | PLL_DIV_ORD}, + {EMAC_SGMII_PHY_LANE_CTRL1, + L0_RX_EQUALIZE_ENABLE | L0_RESET_TSYNC_EN + | L0_DRV_LVL(15)}, + {END_MARKER, END_MARKER} + }, + /* EMAC_PHY_MAP_V2 */ + { + {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B}, + {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)}, + {EMAC_SGMII_PHY_TX_PWR_CTRL, 0}, + 
{EMAC_SGMII_PHY_LANE_CTRL1, L0_RX_EQUALIZE_ENABLE}, + {END_MARKER, END_MARKER} + } +}; + +static const struct emac_reg_write sysclk_refclk_setting[] = { + {EMAC_QSERDES_COM_SYSCLK_EN_SEL, SYSCLK_SEL_CMOS}, + {EMAC_QSERDES_COM_SYS_CLK_CTRL, SYSCLK_CM | SYSCLK_AC_COUPLE}, + {END_MARKER, END_MARKER}, +}; + +static const struct emac_reg_write pll_setting[][PLL_MAX_REG_CNT] = { + /* EMAC_PHY_MAP_DEFAULT */ + { + {EMAC_QSERDES_COM_PLL_IP_SETI, PLL_IPSETI(1)}, + {EMAC_QSERDES_COM_PLL_CP_SETI, PLL_CPSETI(59)}, + {EMAC_QSERDES_COM_PLL_IP_SETP, PLL_IPSETP(10)}, + {EMAC_QSERDES_COM_PLL_CP_SETP, PLL_CPSETP(9)}, + {EMAC_QSERDES_COM_PLL_CRCTRL, PLL_RCTRL(15) | PLL_CCTRL(11)}, + {EMAC_QSERDES_COM_PLL_CNTRL, OCP_EN | PLL_DIV_FFEN + | PLL_DIV_ORD}, + {EMAC_QSERDES_COM_DEC_START1, DEC_START1_MUX | DEC_START1(2)}, + {EMAC_QSERDES_COM_DEC_START2, DEC_START2_MUX | DEC_START2}, + {EMAC_QSERDES_COM_DIV_FRAC_START1, + DIV_FRAC_START_MUX | DIV_FRAC_START(85)}, + {EMAC_QSERDES_COM_DIV_FRAC_START2, + DIV_FRAC_START_MUX | DIV_FRAC_START(42)}, + {EMAC_QSERDES_COM_DIV_FRAC_START3, + DIV_FRAC_START3_MUX | DIV_FRAC_START3(3)}, + {EMAC_QSERDES_COM_PLLLOCK_CMP1, PLLLOCK_CMP(43)}, + {EMAC_QSERDES_COM_PLLLOCK_CMP2, PLLLOCK_CMP(104)}, + {EMAC_QSERDES_COM_PLLLOCK_CMP3, PLLLOCK_CMP(0)}, + {EMAC_QSERDES_COM_PLLLOCK_CMP_EN, PLLLOCK_CMP_EN}, + {EMAC_QSERDES_COM_RESETSM_CNTRL, FRQ_TUNE_MODE}, + {END_MARKER, END_MARKER} + }, + /* EMAC_PHY_MAP_MDM9607 */ + { + {EMAC_QSERDES_COM_PLL_IP_SETI, PLL_IPSETI(3)}, + {EMAC_QSERDES_COM_PLL_CP_SETI, PLL_CPSETI(59)}, + {EMAC_QSERDES_COM_PLL_IP_SETP, PLL_IPSETP(10)}, + {EMAC_QSERDES_COM_PLL_CP_SETP, PLL_CPSETP(9)}, + {EMAC_QSERDES_COM_PLL_CRCTRL, PLL_RCTRL(15) | PLL_CCTRL(11)}, + {EMAC_QSERDES_COM_DEC_START1, DEC_START1_MUX | DEC_START1(2)}, + {EMAC_QSERDES_COM_DEC_START2, DEC_START2_MUX | DEC_START2}, + {EMAC_QSERDES_COM_DIV_FRAC_START1, + DIV_FRAC_START_MUX | DIV_FRAC_START(85)}, + {EMAC_QSERDES_COM_DIV_FRAC_START2, + DIV_FRAC_START_MUX | DIV_FRAC_START(42)}, + 
{EMAC_QSERDES_COM_DIV_FRAC_START3, + DIV_FRAC_START3_MUX | DIV_FRAC_START3(3)}, + {EMAC_QSERDES_COM_PLLLOCK_CMP1, PLLLOCK_CMP(43)}, + {EMAC_QSERDES_COM_PLLLOCK_CMP2, PLLLOCK_CMP(104)}, + {EMAC_QSERDES_COM_PLLLOCK_CMP3, PLLLOCK_CMP(0)}, + {EMAC_QSERDES_COM_PLLLOCK_CMP_EN, PLLLOCK_CMP_EN}, + {EMAC_QSERDES_COM_RESETSM_CNTRL, FRQ_TUNE_MODE}, + {EMAC_QSERDES_COM_RES_TRIM_SEARCH, RESTRIM_SEARCH(0)}, + {EMAC_QSERDES_COM_BGTC, BGTC(7)}, + {END_MARKER, END_MARKER}, + } +}; + +static const struct emac_reg_write cdr_setting[] = { + {EMAC_QSERDES_RX_CDR_CONTROL, + SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(2)}, + {EMAC_QSERDES_RX_CDR_CONTROL2, + SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(4)}, + {END_MARKER, END_MARKER}, +}; + +static const struct emac_reg_write tx_rx_setting[] = { + {EMAC_QSERDES_TX_BIST_MODE_LANENO, 0}, + {EMAC_QSERDES_TX_TX_DRV_LVL, TX_DRV_LVL_MUX | TX_DRV_LVL(15)}, + {EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN, EMP_EN_MUX | EMP_EN}, + {EMAC_QSERDES_TX_TX_EMP_POST1_LVL, + TX_EMP_POST1_LVL_MUX | TX_EMP_POST1_LVL(1)}, + {EMAC_QSERDES_RX_RX_EQ_GAIN12, RX_EQ_GAIN2(15) | RX_EQ_GAIN1(15)}, + {EMAC_QSERDES_TX_LANE_MODE, LANE_MODE(8)}, + {END_MARKER, END_MARKER} +}; + +static const struct emac_reg_write sgmii_v2_laned[] = { + /* CDR Settings */ + {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, + UCDR_STEP_BY_TWO_MODE0 | UCDR_XO_GAIN_MODE(10)}, + {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_XO_GAIN_MODE(0)}, + {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)}, + + /* TX/RX Settings */ + {EMAC_SGMII_LN_RX_EN_SIGNAL, SIGDET_LP_BYP_PS4 | SIGDET_EN_PS0_TO_PS2}, + + {EMAC_SGMII_LN_DRVR_CTRL0, TXVAL_VALID_INIT | KR_PCIGEN3_MODE}, + {EMAC_SGMII_LN_DRVR_TAP_EN, MAIN_EN}, + {EMAC_SGMII_LN_TX_MARGINING, TX_MARGINING_MUX | TX_MARGINING(25)}, + {EMAC_SGMII_LN_TX_PRE, TX_PRE_MUX}, + {EMAC_SGMII_LN_TX_POST, TX_POST_MUX}, + + {EMAC_SGMII_LN_CML_CTRL_MODE0, + CML_GEAR_MODE(1) | CML2CMOS_IBOOST_MODE(1)}, + {EMAC_SGMII_LN_MIXER_CTRL_MODE0, + MIXER_LOADB_MODE(12) 
| MIXER_DATARATE_MODE(1)}, + {EMAC_SGMII_LN_VGA_INITVAL, VGA_THRESH_DFE(31)}, + {EMAC_SGMII_LN_SIGDET_ENABLES, + SIGDET_LP_BYP_PS0_TO_PS2 | SIGDET_FLT_BYP}, + {EMAC_SGMII_LN_SIGDET_CNTRL, SIGDET_LVL(8)}, + + {EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL, SIGDET_DEGLITCH_CTRL(4)}, + {EMAC_SGMII_LN_RX_MISC_CNTRL0, 0}, + {EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV, + DRVR_LOGIC_CLK_EN | DRVR_LOGIC_CLK_DIV(4)}, + + {EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)}, + {EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(2)}, + {EMAC_SGMII_LN_RX_BAND, BAND_MODE0(3)}, + {EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)}, + {EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(3)}, + {EMAC_SGMII_LN_RSM_CONFIG, BYPASS_RSM_SAMP_CAL | BYPASS_RSM_DLL_CAL}, + {END_MARKER, END_MARKER} +}; + +void emac_sgmii_reset_prepare(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = adpt->phy.private; + u32 val; + + /* Reset PHY */ + val = readl_relaxed(sgmii->base + EMAC_EMAC_WRAPPER_CSR2); + writel_relaxed(((val & ~PHY_RESET) | PHY_RESET), + sgmii->base + EMAC_EMAC_WRAPPER_CSR2); + /* Ensure phy-reset command is written to HW before the release cmd */ + wmb(); + msleep(50); + val = readl_relaxed(sgmii->base + EMAC_EMAC_WRAPPER_CSR2); + writel_relaxed((val & ~PHY_RESET), + sgmii->base + EMAC_EMAC_WRAPPER_CSR2); + /* Ensure phy-reset release command is written to HW before initializing + * SGMII + */ + wmb(); + msleep(50); +} + +static void emac_sgmii_reset(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = adpt->phy.private; + int ret; + + emac_clk_set_rate(adpt, EMAC_CLK_HIGH_SPEED, EMC_CLK_RATE_19_2MHZ); + emac_sgmii_reset_prepare(adpt); + + ret = sgmii->initialize(adpt); + if (ret) + emac_err(adpt, + "could not reinitialize internal PHY (error=%i)\n", + ret); + + emac_clk_set_rate(adpt, EMAC_CLK_HIGH_SPEED, EMC_CLK_RATE_125MHZ); +} + +/* LINK */ +int emac_sgmii_link_init(struct emac_adapter *adpt) +{ + struct phy_device *phydev = adpt->phydev; + struct emac_sgmii *sgmii = adpt->phy.private; + u32 val; + int 
autoneg, speed, duplex; + + autoneg = (adpt->phydev) ? phydev->autoneg : AUTONEG_ENABLE; + speed = (adpt->phydev) ? phydev->speed : SPEED_UNKNOWN; + duplex = (adpt->phydev) ? phydev->duplex : DUPLEX_UNKNOWN; + + val = readl_relaxed(sgmii->base + EMAC_SGMII_PHY_AUTONEG_CFG2); + + if (autoneg == AUTONEG_ENABLE) { + val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG); + val |= AN_ENABLE; + writel_relaxed(val, + sgmii->base + EMAC_SGMII_PHY_AUTONEG_CFG2); + } else { + u32 speed_cfg = 0; + + switch (speed) { + case SPEED_10: + speed_cfg = SPDMODE_10; + break; + case SPEED_100: + speed_cfg = SPDMODE_100; + break; + case SPEED_1000: + speed_cfg = SPDMODE_1000; + break; + default: + return -EINVAL; + } + + if (duplex == DUPLEX_FULL) + speed_cfg |= DUPLEX_MODE; + + val &= ~AN_ENABLE; + writel_relaxed(speed_cfg, + sgmii->base + EMAC_SGMII_PHY_SPEED_CFG1); + writel_relaxed(val, sgmii->base + EMAC_SGMII_PHY_AUTONEG_CFG2); + } + /* Ensure Auto-Neg setting are written to HW before leaving */ + wmb(); + + return 0; +} + +int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) +{ + struct emac_sgmii *sgmii = adpt->phy.private; + u32 status; + + writel_relaxed(irq_bits, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR); + writel_relaxed(IRQ_GLOBAL_CLEAR, sgmii->base + EMAC_SGMII_PHY_IRQ_CMD); + /* Ensure interrupt clear command is written to HW */ + wmb(); + + /* After set the IRQ_GLOBAL_CLEAR bit, the status clearing must + * be confirmed before clearing the bits in other registers. + * It takes a few cycles for hw to clear the interrupt status. 
+ */ + if (readl_poll_timeout_atomic(sgmii->base + + EMAC_SGMII_PHY_INTERRUPT_STATUS, + status, !(status & irq_bits), 1, + SGMII_PHY_IRQ_CLR_WAIT_TIME)) { + emac_err(adpt, + "error: failed clear SGMII irq: status:0x%x bits:0x%x\n", + status, irq_bits); + return -EIO; + } + + /* Finalize clearing procedure */ + writel_relaxed(0, sgmii->base + EMAC_SGMII_PHY_IRQ_CMD); + writel_relaxed(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR); + /* Ensure that clearing procedure finalization is written to HW */ + wmb(); + + return 0; +} + +int emac_sgmii_init_ephy_nop(struct emac_adapter *adpt) +{ + return 0; +} + +int emac_sgmii_autoneg_check(struct emac_adapter *adpt, + struct phy_device *phydev) +{ + struct emac_sgmii *sgmii = adpt->phy.private; + u32 autoneg0, autoneg1, status; + + autoneg0 = readl_relaxed(sgmii->base + EMAC_SGMII_PHY_AUTONEG0_STATUS); + autoneg1 = readl_relaxed(sgmii->base + EMAC_SGMII_PHY_AUTONEG1_STATUS); + status = ((autoneg1 & 0xff) << 8) | (autoneg0 & 0xff); + + if (!(status & TXCFG_LINK)) { + phydev->link = false; + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + return 0; + } + + phydev->link = true; + + switch (status & TXCFG_MODE_BMSK) { + case TXCFG_1000_FULL: + phydev->speed = SPEED_1000; + phydev->duplex = DUPLEX_FULL; + break; + case TXCFG_100_FULL: + phydev->speed = SPEED_100; + phydev->duplex = DUPLEX_FULL; + break; + case TXCFG_100_HALF: + phydev->speed = SPEED_100; + phydev->duplex = DUPLEX_HALF; + break; + case TXCFG_10_FULL: + phydev->speed = SPEED_10; + phydev->duplex = DUPLEX_FULL; + break; + case TXCFG_10_HALF: + phydev->speed = SPEED_10; + phydev->duplex = DUPLEX_HALF; + break; + default: + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + break; + } + return 0; +} + +int emac_sgmii_link_check_no_ephy(struct emac_adapter *adpt, + struct phy_device *phydev) +{ + struct emac_sgmii *sgmii = adpt->phy.private; + u32 val; + + val = readl_relaxed(sgmii->base + EMAC_SGMII_PHY_AUTONEG_CFG2); + if (val & 
AN_ENABLE) + return emac_sgmii_autoneg_check(adpt, phydev); + + val = readl_relaxed(sgmii->base + EMAC_SGMII_PHY_SPEED_CFG1); + val &= DUPLEX_MODE | SPDMODE_BMSK; + switch (val) { + case DUPLEX_MODE | SPDMODE_1000: + phydev->speed = SPEED_1000; + phydev->duplex = DUPLEX_FULL; + break; + case DUPLEX_MODE | SPDMODE_100: + phydev->speed = SPEED_100; + phydev->duplex = DUPLEX_FULL; + break; + case SPDMODE_100: + phydev->speed = SPEED_100; + phydev->duplex = DUPLEX_HALF; + break; + case DUPLEX_MODE | SPDMODE_10: + phydev->speed = SPEED_10; + phydev->duplex = DUPLEX_FULL; + break; + case SPDMODE_10: + phydev->speed = SPEED_10; + phydev->duplex = DUPLEX_HALF; + break; + default: + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + break; + } + phydev->link = true; + return 0; +} + +irqreturn_t emac_sgmii_isr(int _irq, void *data) +{ + struct emac_adapter *adpt = data; + struct emac_sgmii *sgmii = adpt->phy.private; + u32 status; + + emac_dbg(adpt, intr, adpt->netdev, "receive sgmii interrupt\n"); + + do { + status = readl_relaxed(sgmii->base + + EMAC_SGMII_PHY_INTERRUPT_STATUS) & + SGMII_ISR_MASK; + if (!status) + break; + + if (status & SGMII_PHY_INTERRUPT_ERR) { + SET_FLAG(adpt, ADPT_TASK_CHK_SGMII_REQ); + if (!TEST_FLAG(adpt, ADPT_STATE_DOWN)) + emac_task_schedule(adpt); + } + + if (status & SGMII_ISR_AN_MASK) + emac_check_lsc(adpt); + + if (emac_sgmii_irq_clear(adpt, status) != 0) { + /* reset */ + SET_FLAG(adpt, ADPT_TASK_REINIT_REQ); + emac_task_schedule(adpt); + break; + } + } while (1); + + return IRQ_HANDLED; +} + +int emac_sgmii_up(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = adpt->phy.private; + int ret; + + ret = request_irq(sgmii->irq, emac_sgmii_isr, IRQF_TRIGGER_RISING, + "sgmii_irq", adpt); + if (ret) + emac_err(adpt, + "error:%d on request_irq(%d:sgmii_irq)\n", ret, + sgmii->irq); + + /* enable sgmii irq */ + writel_relaxed(SGMII_ISR_MASK, + sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + + return ret; +} + +void 
emac_sgmii_down(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = adpt->phy.private; + + writel_relaxed(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + synchronize_irq(sgmii->irq); + free_irq(sgmii->irq, adpt); +} + +int emac_sgmii_link_setup_no_ephy(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = adpt->phy.private; + + /* The AN_ENABLE and SPEED_CFG can't change on fly. The SGMII_PHY has + * to be re-initialized. + */ + emac_sgmii_reset_prepare(adpt); + return sgmii->initialize(adpt); +} + +void emac_sgmii_tx_clk_set_rate_nop(struct emac_adapter *adpt) +{ +} + +/* Check SGMII for error */ +void emac_sgmii_periodic_check(struct emac_adapter *adpt) +{ + struct emac_sgmii *sgmii = adpt->phy.private; + + if (!TEST_FLAG(adpt, ADPT_TASK_CHK_SGMII_REQ)) + return; + CLR_FLAG(adpt, ADPT_TASK_CHK_SGMII_REQ); + + /* ensure that no reset is in progress while link task is running */ + while (TEST_N_SET_FLAG(adpt, ADPT_STATE_RESETTING)) + msleep(20); /* Reset might take few 10s of ms */ + + if (TEST_FLAG(adpt, ADPT_STATE_DOWN)) + goto sgmii_task_done; + + if (readl_relaxed(sgmii->base + EMAC_SGMII_PHY_RX_CHK_STATUS) & 0x40) + goto sgmii_task_done; + + emac_err(adpt, "SGMII CDR not locked\n"); + +sgmii_task_done: + CLR_FLAG(adpt, ADPT_STATE_RESETTING); +} + +static int emac_sgmii_init_v1_0(struct emac_adapter *adpt) +{ + struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *sgmii = phy->private; + unsigned int i; + int ret; + + ret = emac_sgmii_link_init(adpt); + if (ret) + return ret; + + emac_reg_write_all(sgmii->base, + (const struct emac_reg_write *) + &physical_coding_sublayer_programming[EMAC_PHY_MAP_DEFAULT]); + + /* Ensure Rx/Tx lanes power configuration is written to hw before + * configuring the SerDes engine's clocks + */ + wmb(); + + emac_reg_write_all(sgmii->base, sysclk_refclk_setting); + emac_reg_write_all(sgmii->base, + (const struct emac_reg_write *) + &pll_setting[EMAC_PHY_MAP_DEFAULT]); + emac_reg_write_all(sgmii->base, cdr_setting); + 
emac_reg_write_all(sgmii->base, tx_rx_setting); + + /* Ensure SerDes engine configuration is written to hw before powering + * it up + */ + wmb(); + + writel_relaxed(SERDES_START, sgmii->base + EMAC_SGMII_PHY_SERDES_START); + + /* Ensure Rx/Tx SerDes engine power-up command is written to HW */ + wmb(); + + for (i = 0; i < SERDES_START_WAIT_TIMES; i++) { + if (readl_relaxed(sgmii->base + EMAC_QSERDES_COM_RESET_SM) + & READY) + break; + usleep_range(100, 200); + } + + if (i == SERDES_START_WAIT_TIMES) { + emac_err(adpt, "serdes failed to start\n"); + return -EIO; + } + /* Mask out all the SGMII Interrupt */ + writel_relaxed(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + /* Ensure SGMII interrupts are masked out before clearing them */ + wmb(); + + emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR); + + return 0; +} + +static int emac_sgmii_init_v1_1(struct emac_adapter *adpt) +{ + struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *sgmii = phy->private; + unsigned int i; + int ret; + + ret = emac_sgmii_link_init(adpt); + if (ret) + return ret; + + emac_reg_write_all(sgmii->base, + (const struct emac_reg_write *) + &physical_coding_sublayer_programming[EMAC_PHY_MAP_MDM9607]); + + /* Ensure Rx/Tx lanes power configuration is written to hw before + * configuring the SerDes engine's clocks + */ + wmb(); + + emac_reg_write_all(sgmii->base, sysclk_refclk_setting); + emac_reg_write_all(sgmii->base, + (const struct emac_reg_write *) + &pll_setting[EMAC_PHY_MAP_MDM9607]); + emac_reg_write_all(sgmii->base, cdr_setting); + emac_reg_write_all(sgmii->base, tx_rx_setting); + + /* Ensure SerDes engine configuration is written to hw before powering + * it up + */ + wmb(); + + /* Power up the Ser/Des engine */ + writel_relaxed(SERDES_START, sgmii->base + EMAC_SGMII_PHY_SERDES_START); + + /* Ensure Rx/Tx SerDes engine power-up command is written to HW */ + wmb(); + + for (i = 0; i < SERDES_START_WAIT_TIMES; i++) { + if (readl_relaxed(sgmii->base + EMAC_QSERDES_COM_RESET_SM) + 
& READY) + break; + usleep_range(100, 200); + } + + if (i == SERDES_START_WAIT_TIMES) { + emac_err(adpt, "serdes failed to start\n"); + return -EIO; + } + /* Mask out all the SGMII Interrupt */ + writel_relaxed(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK); + /* Ensure SGMII interrupts are masked out before clearing them */ + wmb(); + + emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR); + + return 0; +} + +static int emac_sgmii_init_v2(struct emac_adapter *adpt) +{ + struct emac_phy *phy = &adpt->phy; + struct emac_sgmii *sgmii = phy->private; + void __iomem *phy_regs = sgmii->base; + void __iomem *laned = sgmii->digital; + unsigned int i; + u32 lnstatus; + int ret; + + ret = emac_sgmii_link_init(adpt); + if (ret) + return ret; + + /* PCS lane-x init */ + emac_reg_write_all(sgmii->base, + (const struct emac_reg_write *) + &physical_coding_sublayer_programming[EMAC_PHY_MAP_V2]); + + /* Ensure Rx/Tx lanes power configuration is written to hw before + * configuring the SerDes engine's clocks + */ + wmb(); + + /* SGMII lane-x init */ + emac_reg_write_all(sgmii->digital, sgmii_v2_laned); + + /* Power up PCS and start reset lane state machine */ + writel_relaxed(0, phy_regs + EMAC_SGMII_PHY_RESET_CTRL); + writel_relaxed(1, laned + SGMII_LN_RSM_START); + wmb(); /* ensure power up is written before checking lane status */ + + /* Wait for c_ready assertion */ + for (i = 0; i < SERDES_START_WAIT_TIMES; i++) { + lnstatus = readl_relaxed(phy_regs + SGMII_PHY_LN_LANE_STATUS); + rmb(); /* ensure status read is complete before testing it */ + if (lnstatus & BIT(1)) + break; + usleep_range(100, 200); + } + + if (i == SERDES_START_WAIT_TIMES) { + emac_err(adpt, "SGMII failed to start\n"); + return -EIO; + } + + /* Disable digital and SERDES loopback */ + writel_relaxed(0, phy_regs + SGMII_PHY_LN_BIST_GEN0); + writel_relaxed(0, phy_regs + SGMII_PHY_LN_BIST_GEN2); + writel_relaxed(0, phy_regs + SGMII_PHY_LN_CDR_CTRL1); + + /* Mask out all the SGMII Interrupt */ + 
writel_relaxed(0, phy_regs + EMAC_SGMII_PHY_INTERRUPT_MASK); + wmb(); /* ensure writes are flushed to hw */ + + emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR); + + return 0; +} + +static int emac_sgmii_acpi_match(struct device *dev, void *data) +{ + static const struct acpi_device_id match_table[] = { + { + .id = "QCOM8071", + .driver_data = (kernel_ulong_t)emac_sgmii_init_v2, + }, + {} + }; + const struct acpi_device_id *id = acpi_match_device(match_table, dev); + emac_sgmii_initialize *initialize = data; + + if (id) + *initialize = (emac_sgmii_initialize)id->driver_data; + + return !!id; +} + +static const struct of_device_id emac_sgmii_dt_match[] = { + { + .compatible = "qcom,fsm9900-emac-sgmii", + .data = emac_sgmii_init_v1_0, + }, + { + .compatible = "qcom,qdf2432-emac-sgmii", + .data = emac_sgmii_init_v2, + }, + { + .compatible = "qcom,mdm9607-emac-sgmii", + .data = emac_sgmii_init_v1_1, + }, + {} +}; + +int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) +{ + struct platform_device *sgmii_pdev = NULL; + struct emac_sgmii *sgmii; + struct resource *res; + int ret = 0; + + sgmii = devm_kzalloc(&pdev->dev, sizeof(*sgmii), GFP_KERNEL); + if (!sgmii) + return -ENOMEM; + + if (ACPI_COMPANION(&pdev->dev)) { + struct device *dev; + + dev = device_find_child(&pdev->dev, &sgmii->initialize, + emac_sgmii_acpi_match); + + if (!dev) { + emac_err(adpt, "cannot find internal phy node\n"); + return -ENODEV; + } + + sgmii_pdev = to_platform_device(dev); + } else { + const struct of_device_id *match; + struct device_node *np; + + np = of_parse_phandle(pdev->dev.of_node, "internal-phy", 0); + if (!np) { + emac_err(adpt, "missing internal-phy property\n"); + return -ENODEV; + } + + sgmii_pdev = of_find_device_by_node(np); + if (!sgmii_pdev) { + emac_err(adpt, "invalid internal-phy property\n"); + return -ENODEV; + } + + match = of_match_device(emac_sgmii_dt_match, &sgmii_pdev->dev); + if (!match) { + emac_err(adpt, "unrecognized internal phy 
node\n"); + ret = -ENODEV; + goto error_put_device; + } + + sgmii->initialize = (emac_sgmii_initialize)match->data; + } + + /* Base address is the first address */ + res = platform_get_resource_byname(sgmii_pdev, IORESOURCE_MEM, + "emac_sgmii"); + if (!res) { + emac_err(adpt, + "error platform_get_resource_byname(emac_sgmii)\n"); + ret = -EINVAL; + goto error_put_device; + } + + sgmii->base = ioremap(res->start, resource_size(res)); + if (IS_ERR(sgmii->base)) { + emac_err(adpt, + "error:%ld remap (start:0x%lx size:0x%lx)\n", + PTR_ERR(sgmii->base), (ulong)res->start, + (ulong)resource_size(res)); + ret = -ENOMEM; + goto error_put_device; + } + + /* v2 SGMII has a per-lane digital, so parse it if it exists */ + res = platform_get_resource_byname(sgmii_pdev, IORESOURCE_MEM, + "emac_digital"); + if (res) { + sgmii->digital = devm_ioremap_resource(&sgmii_pdev->dev, res); + if (!sgmii->digital) { + ret = -ENOMEM; + goto error_unmap_base; + } + } + + ret = platform_get_irq_byname(sgmii_pdev, "emac_sgmii_irq"); + if (ret < 0) + goto error; + + sgmii->irq = ret; + adpt->phy.private = sgmii; + + ret = sgmii->initialize(adpt); + if (ret) + goto error; + + /* We've remapped the addresses, so we don't need the device any + * more. of_find_device_by_node() says we should release it. 
+ */ + put_device(&sgmii_pdev->dev); + + return 0; + +error: + if (sgmii->digital) + iounmap(sgmii->digital); +error_unmap_base: + iounmap(sgmii->base); +error_put_device: + put_device(&sgmii_pdev->dev); + + return ret; +} + +struct emac_phy_ops emac_sgmii_ops = { + .config = emac_sgmii_config, + .up = emac_sgmii_up, + .down = emac_sgmii_down, + .reset = emac_sgmii_reset, + .link_setup_no_ephy = emac_sgmii_link_setup_no_ephy, + .link_check_no_ephy = emac_sgmii_link_check_no_ephy, + .tx_clk_set_rate = emac_sgmii_tx_clk_set_rate_nop, + .periodic_task = emac_sgmii_periodic_check, +}; diff --git a/drivers/net/ethernet/qualcomm/emac/emac_sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac_sgmii.h new file mode 100644 index 000000000000..4b1e3d85fa2a --- /dev/null +++ b/drivers/net/ethernet/qualcomm/emac/emac_sgmii.h @@ -0,0 +1,398 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _EMAC_SGMII_H_ +#define _EMAC_SGMII_H_ + +#include "emac_main.h" + +/* EMAC_QSERDES register offsets */ +#define EMAC_QSERDES_COM_SYS_CLK_CTRL 0x000000 +#define EMAC_QSERDES_COM_PLL_VCOTAIL_EN 0x000004 +#define EMAC_QSERDES_COM_PLL_CNTRL 0x000014 +#define EMAC_QSERDES_COM_PLL_IP_SETI 0x000018 +#define EMAC_QSERDES_COM_PLL_CP_SETI 0x000024 +#define EMAC_QSERDES_COM_PLL_IP_SETP 0x000028 +#define EMAC_QSERDES_COM_PLL_CP_SETP 0x00002c +#define EMAC_QSERDES_COM_SYSCLK_EN_SEL 0x000038 +#define EMAC_QSERDES_COM_RESETSM_CNTRL 0x000040 +#define EMAC_QSERDES_COM_PLLLOCK_CMP1 0x000044 +#define EMAC_QSERDES_COM_PLLLOCK_CMP2 0x000048 +#define EMAC_QSERDES_COM_PLLLOCK_CMP3 0x00004c +#define EMAC_QSERDES_COM_PLLLOCK_CMP_EN 0x000050 +#define EMAC_QSERDES_COM_BGTC 0x000058 +#define EMAC_QSERDES_COM_DEC_START1 0x000064 +#define EMAC_QSERDES_COM_RES_TRIM_SEARCH 0x000088 +#define EMAC_QSERDES_COM_DIV_FRAC_START1 0x000098 +#define EMAC_QSERDES_COM_DIV_FRAC_START2 0x00009c +#define EMAC_QSERDES_COM_DIV_FRAC_START3 0x0000a0 +#define EMAC_QSERDES_COM_DEC_START2 0x0000a4 +#define EMAC_QSERDES_COM_PLL_CRCTRL 0x0000ac +#define EMAC_QSERDES_COM_RESET_SM 0x0000bc +#define EMAC_QSERDES_TX_BIST_MODE_LANENO 0x000100 +#define EMAC_QSERDES_TX_TX_EMP_POST1_LVL 0x000108 +#define EMAC_QSERDES_TX_TX_DRV_LVL 0x00010c +#define EMAC_QSERDES_TX_LANE_MODE 0x000150 +#define EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN 0x000170 +#define EMAC_QSERDES_RX_CDR_CONTROL 0x000200 +#define EMAC_QSERDES_RX_CDR_CONTROL2 0x000210 +#define EMAC_QSERDES_RX_RX_EQ_GAIN12 0x000230 + +/* EMAC_SGMII register offsets */ +#define EMAC_SGMII_PHY_SERDES_START 0x000300 +#define EMAC_SGMII_PHY_CMN_PWR_CTRL 0x000304 +#define EMAC_SGMII_PHY_RX_PWR_CTRL 0x000308 +#define EMAC_SGMII_PHY_TX_PWR_CTRL 0x00030C +#define EMAC_SGMII_PHY_LANE_CTRL1 0x000318 +#define EMAC_SGMII_PHY_AUTONEG_CFG2 0x000348 +#define EMAC_SGMII_PHY_CDR_CTRL0 0x000358 +#define EMAC_SGMII_PHY_SPEED_CFG1 0x000374 +#define EMAC_SGMII_PHY_POW_DWN_CTRL0 0x000380 
+#define EMAC_SGMII_PHY_RESET_CTRL 0x0003a8 +#define EMAC_SGMII_PHY_IRQ_CMD 0x0003ac +#define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x0003b0 +#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x0003b4 +#define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x0003b8 +#define EMAC_SGMII_PHY_RX_CHK_STATUS 0x0003d4 +#define EMAC_SGMII_PHY_AUTONEG0_STATUS 0x0003e0 +#define EMAC_SGMII_PHY_AUTONEG1_STATUS 0x0003e4 + +/* EMAC_QSERDES_COM_PLL_IP_SETI */ +#define PLL_IPSETI(x) ((x) & 0x3f) + +/* EMAC_QSERDES_COM_PLL_CP_SETI */ +#define PLL_CPSETI(x) ((x) & 0xff) + +/* EMAC_QSERDES_COM_PLL_IP_SETP */ +#define PLL_IPSETP(x) ((x) & 0x3f) + +/* EMAC_QSERDES_COM_PLL_CP_SETP */ +#define PLL_CPSETP(x) ((x) & 0x1f) + +/* EMAC_QSERDES_COM_PLL_CRCTRL */ +#define PLL_RCTRL(x) (((x) & 0xf) << 4) +#define PLL_CCTRL(x) ((x) & 0xf) + +/* SGMII v2 PHY registers per lane */ +#define EMAC_SGMII_PHY_LN_OFFSET 0x0400 + +/* SGMII v2 digital lane registers */ +#define EMAC_SGMII_LN_DRVR_CTRL0 0x00C +#define EMAC_SGMII_LN_DRVR_TAP_EN 0x018 +#define EMAC_SGMII_LN_TX_MARGINING 0x01C +#define EMAC_SGMII_LN_TX_PRE 0x020 +#define EMAC_SGMII_LN_TX_POST 0x024 +#define EMAC_SGMII_LN_TX_BAND_MODE 0x060 +#define EMAC_SGMII_LN_LANE_MODE 0x064 +#define EMAC_SGMII_LN_PARALLEL_RATE 0x078 +#define EMAC_SGMII_LN_CML_CTRL_MODE0 0x0B8 +#define EMAC_SGMII_LN_MIXER_CTRL_MODE0 0x0D0 +#define EMAC_SGMII_LN_VGA_INITVAL 0x134 +#define EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0 0x17C +#define EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0 0x188 +#define EMAC_SGMII_LN_UCDR_SO_CONFIG 0x194 +#define EMAC_SGMII_LN_RX_BAND 0x19C +#define EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0 0x1B8 +#define EMAC_SGMII_LN_RSM_CONFIG 0x1F0 +#define EMAC_SGMII_LN_SIGDET_ENABLES 0x224 +#define EMAC_SGMII_LN_SIGDET_CNTRL 0x228 +#define EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL 0x22C +#define EMAC_SGMII_LN_RX_EN_SIGNAL 0x2A0 +#define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x2AC +#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x2BC + +/* SGMII v2 per lane registers */ +#define SGMII_LN_RSM_START 0x029C + +/* SGMII v2 PHY common 
registers */ +#define SGMII_PHY_CMN_CTRL 0x0408 +#define SGMII_PHY_CMN_RESET_CTRL 0x0410 + +/* SGMII v2 PHY registers per lane */ +#define SGMII_PHY_LN_OFFSET 0x0400 +#define SGMII_PHY_LN_LANE_STATUS 0x00DC +#define SGMII_PHY_LN_BIST_GEN0 0x008C +#define SGMII_PHY_LN_BIST_GEN1 0x0090 +#define SGMII_PHY_LN_BIST_GEN2 0x0094 +#define SGMII_PHY_LN_BIST_GEN3 0x0098 +#define SGMII_PHY_LN_CDR_CTRL1 0x005C + +/* SGMII v2 digital lane register values */ +#define UCDR_STEP_BY_TWO_MODE0 BIT(7) +#define UCDR_XO_GAIN_MODE(x) ((x) & 0x7f) +#define UCDR_ENABLE BIT(6) +#define UCDR_SO_SATURATION(x) ((x) & 0x3f) +#define SIGDET_LP_BYP_PS4 BIT(7) +#define SIGDET_EN_PS0_TO_PS2 BIT(6) +#define EN_ACCOUPLEVCM_SW_MUX BIT(5) +#define EN_ACCOUPLEVCM_SW BIT(4) +#define RX_SYNC_EN BIT(3) +#define RXTERM_HIGHZ_PS5 BIT(2) +#define SIGDET_EN_PS3 BIT(1) +#define EN_ACCOUPLE_VCM_PS3 BIT(0) +#define UFS_MODE BIT(5) +#define TXVAL_VALID_INIT BIT(4) +#define TXVAL_VALID_MUX BIT(3) +#define TXVAL_VALID BIT(2) +#define USB3P1_MODE BIT(1) +#define KR_PCIGEN3_MODE BIT(0) +#define PRE_EN BIT(3) +#define POST_EN BIT(2) +#define MAIN_EN_MUX BIT(1) +#define MAIN_EN BIT(0) +#define TX_MARGINING_MUX BIT(6) +#define TX_MARGINING(x) ((x) & 0x3f) +#define TX_PRE_MUX BIT(6) +#define TX_PRE(x) ((x) & 0x3f) +#define TX_POST_MUX BIT(6) +#define TX_POST(x) ((x) & 0x3f) +#define CML_GEAR_MODE(x) (((x) & 7) << 3) +#define CML2CMOS_IBOOST_MODE(x) ((x) & 7) +#define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2) +#define MIXER_DATARATE_MODE(x) ((x) & 3) +#define VGA_THRESH_DFE(x) ((x) & 0x3f) +#define SIGDET_LP_BYP_PS0_TO_PS2 BIT(5) +#define SIGDET_LP_BYP_MUX BIT(4) +#define SIGDET_LP_BYP BIT(3) +#define SIGDET_EN_MUX BIT(2) +#define SIGDET_EN BIT(1) +#define SIGDET_FLT_BYP BIT(0) +#define SIGDET_LVL(x) (((x) & 0xf) << 4) +#define SIGDET_BW_CTRL(x) ((x) & 0xf) +#define SIGDET_DEGLITCH_CTRL(x) (((x) & 0xf) << 1) +#define SIGDET_DEGLITCH_BYP BIT(0) +#define INVERT_PCS_RX_CLK BIT(7) +#define PWM_EN BIT(6) +#define RXBIAS_SEL(x) 
(((x) & 0x3) << 4) +#define EBDAC_SIGN BIT(3) +#define EDAC_SIGN BIT(2) +#define EN_AUXTAP1SIGN_INVERT BIT(1) +#define EN_DAC_CHOPPING BIT(0) +#define DRVR_LOGIC_CLK_EN BIT(4) +#define DRVR_LOGIC_CLK_DIV(x) ((x) & 0xf) +#define PARALLEL_RATE_MODE2(x) (((x) & 0x3) << 4) +#define PARALLEL_RATE_MODE1(x) (((x) & 0x3) << 2) +#define PARALLEL_RATE_MODE0(x) ((x) & 0x3) +#define BAND_MODE2(x) (((x) & 0x3) << 4) +#define BAND_MODE1(x) (((x) & 0x3) << 2) +#define BAND_MODE0(x) ((x) & 0x3) +#define LANE_SYNC_MODE BIT(5) +#define LANE_MODE(x) ((x) & 0x1f) +#define CDR_PD_SEL_MODE0(x) (((x) & 0x3) << 5) +#define EN_DLL_MODE0 BIT(4) +#define EN_IQ_DCC_MODE0 BIT(3) +#define EN_IQCAL_MODE0 BIT(2) +#define EN_QPATH_MODE0 BIT(1) +#define EN_EPATH_MODE0 BIT(0) +#define FORCE_TSYNC_ACK BIT(7) +#define FORCE_CMN_ACK BIT(6) +#define FORCE_CMN_READY BIT(5) +#define EN_RCLK_DEGLITCH BIT(4) +#define BYPASS_RSM_CDR_RESET BIT(3) +#define BYPASS_RSM_TSYNC BIT(2) +#define BYPASS_RSM_SAMP_CAL BIT(1) +#define BYPASS_RSM_DLL_CAL BIT(0) + +/* EMAC_QSERDES_COM_SYS_CLK_CTRL */ +#define SYSCLK_CM BIT(4) +#define SYSCLK_AC_COUPLE BIT(3) + +/* EMAC_QSERDES_COM_PLL_VCOTAIL_EN */ +#define PLL_VCO_TAIL_MUX BIT(7) +#define PLL_VCO_TAIL(x) ((x) & 0x7c) +#define PLL_EN_VCOTAIL_EN BIT(0) + +/* EMAC_QSERDES_COM_PLL_CNTRL */ +#define OCP_EN BIT(5) +#define PLL_DIV_FFEN BIT(2) +#define PLL_DIV_ORD BIT(1) + +/* EMAC_QSERDES_COM_SYSCLK_EN_SEL */ +#define SYSCLK_SEL_CMOS BIT(3) + +/* EMAC_QSERDES_COM_RES_TRIM_SEARCH */ +#define RESTRIM_SEARCH(x) ((x) & 0xff) + +/* EMAC_QSERDES_COM_BGTC */ +#define BGTC(x) ((x) & 0x1f) + +/* EMAC_QSERDES_COM_RESETSM_CNTRL */ +#define FRQ_TUNE_MODE BIT(4) + +/* EMAC_QSERDES_COM_PLLLOCK_CMP_EN */ +#define PLLLOCK_CMP_EN BIT(0) + +/* EMAC_QSERDES_COM_DEC_START1 */ +#define DEC_START1_MUX BIT(7) +#define DEC_START1(x) ((x) & 0x7f) + +/* EMAC_QSERDES_COM_DIV_FRAC_START1 * EMAC_QSERDES_COM_DIV_FRAC_START2 */ +#define DIV_FRAC_START_MUX BIT(7) +#define DIV_FRAC_START(x) ((x) & 0x7f) + +/* 
EMAC_QSERDES_COM_DIV_FRAC_START3 */ +#define DIV_FRAC_START3_MUX BIT(4) +#define DIV_FRAC_START3(x) ((x) & 0xf) + +/* EMAC_QSERDES_COM_DEC_START2 */ +#define DEC_START2_MUX BIT(1) +#define DEC_START2 BIT(0) + +/* EMAC_QSERDES_COM_RESET_SM */ +#define READY BIT(5) + +/* EMAC_QSERDES_TX_TX_EMP_POST1_LVL */ +#define TX_EMP_POST1_LVL_MUX BIT(5) +#define TX_EMP_POST1_LVL(x) ((x) & 0x1f) +#define TX_EMP_POST1_LVL_BMSK 0x1f +#define TX_EMP_POST1_LVL_SHFT 0 + +/* EMAC_QSERDES_TX_TX_DRV_LVL */ +#define TX_DRV_LVL_MUX BIT(4) +#define TX_DRV_LVL(x) ((x) & 0xf) + +/* EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN */ +#define EMP_EN_MUX BIT(1) +#define EMP_EN BIT(0) + +/* EMAC_QSERDES_RX_CDR_CONTROL & EMAC_QSERDES_RX_CDR_CONTROL2 */ +#define HBW_PD_EN BIT(7) +#define SECONDORDERENABLE BIT(6) +#define FIRSTORDER_THRESH(x) (((x) & 0x7) << 3) +#define SECONDORDERGAIN(x) ((x) & 0x7) + +/* EMAC_QSERDES_RX_RX_EQ_GAIN12 */ +#define RX_EQ_GAIN2(x) (((x) & 0xf) << 4) +#define RX_EQ_GAIN1(x) ((x) & 0xf) + +/* EMAC_SGMII_PHY_SERDES_START */ +#define SERDES_START BIT(0) + +/* EMAC_SGMII_PHY_CMN_PWR_CTRL */ +#define BIAS_EN BIT(6) +#define PLL_EN BIT(5) +#define SYSCLK_EN BIT(4) +#define CLKBUF_L_EN BIT(3) +#define PLL_TXCLK_EN BIT(1) +#define PLL_RXCLK_EN BIT(0) + +/* EMAC_SGMII_PHY_RX_PWR_CTRL */ +#define L0_RX_SIGDET_EN BIT(7) +#define L0_RX_TERM_MODE(x) (((x) & 3) << 4) +#define L0_RX_I_EN BIT(1) + +/* EMAC_SGMII_PHY_TX_PWR_CTRL */ +#define L0_TX_EN BIT(5) +#define L0_CLKBUF_EN BIT(4) +#define L0_TRAN_BIAS_EN BIT(1) + +/* EMAC_SGMII_PHY_LANE_CTRL1 */ +#define L0_RX_EQUALIZE_ENABLE BIT(6) +#define L0_RESET_TSYNC_EN BIT(4) +#define L0_DRV_LVL(x) ((x) & 0xf) + +/* EMAC_SGMII_PHY_AUTONEG_CFG2 */ +#define FORCE_AN_TX_CFG BIT(5) +#define FORCE_AN_RX_CFG BIT(4) +#define AN_ENABLE BIT(0) + +/* EMAC_SGMII_PHY_SPEED_CFG1 */ +#define DUPLEX_MODE BIT(4) +#define SPDMODE_1000 BIT(1) +#define SPDMODE_100 BIT(0) +#define SPDMODE_10 0 +#define SPDMODE_BMSK 3 +#define SPDMODE_SHFT 0 + +/* EMAC_SGMII_PHY_POW_DWN_CTRL0 
*/ +#define PWRDN_B BIT(0) +#define CDR_MAX_CNT(x) ((x) & 0xff) + +/* EMAC_QSERDES_TX_BIST_MODE_LANENO */ +#define BIST_LANE_NUMBER(x) (((x) & 3) << 5) +#define BISTMODE(x) ((x) & 0x1f) + +/* EMAC_QSERDES_COM_PLLLOCK_CMPx */ +#define PLLLOCK_CMP(x) ((x) & 0xff) + +/* EMAC_SGMII_PHY_RESET_CTRL */ +#define PHY_SW_RESET BIT(0) + +/* EMAC_SGMII_PHY_IRQ_CMD */ +#define IRQ_GLOBAL_CLEAR BIT(0) + +/* EMAC_SGMII_PHY_INTERRUPT_MASK */ +#define DECODE_CODE_ERR BIT(7) +#define DECODE_DISP_ERR BIT(6) +#define PLL_UNLOCK BIT(5) +#define AN_ILLEGAL_TERM BIT(4) +#define SYNC_FAIL BIT(3) +#define AN_START BIT(2) +#define AN_END BIT(1) +#define AN_REQUEST BIT(0) + +#define SGMII_PHY_IRQ_CLR_WAIT_TIME 10 + +#define SGMII_PHY_INTERRUPT_ERR (\ + DECODE_CODE_ERR |\ + DECODE_DISP_ERR) + +#define SGMII_ISR_AN_MASK (\ + AN_REQUEST |\ + AN_START |\ + AN_END |\ + AN_ILLEGAL_TERM |\ + PLL_UNLOCK |\ + SYNC_FAIL) + +#define SGMII_ISR_MASK (\ + SGMII_PHY_INTERRUPT_ERR |\ + SGMII_ISR_AN_MASK) + +/* SGMII TX_CONFIG */ +#define TXCFG_LINK 0x8000 +#define TXCFG_MODE_BMSK 0x1c00 +#define TXCFG_1000_FULL 0x1800 +#define TXCFG_100_FULL 0x1400 +#define TXCFG_100_HALF 0x0400 +#define TXCFG_10_FULL 0x1000 +#define TXCFG_10_HALF 0x0000 + +#define SERDES_START_WAIT_TIMES 100 + +struct emac_reg_write { + ulong offset; +#define END_MARKER 0xffffffff + u32 val; +}; + +typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt); + +/** emac_sgmii - internal sgmii phy + * @base base address + * @digital per-lane digital block + * @irq interrupt number + * @initialize initialization function + */ +struct emac_sgmii { + void __iomem *base; + void __iomem *digital; + int irq; + emac_sgmii_initialize initialize; +}; + +extern struct emac_phy_ops emac_sgmii_ops; + +#endif /*_EMAC_SGMII_H_*/ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index e57c92292013..246fc9231768 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -168,6 +168,8 @@ #define DWMAC4_PCS_BASE 0x000000e0 #define RGMII_CONFIG_10M_CLK_DVD GENMASK(18, 10) +static int phytype = -1; +static int boardtype = -1; void *ipc_emac_log_ctxt; struct emac_emb_smmu_cb_ctx emac_emb_smmu_ctx = {0}; @@ -177,6 +179,14 @@ struct plat_stmmacenet_data *plat_dat; struct qcom_ethqos *pethqos; #ifdef MODULE +static char *board; +module_param(board, charp, 0660); +MODULE_PARM_DESC(board, "board type of the device"); + +static char *enet; +module_param(enet, charp, 0660); +MODULE_PARM_DESC(enet, "enet value for the phy connection"); + static char *eipv4; module_param(eipv4, charp, 0660); MODULE_PARM_DESC(eipv4, "ipv4 value from ethernet partition"); @@ -203,6 +213,28 @@ static unsigned char dev_addr[ETH_ALEN] = { 0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7}; static struct ip_params pparams = {"", "", "", ""}; +static int set_board_type(char *board_params) +{ + if (!strcmp(board_params, "Air")) + boardtype = AIR_BOARD; + else if (!strcmp(board_params, "Star")) + boardtype = STAR_BOARD; + else + return -1; + return 0; +} + +static int set_phy_type(char *enet_params) +{ + if (!strcmp(enet_params, "1") || !strcmp(enet_params, "2")) + phytype = PHY_1G; + else if (!strcmp(enet_params, "3") || !strcmp(enet_params, "6")) + phytype = PHY_25G; + else + return -1; + return 0; +} + static int set_early_ethernet_ipv4(char *ipv4_addr_in) { int ret = 1; @@ -278,6 +310,11 @@ fail: } #ifndef MODULE + +__setup("board=", set_board_type); + +__setup("enet=", set_phy_type); + static int __init set_early_ethernet_ipv4_static(char *ipv4_addr_in) { int ret = 1; @@ -338,8 +375,11 @@ static int qcom_ethqos_add_ipaddr(struct ip_params *ip_info, } else { ETHQOSINFO("Assigned IPv4 address: %s\r\n", ip_info->ipv4_addr_str); - +#if (IS_ENABLED(CONFIG_BOOTMARKER_PROXY)) + bootmarker_place_marker("M - Etherent Assigned IPv4 address"); +#else ETHQOSINFO("M - 
Etherent Assigned IPv4 address\n"); +#endif } return res; } @@ -385,8 +425,11 @@ static int qcom_ethqos_add_ipv6addr(struct ip_params *ip_info, } else { ETHQOSDBG("Assigned IPv6 address: %s\r\n", ip_info->ipv6_addr_str); - +#if (IS_ENABLED(CONFIG_BOOTMARKER_PROXY)) + bootmarker_place_marker("M - Ethernet Assigned IPv6 address"); +#else ETHQOSINFO("M - Ethernet Assigned IPv6 address\n"); +#endif } return ret; } @@ -2139,6 +2182,18 @@ static int ethqos_set_early_eth_param(struct stmmac_priv *priv, return 0; } +static void qcom_ethqos_disable_phy_clks(struct qcom_ethqos *ethqos) +{ + ETHQOSINFO("Enter\n"); + + if (ethqos->phyaux_clk) + clk_disable_unprepare(ethqos->phyaux_clk); + if (ethqos->sgmiref_clk) + clk_disable_unprepare(ethqos->sgmiref_clk); + + ETHQOSINFO("Exit\n"); +} + static void qcom_ethqos_request_phy_wol(void *plat_n) { struct plat_stmmacenet_data *plat = plat_n; @@ -2217,10 +2272,19 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ETHQOSERR("Error creating logging context for emac\n"); else ETHQOSDBG("IPC logging has been enabled for emac\n"); - +#if (IS_ENABLED(CONFIG_BOOTMARKER_PROXY)) + bootmarker_place_marker("M - Ethernet probe start"); +#else ETHQOSINFO("M - Ethernet probe start\n"); +#endif #ifdef MODULE + if (enet) + ret = set_phy_type(enet); + + if (board) + ret = set_board_type(board); + if (eipv4) ret = set_early_ethernet_ipv4(eipv4); @@ -2362,7 +2426,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev) } ETHQOSDBG("gdsc-off-on-suspend = %d\n", ethqos->gdsc_off_on_suspend); - + plat_dat->phy_type = phytype; + plat_dat->board_type = boardtype; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) goto err_clk; @@ -2412,8 +2477,11 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ethqos_set_early_eth_param(priv, ethqos); } atomic_set(&priv->plat->phy_clks_suspended, 0); - +#if (IS_ENABLED(CONFIG_BOOTMARKER_PROXY)) + bootmarker_place_marker("M - Ethernet probe end"); +#else ETHQOSINFO("M - 
Ethernet probe end\n"); +#endif return ret; err_clk: @@ -2421,7 +2489,11 @@ err_clk: err_mem: stmmac_remove_config_dt(pdev, plat_dat); - + if (ethqos) { + ethqos->driver_load_fail = true; + qcom_ethqos_disable_phy_clks(ethqos); + ethqos_disable_regulators(ethqos); + } return ret; } @@ -2489,13 +2561,18 @@ static int qcom_ethqos_suspend(struct device *dev) return 0; } - if (pm_suspend_target_state == PM_SUSPEND_MEM) - return qcom_ethqos_hib_freeze(dev); - ethqos = get_stmmac_bsp_priv(dev); if (!ethqos) return -ENODEV; + if (ethqos->driver_load_fail) { + ETHQOSINFO("driver load failed\n"); + return 0; + } + + if (pm_suspend_target_state == PM_SUSPEND_MEM) + return qcom_ethqos_hib_freeze(dev); + ndev = dev_get_drvdata(dev); if (!ndev) return -EINVAL; @@ -2527,14 +2604,19 @@ static int qcom_ethqos_resume(struct device *dev) if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded")) return 0; - if (pm_suspend_target_state == PM_SUSPEND_MEM) - return qcom_ethqos_hib_restore(dev); - ethqos = get_stmmac_bsp_priv(dev); if (!ethqos) return -ENODEV; + if (ethqos->driver_load_fail) { + ETHQOSINFO("driver load failed\n"); + return 0; + } + + if (pm_suspend_target_state == PM_SUSPEND_MEM) + return qcom_ethqos_hib_restore(dev); + if (ethqos->gdsc_off_on_suspend) { ret = regulator_enable(ethqos->gdsc_emac); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h index 64e29740cc70..c3578a896b47 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h @@ -177,6 +177,7 @@ struct qcom_ethqos { struct delayed_work ipv4_addr_assign_wq; struct delayed_work ipv6_addr_assign_wq; bool early_eth_enabled; + bool driver_load_fail; /* Key Performance Indicators */ bool print_kpi; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 502902077a34..bbacdbf41939 100644 --- 
a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -23,6 +23,7 @@ #include #include #include +#include struct stmmac_resources { void __iomem *addr; @@ -55,6 +56,11 @@ struct stmmac_tx_info { #define STMMAC_TBS_AVAIL BIT(0) #define STMMAC_TBS_EN BIT(1) +#define AIR_BOARD 1 +#define STAR_BOARD 2 +#define PHY_1G 1 +#define PHY_25G 2 + /* Frequently used values are kept adjacent for cache effect */ struct stmmac_tx_queue { u32 tx_count_frames; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 91fa1a31c865..82d3ca95d341 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1229,7 +1229,11 @@ static void stmmac_mac_link_up(struct phylink_config *config, stmmac_fpe_link_state_handle(priv, true); if (!priv->boot_kpi) { +#if (IS_ENABLED(CONFIG_BOOTMARKER_PROXY)) + bootmarker_place_marker("M - Ethernet is Ready.Link is UP"); +#else pr_info("M - Ethernet is Ready.Link is UP\n"); +#endif priv->boot_kpi = true; } } @@ -2854,7 +2858,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) priv->xstats.txq_stats[queue].tx_pkt_n++; if (priv->dev->stats.tx_packets == 1) +#if (IS_ENABLED(CONFIG_BOOTMARKER_PROXY)) + bootmarker_place_marker("M - Ethernet first pkt xmit"); +#else pr_info("M - Ethernet first packet transmitted\n"); +#endif } if (skb) stmmac_get_tx_hwtstamp(priv, p, skb); @@ -7409,8 +7417,13 @@ int stmmac_dvr_probe(struct device *device, u32 rxq; int i, ret = 0; - ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), - MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES); + if (of_property_read_bool(device->of_node, "virtio-mdio")) + ndev = alloc_netdev_mqs(sizeof(struct stmmac_priv), "eth2", NET_NAME_ENUM, + ether_setup, MTL_MAX_TX_QUEUES, MTL_MAX_TX_QUEUES); + else + ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv), + MTL_MAX_TX_QUEUES, 
MTL_MAX_TX_QUEUES); + if (!ndev) return -ENOMEM; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index d17149690050..2a6a5f73150a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -433,6 +433,76 @@ int stmmac_xpcs_setup(struct mii_bus *bus) return 0; } +/** + * stmmac_get_phy_addr + * @priv: net device structure + * @new_bus: points to the mii_bus structure + * Description: it finds the PHY address from board and phy_type + */ +int stmmac_get_phy_addr(struct stmmac_priv *priv, struct mii_bus *new_bus, + struct net_device *ndev) +{ + struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; + struct device_node *np = priv->device->of_node; + unsigned int phyaddr; + int err = 0; + + init_completion(&priv->plat->mdio_op); + new_bus->reset = &stmmac_mdio_reset; + new_bus->priv = ndev; + + if (priv->plat->phy_type != -1) { + if (priv->plat->phy_type == PHY_1G) { + err = of_property_read_u32(np, "emac-1g-phy-addr", &phyaddr); + new_bus->read = &virtio_mdio_read; + new_bus->write = &virtio_mdio_write; + } else { + new_bus->read = &virtio_mdio_read_c45_indirect; + new_bus->write = &virtio_mdio_write_c45_indirect; + new_bus->probe_capabilities = MDIOBUS_C22_C45; + if (priv->plat->phy_type == PHY_25G && + priv->plat->board_type == STAR_BOARD) { + err = of_property_read_u32(np, + "emac-star-cl45-phy-addr", &phyaddr); + } else { + err = of_property_read_u32(np, + "emac-air-cl45-phy-addr", &phyaddr); + } + } + } else { + err = of_property_read_u32(np, "emac-1g-phy-addr", &phyaddr); + if (err) { + new_bus->phy_mask = mdio_bus_data->phy_mask; + return -1; + } + new_bus->read = &virtio_mdio_read; + new_bus->write = &virtio_mdio_write; + /* Do MDIO reset before the bus->read call */ + err = new_bus->reset(new_bus); + if (err) { + new_bus->phy_mask = ~(1 << phyaddr); + return phyaddr; + } + /* 1G phy check */ + err = 
new_bus->read(new_bus, phyaddr, MII_BMSR); + if (err == -EBUSY || err == 0xffff) { + /* 2.5 G PHY case */ + new_bus->read = &virtio_mdio_read_c45_indirect; + new_bus->write = &virtio_mdio_write_c45_indirect; + new_bus->probe_capabilities = MDIOBUS_C22_C45; + + err = of_property_read_u32(np, + "emac-air-cl45-phy-addr", &phyaddr); + /* Board Type check */ + err = new_bus->read(new_bus, phyaddr, MII_BMSR); + if (err == -EBUSY || !err || err == 0xffff) + err = of_property_read_u32(np, + "emac-star-cl45-phy-addr", &phyaddr); + } + } + new_bus->phy_mask = ~(1 << phyaddr); + return phyaddr; +} /** * stmmac_mdio_register * @ndev: net device structure @@ -474,10 +544,9 @@ int stmmac_mdio_register(struct net_device *ndev) err = of_property_read_bool(np, "virtio-mdio"); if (err) { - new_bus->read = &virtio_mdio_read; - new_bus->write = &virtio_mdio_write; - init_completion(&priv->plat->mdio_op); + phyaddr = stmmac_get_phy_addr(priv, new_bus, ndev); max_addr = PHY_MAX_ADDR; + skip_phy_detect = 1; } else if (priv->plat->has_xgmac) { new_bus->read = &stmmac_xgmac2_mdio_read; new_bus->write = &stmmac_xgmac2_mdio_write; @@ -501,24 +570,6 @@ int stmmac_mdio_register(struct net_device *ndev) snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", new_bus->name, priv->plat->bus_id); new_bus->priv = ndev; - err = of_property_read_u32(np, "emac-phy-addr", &phyaddr); - if (err) { - new_bus->phy_mask = mdio_bus_data->phy_mask; - } else { - err = new_bus->read(new_bus, phyaddr, MII_BMSR); - if (err == -EBUSY || !err || err == 0xffff) { - err = of_property_read_u32(np, "emac-cl45-phy-addr", &phyaddr); - new_bus->phy_mask = ~(1 << phyaddr); - skip_phy_detect = 1; - new_bus->read = &virtio_mdio_read_c45_indirect; - new_bus->write = &virtio_mdio_write_c45_indirect; - new_bus->probe_capabilities = MDIOBUS_C22_C45; - } else { - new_bus->phy_mask = ~(1 << phyaddr); - skip_phy_detect = 1; - } - } - new_bus->parent = priv->device; err = of_mdiobus_register(new_bus, mdio_node); diff --git 
a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index c57a0262fb64..b85c50d43a72 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -367,6 +367,15 @@ config XILINX_GMII2RGMII the Reduced Gigabit Media Independent Interface(RGMII) between Ethernet physical media devices and the Gigabit Ethernet controller. +config QCA8337_SWITCH + tristate "Drivers for QTI Atheros QCA8337 switch" + help + This enables support for the QTI Atheros QCA8337 Ethernet + switch. This driver support switch funtionality over SGMII + interface. + Add downstream qca8337 driver + Support the emac driver + endif # PHYLIB config MICREL_KS8995MA diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index f7138d3c896b..c7b6b65e0d90 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -88,3 +88,4 @@ obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_TERANETICS_PHY) += teranetics.o obj-$(CONFIG_VITESSE_PHY) += vitesse.o obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o +obj-$(CONFIG_QCA8337_SWITCH) += qca8337.o diff --git a/drivers/net/phy/qca8337.c b/drivers/net/phy/qca8337.c new file mode 100644 index 000000000000..13230982afec --- /dev/null +++ b/drivers/net/phy/qca8337.c @@ -0,0 +1,593 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (c) 2014, 2015, 2017, The Linux Foundation. All rights reserved. + * Copyright (C) 2009 Felix Fietkau + * Copyright (C) 2011-2012 Gabor Juhos + * Copyright (c) 2016 John Crispin john@phrozen.org + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * Author: Matus Ujhelyi + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +/* QCA8337 Switch driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline void split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page) +{ + regaddr >>= 1; + *r1 = regaddr & 0x1e; + + regaddr >>= 5; + *r2 = regaddr & 0x7; + + regaddr >>= 3; + *page = regaddr & 0x1ff; +} + +u32 qca8337_read(struct qca8337_priv *priv, u32 reg) +{ + struct phy_device *phy = priv->phy; + struct mii_bus *bus = phy->mdio.bus; + u16 r1, r2, page; + u16 lo, hi; + + mutex_lock(&bus->mdio_lock); + + split_addr(reg, &r1, &r2, &page); + + bus->write(bus, 0x18, 0, page); + usleep_range(1000, 2000); /* wait for the page switch to propagate */ + lo = bus->read(bus, 0x10 | r2, r1); + hi = bus->read(bus, 0x10 | r2, r1 + 1); + + mutex_unlock(&bus->mdio_lock); + + return (hi << 16) | lo; +} +EXPORT_SYMBOL_GPL(qca8337_read); + +void qca8337_write(struct qca8337_priv *priv, u32 reg, u32 val) +{ + struct phy_device *phy = priv->phy; + struct mii_bus *bus = phy->mdio.bus; + u16 r1, r2, r3; + u16 lo, hi; + + mutex_lock(&bus->mdio_lock); + + split_addr(reg, &r1, &r2, &r3); + lo = val & 0xffff; + hi = (u16)(val >> 16); + + bus->write(bus, 0x18, 0, r3); + usleep_range(1000, 2000); /* wait for the page switch to propagate */ + bus->write(bus, 0x10 | r2, r1, lo); + bus->write(bus, 0x10 | r2, r1 + 1, hi); + + mutex_unlock(&bus->mdio_lock); +} +EXPORT_SYMBOL_GPL(qca8337_write); + +static u32 +qca8337_rmw(struct qca8337_priv *priv, u32 reg, u32 mask, u32 val) +{ + u32 ret; + + ret = priv->ops->read(priv, reg); + ret &= ~mask; + ret |= val; + priv->ops->write(priv, reg, ret); + return ret; +} + +static void +qca8337_reg_set(struct qca8337_priv *priv, u32 reg, u32 val) +{ + qca8337_rmw(priv, reg, 0, val); +} + +static void qca8337_reset_switch(struct qca8337_priv *priv) +{ + u32 val = 0; + int count = 0; + + qca8337_reg_set(priv, QCA8337_REG_MASK_CTRL, 
QCA8337_CTRL_RESET); + + /*Need wait so reset done*/ + for (count = 0; count < 100; count++) { + usleep_range(5000, 10000); + + val = priv->ops->read(priv, QCA8337_REG_MASK_CTRL); + if (!val && !(val & QCA8337_CTRL_RESET)) + break; + } +} + +static void +qca8337_port_set_status(struct qca8337_priv *priv) +{ + qca8337_write(priv, QCA8337_REG_PORT_STATUS(0), + (QCA8337_PORT_SPEED_1000M | QCA8337_PORT_STATUS_TXMAC | + QCA8337_PORT_STATUS_RXMAC | QCA8337_PORT_STATUS_TXFLOW | + QCA8337_PORT_STATUS_RXFLOW | QCA8337_PORT_STATUS_DUPLEX)); + + qca8337_write(priv, QCA8337_REG_PORT_STATUS(6), + (QCA8337_PORT_SPEED_1000M | QCA8337_PORT_STATUS_TXMAC | + QCA8337_PORT_STATUS_RXMAC | QCA8337_PORT_STATUS_TXFLOW | + QCA8337_PORT_STATUS_RXFLOW | QCA8337_PORT_STATUS_DUPLEX)); +} + +static int +qca8337_busy_wait(struct qca8337_priv *priv, u32 reg, u32 mask) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(20); + + /* loop until the busy flag has cleared */ + do { + u32 val = priv->ops->read(priv, reg); + int busy = val & mask; + + if (!busy) + break; + cond_resched(); + } while (!time_after_eq(jiffies, timeout)); + + return time_after_eq(jiffies, timeout); +} + +static void +qca8337_mib_init(struct qca8337_priv *priv) +{ + qca8337_reg_set(priv, QCA8337_REG_MIB, + QCA8337_MIB_FLUSH | QCA8337_MIB_BUSY); + qca8337_busy_wait(priv, QCA8337_REG_MIB, QCA8337_MIB_BUSY); + qca8337_reg_set(priv, QCA8337_REG_MIB, QCA8337_MIB_CPU_KEEP); + priv->ops->write(priv, QCA8337_REG_MODULE_EN, QCA8337_MODULE_EN_MIB); +} + +static void qca8337_vlan_config(struct qca8337_priv *priv) +{ + priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(0), 0x0014007e); + priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(0), 0x10001); + + priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(1), 0x0014007d); + priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(1), 0x10001); + + priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(2), 0x0014007b); + priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(2), 0x10001); + + 
priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(3), 0x00140077); + priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(3), 0x10001); + + priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(4), 0x0014006f); + priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(4), 0x10001); + + priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(5), 0x0014005f); + priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(5), 0x10001); + + priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(6), 0x0014001e); + priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(6), 0x10001); +} + +static int qca8337_hw_init(struct qca8337_priv *priv) +{ + int i; + + /* set pad control for cpu port */ + qca8337_write(priv, QCA8337_REG_PAD0_CTRL, QCA8337_PAD_SGMII_EN); + + qca8337_write(priv, QCA8337_REG_PAD5_CTRL, + QCA8337_PAD_RGMII_RXCLK_DELAY_EN); + + qca8337_write(priv, QCA8337_REG_PAD6_CTRL, + (QCA8337_PAD_RGMII_EN | QCA8337_PAD_RGMII_RXCLK_DELAY_EN | + (0x1 << QCA8337_PAD_RGMII_TXCLK_DELAY_SEL_S) | + (0x2 << QCA8337_PAD_RGMII_RXCLK_DELAY_SEL_S))); + + /* Enable CPU Port */ + qca8337_reg_set(priv, QCA8337_REG_GLOBAL_FW_CTRL0, + QCA8337_GLOBAL_FW_CTRL0_CPU_PORT_EN); + + qca8337_port_set_status(priv); + + /* Enable MIB counters */ + qca8337_mib_init(priv); + + /* Disable QCA header mode on the cpu port */ + priv->ops->write(priv, QCA8337_REG_PORT_HEADER(priv->cpu_port), 0); + + /* Disable forwarding by default on all ports */ + for (i = 0; i < priv->ports; i++) + qca8337_rmw(priv, QCA8337_REG_PORT_LOOKUP(i), + QCA8337_PORT_LOOKUP_MEMBER, 0); + + qca8337_write(priv, QCA8337_REG_GLOBAL_FW_CTRL1, + (QCA8337_IGMP_JOIN_LEAVE_DPALL | QCA8337_BROAD_DPALL | + QCA8337_MULTI_FLOOD_DPALL | QCA8337_UNI_FLOOD_DPALL)); + + /* Setup connection between CPU port & user ports */ + qca8337_vlan_config(priv); + + /* Disable AZ */ + priv->ops->write(priv, QCA8337_REG_EEE_CTRL, QCA8337_EEE_CTRL_DISABLE); + return 0; +} + +static void qca8337_reg_init_lan(struct qca8337_priv *priv) +{ + priv->ops->write(priv, QCA8337_REG_POWER_ON_STRIP, + QCA8337_REG_POS_VAL); + 
priv->ops->write(priv, QCA8337_MAC_PWR_SEL, + QCA8337_MAC_PWR_SEL_VAL); + priv->ops->write(priv, QCA8337_SGMII_CTRL_REG, + QCA8337_SGMII_CTRL_VAL); +} + +static void +qca8337_read_port_link(struct qca8337_priv *priv, int port, + struct port_link_info *port_link) +{ + u32 status; + u32 speed; + + memset(port_link, '\0', sizeof(*port_link)); + + status = priv->ops->read(priv, QCA8337_REG_PORT_STATUS(port)); + + port_link->aneg = !!(status & QCA8337_PORT_STATUS_LINK_AUTO); + if (port_link->aneg || port != priv->cpu_port) { + port_link->link = !!(status & QCA8337_PORT_STATUS_LINK_UP); + if (!port_link->link) + return; + } else { + port_link->link = true; + } + + port_link->duplex = !!(status & QCA8337_PORT_STATUS_DUPLEX); + port_link->tx_flow = !!(status & QCA8337_PORT_STATUS_TXFLOW); + port_link->rx_flow = !!(status & QCA8337_PORT_STATUS_RXFLOW); + + speed = (status & QCA8337_PORT_STATUS_SPEED) >> + QCA8337_PORT_STATUS_SPEED_S; + + switch (speed) { + case QCA8337_PORT_SPEED_10M: + port_link->speed = SPEED_10; + break; + case QCA8337_PORT_SPEED_100M: + port_link->speed = SPEED_100; + break; + case QCA8337_PORT_SPEED_1000M: + port_link->speed = SPEED_1000; + break; + default: + port_link->speed = SPEED_UNKNOWN; + break; + } +} + +static void qca8337_phy_enable(struct phy_device *phydev) +{ + int phyid = 0; + ushort phy_val; + struct mii_bus *bus; + struct qca8337_priv *priv = phydev->priv; + + bus = priv->phy->mdio.bus; + + if (phydev->autoneg == AUTONEG_ENABLE) { + int port; + + for (port = 1; port < priv->ports - 1; port++) + qca8337_write(priv, QCA8337_REG_PORT_STATUS(port), + 0x1280); + + for (phyid = 0; phyid < priv->num_phy ; phyid++) { + /*enable phy prefer multi-port mode*/ + phy_val = mdiobus_read(bus, phyid, MII_CTRL1000); + phy_val |= (ADVERTISE_MULTI_PORT_PREFER | + ADVERTISE_1000FULL); + mdiobus_write(bus, phyid, MII_CTRL1000, phy_val); + + /*enable extended next page. 
0:enable, 1:disable*/ + phy_val = mdiobus_read(bus, phyid, MII_ADVERTISE); + phy_val &= (~(ADVERTISE_RESV)); + mdiobus_write(bus, phyid, MII_ADVERTISE, phy_val); + + /*Phy power up*/ + mdiobus_write(bus, phyid, MII_BMCR, (BMCR_RESET | + BMCR_ANENABLE)); + /* wait for the page switch to propagate */ + usleep_range(100, 200); + } + } else { + int port; + u32 status = 0; + + linkmode_and(phydev->advertising, phydev->advertising, phydev->supported); + + for (port = 1; port < priv->ports - 1; port++) { + status = 0; + status |= phydev->duplex ? + QCA8337_PORT_STATUS_DUPLEX : 0; + status |= (linkmode_test_bit(ADVERTISED_Asym_Pause, phydev->advertising)) ? + QCA8337_PORT_STATUS_TXFLOW : 0; + status |= (linkmode_test_bit(ADVERTISED_Pause, phydev->advertising)) ? + QCA8337_PORT_STATUS_RXFLOW : 0; + + if (phydev->speed == SPEED_1000) + status |= QCA8337_PORT_SPEED_1000M; + else if (phydev->speed == SPEED_100) + status |= QCA8337_PORT_SPEED_100M; + else if (phydev->speed == SPEED_10) + status |= QCA8337_PORT_SPEED_10M; + + qca8337_write(priv, QCA8337_REG_PORT_STATUS(port), + status); + /* wait for the page switch to propagate */ + usleep_range(100, 200); + + status |= QCA8337_PORT_STATUS_TXMAC | + QCA8337_PORT_STATUS_RXMAC; + qca8337_write(priv, QCA8337_REG_PORT_STATUS(port), + status); + } + + for (phyid = 0; phyid < priv->num_phy ; phyid++) { + phydev->drv->phy_id = phyid; + genphy_setup_forced(phydev); + } + + for (phyid = 0; phyid < priv->num_phy ; phyid++) { + phydev->drv->phy_id = phyid; + genphy_update_link(phydev); + + if (phydev->link) + break; + } + } +} + +static int qca8337_config_aneg(struct phy_device *phydev) +{ + qca8337_phy_enable(phydev); + + return 0; +} + +static int qca8337_read_status(struct phy_device *phydev) +{ + struct qca8337_priv *priv = phydev->priv; + struct port_link_info port_link; + int i, port_status = 0; + int speed = -1, duplex = 0; + + for (i = 1; i < priv->ports - 1; i++) { + qca8337_read_port_link(priv, i, &port_link); + + if 
(port_link.link) { + speed = (speed < port_link.speed) ? + port_link.speed : speed; + duplex = (duplex < port_link.duplex) ? + port_link.duplex : duplex; + port_status |= 1 << i; + } + } + + qca8337_read_port_link(priv, priv->cpu_port, &port_link); + phydev->link = (port_status) ? !!port_link.link : 0; + phydev->speed = speed; + phydev->duplex = duplex; + + return 0; +} + +static int qca8337_aneg_done(struct phy_device *phydev) +{ + int phyid = 0; + int retval = 0; + int aneg_status = 0; + struct qca8337_priv *priv = phydev->priv; + struct mii_bus *bus = priv->phy->mdio.bus; + + for (phyid = 0; phyid < priv->num_phy ; phyid++) { + retval = mdiobus_read(bus, phyid, MII_BMSR); + if (retval < 0) + return retval; + + (retval & BMSR_ANEGCOMPLETE) ? + (aneg_status |= 1 << phyid) : + (aneg_status |= 0 << phyid); + } + return aneg_status; +} + +static int +qca8337_regmap_read(void *ctx, uint32_t reg, uint32_t *val) +{ + struct qca8337_priv *priv = (struct qca8337_priv *)ctx; + + if (!priv->phy->link) + return -EPERM; + + *val = priv->ops->read(priv, reg); + return 0; +} + +static int +qca8337_regmap_write(void *ctx, uint32_t reg, uint32_t val) +{ + struct qca8337_priv *priv = (struct qca8337_priv *)ctx; + + if (!priv->phy->link) + return -EPERM; + + priv->ops->write(priv, reg, val); + return 0; +} + +static const struct regmap_range qca8337_readable_ranges[] = { + regmap_reg_range(0x0000, 0x00e4), /* Global control registers */ + regmap_reg_range(0x0100, 0x0168), /* EEE control registers */ + regmap_reg_range(0x0200, 0x0270), /* Parser control registers */ + regmap_reg_range(0x0400, 0x0454), /* ACL control registers */ + regmap_reg_range(0x0600, 0x0718), /* Lookup control registers */ + regmap_reg_range(0x0800, 0x0b70), /* QM control registers */ + regmap_reg_range(0x0c00, 0x0c80), /* PKT edit control registers */ + regmap_reg_range(0x0e00, 0x0e98), /* L3 */ + regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */ + regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */ + 
regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */ + regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */ + regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */ + regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */ + regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */ +}; + +static const struct regmap_access_table qca8337_readable_table = { + .yes_ranges = qca8337_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(qca8337_readable_ranges), +}; + +static struct regmap_config qca8337_regmap_config = { + .reg_bits = 16, + .val_bits = 32, + .reg_stride = 4, + .max_register = 0x16ac, /* end MIB - Port6 range */ + .reg_read = qca8337_regmap_read, + .reg_write = qca8337_regmap_write, + .rd_table = &qca8337_readable_table, +}; + +static int qca8337_config_init(struct phy_device *phydev) +{ + struct qca8337_priv *priv = phydev->priv; + int ret = 0; + + /*Software reset*/ + priv->ops->reset_switch(priv); + /* Add delay to settle reset */ + usleep_range(100, 200); + + ret = priv->ops->hw_init(priv); + if (ret) + return ret; + + qca8337_reg_init_lan(priv); + return 0; +} + +static struct qca8337_switch_ops switch_ops = { + .hw_init = qca8337_hw_init, + .reset_switch = qca8337_reset_switch, + .read = qca8337_read, + .write = qca8337_write, +}; + +static int qca8337_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct qca8337_priv *priv = NULL; + u32 val = 0; + u16 id = 0; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->phy = phydev; + priv->dev = &phydev->mdio.dev; + priv->cpu_port = QCA8337_CPU_PORT; + priv->vlans = QCA8337_MAX_VLANS; + priv->ports = QCA8337_NUM_PORTS; + priv->num_phy = QCA8337_NUM_PHYS; + priv->ops = &switch_ops; + + /* Setup the register mapping */ + priv->regmap = devm_regmap_init(priv->dev, NULL, priv, + &qca8337_regmap_config); + if (IS_ERR(priv->regmap)) + pr_warn("regmap initialization failed\n"); + + /* read the switches ID register */ + val = qca8337_read(priv, 
QCA8337_REG_MASK_CTRL); + id = val & (QCA8337_CTRL_REVISION | QCA8337_CTRL_VERSION); + + priv->chip_ver = (id & QCA8337_CTRL_VERSION) >> QCA8337_CTRL_VERSION_S; + priv->chip_rev = (id & QCA8337_CTRL_REVISION); + + if (priv->chip_ver != QCA8337_ID_QCA8337) { + dev_err(dev, "qca8337: unknown Atheros device\n"); + dev_err(dev, "[ver=%d, rev=%d, phy_id=%04x%04x]\n", + priv->chip_ver, priv->chip_rev, + mdiobus_read(priv->phy->mdio.bus, priv->phy->drv->phy_id, 2), + mdiobus_read(priv->phy->mdio.bus, priv->phy->drv->phy_id, 3)); + + return -ENODEV; + } + + dev_dbg(dev, "qca8337: Switch probed successfully "); + dev_dbg(dev, "[ver=%d, rev=%d, phy_id=%04x%04x]\n", + priv->chip_ver, priv->chip_rev, + mdiobus_read(priv->phy->mdio.bus, priv->phy->drv->phy_id, 2), + mdiobus_read(priv->phy->mdio.bus, priv->phy->drv->phy_id, 3)); + + phydev->priv = priv; + return 0; +} + +static void qca8337_remove(struct phy_device *phydev) +{ + struct qca8337_priv *priv = phydev->priv; + + if (!priv) + return; +} + +static struct phy_driver qca8337_driver = { + .phy_id = QCA8337_PHY_ID, + .name = "Atheros QCA8337", + .phy_id_mask = 0xffffffef, + .probe = qca8337_probe, + .config_init = qca8337_config_init, + .features = PHY_GBIT_FEATURES, + .flags = PHY_IS_INTERNAL, + .config_aneg = qca8337_config_aneg, + .read_status = qca8337_read_status, + .aneg_done = qca8337_aneg_done, + .remove = qca8337_remove, +}; + +static int __init qca8337_init(void) +{ + return phy_driver_register(&qca8337_driver, THIS_MODULE); +} + +static void __exit qca8337_exit(void) +{ + phy_driver_unregister(&qca8337_driver); +} + +module_init(qca8337_init); +module_exit(qca8337_exit); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:qca8337"); diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 3fa1f0e0cc81..80a5847acccd 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -37,6 +37,7 @@ source "drivers/net/wireless/st/Kconfig" source "drivers/net/wireless/ti/Kconfig" 
source "drivers/net/wireless/zydas/Kconfig" source "drivers/net/wireless/quantenna/Kconfig" +source "drivers/net/wireless/cnss/Kconfig" config PCMCIA_RAYCS tristate "Aviator/Raytheon 2.4GHz wireless support" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index a61cf6c90343..3d926e17dfe6 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -32,3 +32,4 @@ obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o obj-$(CONFIG_VIRT_WIFI) += virt_wifi.o +obj-$(CONFIG_CNSS) += cnss/ diff --git a/drivers/net/wireless/cnss/Kconfig b/drivers/net/wireless/cnss/Kconfig new file mode 100644 index 000000000000..b423784494bf --- /dev/null +++ b/drivers/net/wireless/cnss/Kconfig @@ -0,0 +1,99 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# cnss device configuration +# + +config CNSS + tristate "CNSS driver for wifi module" + select CNSS_UTILS + select CRYPTO + select CRYPTO_HASH + select CRYPTO_BLKCIPHER + help + This module adds support for the CNSS connectivity subsystem used + for wifi devices based on the QCA AR6320 chipset. + This driver also adds support to integrate WLAN module to subsystem + restart framework. + +config CNSS_SDIO + bool "Enable/disable cnss sdio platform driver for wifi module" + depends on CNSS + depends on MMC + help + This module adds support for the CNSS wlan module interfaced + with SDIO bus. + This driver also adds support to integrate WLAN module to subsystem + restart framework, power on WLAN chip and registered the WLAN module + as a SDIO client device. + +config CNSS_MAC_BUG + bool "Enable/disable 0-4K memory initialization for QCA6174" + depends on CNSS + help + If enabled, 0-4K memory is reserved for QCA6174 to address + a MAC HW bug. MAC would do an invalid pointer fetch based on + the data, that was read from 0 to 4K. So fill it with zero's; + to an address for which PCIe root complex would honor the read + without any errors. 
+ +config CLD_DEBUG + bool "Enable/disable CLD debug features" + help + WLAN CLD driver uses this config to enable certain debug features. + Some of the debug features may affect performance or may compromise + on security. + + Say N, if you are building a release kernel for production use. + Only say Y, if you are building a kernel with debug support. + +config CLD_USB_CORE + tristate "Qualcomm Technologies Inc. Core wlan driver for QCA USB interface" + select WIRELESS_EXT + select WEXT_PRIV + select WEXT_CORE + select WEXT_SPY + select NL80211_TESTMODE + help + This section contains the necessary modules needed to enable the + core WLAN driver for Qualcomm Technologies Inc USB wlan chipset. + Select Y to compile the driver in order to have WLAN functionality + support. + +config CLD_HL_SDIO_CORE + tristate "Qualcomm Technologies Inc. Core wlan driver for QCA SDIO interface" + select WIRELESS_EXT + select WEXT_PRIV + select WEXT_CORE + select WEXT_SPY + select NL80211_TESTMODE + depends on ARCH_QCOM + depends on MMC + +config CLD_LL_CORE + tristate "Qualcomm Technologies Inc. Core wlan driver" + select NL80211_TESTMODE + select WEXT_CORE + select WEXT_PRIV + select WEXT_SPY + select WIRELESS_EXT + help + This section contains the necessary modules needed to enable the + core WLAN driver for Qualcomm Technologies Inc QCA6174 chipset. + Select Y to compile the driver in order to have WLAN functionality + support. + +config CNSS_SECURE_FW + bool "Enable/Disable Memory Allocation for Secure Firmware Feature" + depends on CNSS + help + CLD Driver can use this for holding local copy of firmware + binaries which is used for sha crypto computation. + The Memory Allocation is done only if this Config Parameter is + enabled + +config WLAN_FEATURE_RX_WAKELOCK + bool "Enable RX wake lock feature" + help + Enable WLAN_FEATURE_HOLD_RX_WAKELOCK which is required to take rx + wakelock when driver receives packets from fw. 
+ diff --git a/drivers/net/wireless/cnss/Makefile b/drivers/net/wireless/cnss/Makefile new file mode 100644 index 000000000000..4298c42aa418 --- /dev/null +++ b/drivers/net/wireless/cnss/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for CNSS platform driver +# + +obj-$(CONFIG_CNSS_SDIO) += cnss_sdio.o +obj-$(CONFIG_CNSS) += cnss_common.o diff --git a/drivers/net/wireless/cnss/cnss_common.c b/drivers/net/wireless/cnss/cnss_common.c new file mode 100644 index 000000000000..a37d4bbe9ba5 --- /dev/null +++ b/drivers/net/wireless/cnss/cnss_common.c @@ -0,0 +1,438 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "cnss_common.h" +#include + +#define AR6320_REV1_VERSION 0x5000000 +#define AR6320_REV1_1_VERSION 0x5000001 +#define AR6320_REV1_3_VERSION 0x5000003 +#define AR6320_REV2_1_VERSION 0x5010000 +#define AR6320_REV3_VERSION 0x5020000 +#define AR6320_REV3_2_VERSION 0x5030000 +#define AR900B_DEV_VERSION 0x1000000 +#define QCA9377_REV1_1_VERSION 0x5020001 + +static struct cnss_fw_files FW_FILES_QCA6174_FW_1_1 = { + "qwlan11.bin", "bdwlan11.bin", "otp11.bin", "utf11.bin", + "utfbd11.bin", "epping11.bin", "evicted11.bin"}; +static struct cnss_fw_files FW_FILES_QCA6174_FW_2_0 = { + "qwlan20.bin", "bdwlan20.bin", "otp20.bin", "utf20.bin", + "utfbd20.bin", "epping20.bin", "evicted20.bin"}; +static struct cnss_fw_files FW_FILES_QCA6174_FW_1_3 = { + "qwlan13.bin", "bdwlan13.bin", "otp13.bin", "utf13.bin", + "utfbd13.bin", "epping13.bin", "evicted13.bin"}; +static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = { + "qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin", + "utfbd30.bin", "epping30.bin", "evicted30.bin"}; +static struct cnss_fw_files FW_FILES_DEFAULT = { + 
"qwlan.bin", "bdwlan.bin", "otp.bin", "utf.bin", + "utfbd.bin", "epping.bin", "evicted.bin"}; + +enum cnss_dev_bus_type { + CNSS_BUS_NONE = -1, + CNSS_BUS_PCI, + CNSS_BUS_SDIO +}; + +static DEFINE_MUTEX(unsafe_channel_list_lock); +static DEFINE_MUTEX(dfs_nol_info_lock); + +static struct cnss_unsafe_channel_list { + u16 unsafe_ch_count; + u16 unsafe_ch_list[CNSS_MAX_CH_NUM]; +} unsafe_channel_list; + +static struct cnss_dfs_nol_info { + void *dfs_nol_info; + u16 dfs_nol_info_len; +} dfs_nol_info; + +static enum cnss_cc_src cnss_cc_source = CNSS_SOURCE_CORE; + +int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count) +{ + mutex_lock(&unsafe_channel_list_lock); + if (!unsafe_ch_list || ch_count > CNSS_MAX_CH_NUM) { + mutex_unlock(&unsafe_channel_list_lock); + return -EINVAL; + } + + unsafe_channel_list.unsafe_ch_count = ch_count; + + if (ch_count != 0) { + memcpy((char *)unsafe_channel_list.unsafe_ch_list, + (char *)unsafe_ch_list, ch_count * sizeof(u16)); + } + mutex_unlock(&unsafe_channel_list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(cnss_set_wlan_unsafe_channel); + +int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, + u16 *ch_count, u16 buf_len) +{ + mutex_lock(&unsafe_channel_list_lock); + if (!unsafe_ch_list || !ch_count) { + mutex_unlock(&unsafe_channel_list_lock); + return -EINVAL; + } + + if (buf_len < (unsafe_channel_list.unsafe_ch_count * sizeof(u16))) { + mutex_unlock(&unsafe_channel_list_lock); + return -ENOMEM; + } + + *ch_count = unsafe_channel_list.unsafe_ch_count; + memcpy((char *)unsafe_ch_list, + (char *)unsafe_channel_list.unsafe_ch_list, + unsafe_channel_list.unsafe_ch_count * sizeof(u16)); + mutex_unlock(&unsafe_channel_list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(cnss_get_wlan_unsafe_channel); + +int cnss_wlan_set_dfs_nol(const void *info, u16 info_len) +{ + void *temp; + struct cnss_dfs_nol_info *dfs_info; + + mutex_lock(&dfs_nol_info_lock); + if (!info || !info_len) { + mutex_unlock(&dfs_nol_info_lock); + return -EINVAL; + } 
+ + temp = kmemdup(info, info_len, GFP_KERNEL); + if (!temp) { + mutex_unlock(&dfs_nol_info_lock); + return -ENOMEM; + } + + dfs_info = &dfs_nol_info; + kfree(dfs_info->dfs_nol_info); + + dfs_info->dfs_nol_info = temp; + dfs_info->dfs_nol_info_len = info_len; + mutex_unlock(&dfs_nol_info_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(cnss_wlan_set_dfs_nol); + +int cnss_wlan_get_dfs_nol(void *info, u16 info_len) +{ + int len; + struct cnss_dfs_nol_info *dfs_info; + + mutex_lock(&dfs_nol_info_lock); + if (!info || !info_len) { + mutex_unlock(&dfs_nol_info_lock); + return -EINVAL; + } + + dfs_info = &dfs_nol_info; + + if (!dfs_info->dfs_nol_info || dfs_info->dfs_nol_info_len == 0) { + mutex_unlock(&dfs_nol_info_lock); + return -ENOENT; + } + + len = min(info_len, dfs_info->dfs_nol_info_len); + + memcpy(info, dfs_info->dfs_nol_info, len); + mutex_unlock(&dfs_nol_info_lock); + + return len; +} +EXPORT_SYMBOL_GPL(cnss_wlan_get_dfs_nol); + +void cnss_init_work(struct work_struct *work, work_func_t func) +{ + INIT_WORK(work, func); +} +EXPORT_SYMBOL_GPL(cnss_init_work); + +void cnss_flush_work(void *work) +{ + struct work_struct *cnss_work = work; + + cancel_work_sync(cnss_work); +} +EXPORT_SYMBOL_GPL(cnss_flush_work); + +void cnss_flush_delayed_work(void *dwork) +{ + struct delayed_work *cnss_dwork = dwork; + + cancel_delayed_work_sync(cnss_dwork); +} +EXPORT_SYMBOL_GPL(cnss_flush_delayed_work); + +void cnss_pm_wake_lock_init(struct wakeup_source **ws, const char *name) +{ + *ws = wakeup_source_register(NULL, name); +} +EXPORT_SYMBOL_GPL(cnss_pm_wake_lock_init); + +void cnss_pm_wake_lock(struct wakeup_source *ws) +{ + __pm_stay_awake(ws); +} +EXPORT_SYMBOL_GPL(cnss_pm_wake_lock); + +void cnss_pm_wake_lock_timeout(struct wakeup_source *ws, ulong msec) +{ + __pm_wakeup_event(ws, msec); +} +EXPORT_SYMBOL_GPL(cnss_pm_wake_lock_timeout); + +void cnss_pm_wake_lock_release(struct wakeup_source *ws) +{ + __pm_relax(ws); +} +EXPORT_SYMBOL_GPL(cnss_pm_wake_lock_release); + +void 
cnss_pm_wake_lock_destroy(struct wakeup_source *ws) +{ + wakeup_source_unregister(ws); +} +EXPORT_SYMBOL_GPL(cnss_pm_wake_lock_destroy); + +void cnss_get_monotonic_boottime(struct timespec64 *ts) +{ + ktime_get_boottime_ts64(ts); +} +EXPORT_SYMBOL_GPL(cnss_get_monotonic_boottime); + +void cnss_get_boottime(struct timespec64 *ts) +{ + ktime_get_ts64(ts); +} +EXPORT_SYMBOL_GPL(cnss_get_boottime); + +void cnss_init_delayed_work(struct delayed_work *work, work_func_t func) +{ + INIT_DELAYED_WORK(work, func); +} +EXPORT_SYMBOL_GPL(cnss_init_delayed_work); + +int cnss_vendor_cmd_reply(struct sk_buff *skb) +{ + return cfg80211_vendor_cmd_reply(skb); +} +EXPORT_SYMBOL_GPL(cnss_vendor_cmd_reply); + +int cnss_set_cpus_allowed_ptr(struct task_struct *task, ulong cpu) +{ + return set_cpus_allowed_ptr(task, cpumask_of(cpu)); +} +EXPORT_SYMBOL_GPL(cnss_set_cpus_allowed_ptr); + +/* wlan prop driver cannot invoke show_stack + * function directly, so to invoke this function it + * call wcnss_dump_stack function + */ +void cnss_dump_stack(struct task_struct *task) +{ + show_stack(task, NULL, NULL); +} +EXPORT_SYMBOL_GPL(cnss_dump_stack); + +struct cnss_dev_platform_ops *cnss_get_platform_ops(struct device *dev) +{ + if (!dev) + return NULL; + else + return dev->platform_data; +} + +int cnss_common_request_bus_bandwidth(struct device *dev, int bandwidth) +{ + struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev); + + if (pf_ops && pf_ops->request_bus_bandwidth) + return pf_ops->request_bus_bandwidth(bandwidth); + else + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cnss_common_request_bus_bandwidth); + +void *cnss_common_get_virt_ramdump_mem(struct device *dev, unsigned long *size) +{ + struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev); + + if (pf_ops && pf_ops->get_virt_ramdump_mem) + return pf_ops->get_virt_ramdump_mem(size); + else + return NULL; +} +EXPORT_SYMBOL_GPL(cnss_common_get_virt_ramdump_mem); + +void cnss_common_device_self_recovery(struct device 
*dev) +{ + struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev); + + if (pf_ops && pf_ops->device_self_recovery) + pf_ops->device_self_recovery(); +} +EXPORT_SYMBOL_GPL(cnss_common_device_self_recovery); + +void cnss_common_schedule_recovery_work(struct device *dev) +{ + struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev); + + if (pf_ops && pf_ops->schedule_recovery_work) + pf_ops->schedule_recovery_work(); +} +EXPORT_SYMBOL_GPL(cnss_common_schedule_recovery_work); + +void cnss_common_device_crashed(struct device *dev) +{ + struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev); + + if (pf_ops && pf_ops->device_crashed) + pf_ops->device_crashed(); +} +EXPORT_SYMBOL_GPL(cnss_common_device_crashed); + +u8 *cnss_common_get_wlan_mac_address(struct device *dev, u32 *num) +{ + struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev); + + if (pf_ops && pf_ops->get_wlan_mac_address) + return pf_ops->get_wlan_mac_address(num); + else + return NULL; +} +EXPORT_SYMBOL_GPL(cnss_common_get_wlan_mac_address); + +int cnss_common_set_wlan_mac_address(struct device *dev, const u8 *in, u32 len) +{ + struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev); + + if (pf_ops && pf_ops->set_wlan_mac_address) + return pf_ops->set_wlan_mac_address(in, len); + else + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cnss_common_set_wlan_mac_address); + +int cnss_power_up(struct device *dev) +{ + struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev); + + if (pf_ops && pf_ops->power_up) + return pf_ops->power_up(dev); + else + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cnss_power_up); + +int cnss_power_down(struct device *dev) +{ + struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev); + + if (pf_ops && pf_ops->power_down) + return pf_ops->power_down(dev); + else + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cnss_power_down); + +void cnss_get_qca9377_fw_files(struct cnss_fw_files *pfw_files, + u32 size, u32 tufello_dual_fw) +{ + if 
(tufello_dual_fw) + memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files)); + else + memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files)); +} +EXPORT_SYMBOL_GPL(cnss_get_qca9377_fw_files); + +int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files, + u32 target_type, u32 target_version) +{ + if (!pfw_files) + return -ENODEV; + + switch (target_version) { + case AR6320_REV1_VERSION: + case AR6320_REV1_1_VERSION: + memcpy(pfw_files, &FW_FILES_QCA6174_FW_1_1, sizeof(*pfw_files)); + break; + case AR6320_REV1_3_VERSION: + memcpy(pfw_files, &FW_FILES_QCA6174_FW_1_3, sizeof(*pfw_files)); + break; + case AR6320_REV2_1_VERSION: + memcpy(pfw_files, &FW_FILES_QCA6174_FW_2_0, sizeof(*pfw_files)); + break; + case AR6320_REV3_VERSION: + case AR6320_REV3_2_VERSION: + memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files)); + break; + default: + memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files)); + pr_err("%s default version 0x%X 0x%X\n", __func__, + target_type, target_version); + break; + } + return 0; +} +EXPORT_SYMBOL_GPL(cnss_get_fw_files_for_target); + +void cnss_set_cc_source(enum cnss_cc_src cc_source) +{ + cnss_cc_source = cc_source; +} +EXPORT_SYMBOL_GPL(cnss_set_cc_source); + +enum cnss_cc_src cnss_get_cc_source(void) +{ + return cnss_cc_source; +} +EXPORT_SYMBOL_GPL(cnss_get_cc_source); + +const char *cnss_wlan_get_evicted_data_file(void) +{ + return FW_FILES_QCA6174_FW_3_0.evicted_data; +} + +int cnss_common_register_tsf_captured_handler(struct device *dev, + irq_handler_t handler, void *ctx) +{ + struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev); + + if (pf_ops && pf_ops->register_tsf_captured_handler) + return pf_ops->register_tsf_captured_handler(handler, ctx); + else + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cnss_common_register_tsf_captured_handler); + +int cnss_common_unregister_tsf_captured_handler(struct device *dev, + void *ctx) +{ + struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev); + + if 
(pf_ops && pf_ops->unregister_tsf_captured_handler) + return pf_ops->unregister_tsf_captured_handler(ctx); + else + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cnss_common_unregister_tsf_captured_handler); diff --git a/drivers/net/wireless/cnss/cnss_common.h b/drivers/net/wireless/cnss/cnss_common.h new file mode 100644 index 000000000000..aa64d7c7bebd --- /dev/null +++ b/drivers/net/wireless/cnss/cnss_common.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _NET_CNSS_COMMON_H_ +#define _NET_CNSS_COMMON_H_ + +/* max 20mhz channel count */ +#define CNSS_MAX_CH_NUM 45 + +struct cnss_cap_tsf_info { + int irq_num; + void *context; + irq_handler_t irq_handler; +}; + +struct cnss_dev_platform_ops { + int (*request_bus_bandwidth)(int bandwidth); + void* (*get_virt_ramdump_mem)(unsigned long *size); + void (*device_self_recovery)(void); + void (*schedule_recovery_work)(void); + void (*device_crashed)(void); + u8 * (*get_wlan_mac_address)(u32 *num); + int (*set_wlan_mac_address)(const u8 *in, u32 len); + int (*power_up)(struct device *dev); + int (*power_down)(struct device *dev); + int (*register_tsf_captured_handler)(irq_handler_t handler, + void *adapter); + int (*unregister_tsf_captured_handler)(void *adapter); +}; + +int cnss_pci_request_bus_bandwidth(int bandwidth); +int cnss_sdio_request_bus_bandwidth(int bandwidth); + +void cnss_sdio_device_crashed(void); +void cnss_pci_device_crashed(void); + +void cnss_pci_device_self_recovery(void); +void cnss_sdio_device_self_recovery(void); + +void *cnss_pci_get_virt_ramdump_mem(unsigned long *size); +void *cnss_sdio_get_virt_ramdump_mem(unsigned long *size); + +void cnss_sdio_schedule_recovery_work(void); +void cnss_pci_schedule_recovery_work(void); + +int cnss_pcie_set_wlan_mac_address(const u8 *in, u32 len); +int cnss_sdio_set_wlan_mac_address(const u8 
*in, u32 len); + +u8 *cnss_pci_get_wlan_mac_address(u32 *num); +u8 *cnss_sdio_get_wlan_mac_address(u32 *num); +int cnss_sdio_power_up(struct device *dev); +int cnss_sdio_power_down(struct device *dev); +int cnss_pcie_power_up(struct device *dev); +int cnss_pcie_power_down(struct device *dev); +const char *cnss_wlan_get_evicted_data_file(void); +#endif /* _NET_CNSS_COMMON_H_ */ diff --git a/drivers/net/wireless/cnss/cnss_sdio.c b/drivers/net/wireless/cnss/cnss_sdio.c new file mode 100644 index 000000000000..a1866c25550a --- /dev/null +++ b/drivers/net/wireless/cnss/cnss_sdio.c @@ -0,0 +1,1824 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#define pr_fmt(fmt) "cnss_sdio:%s:%d:: " fmt, __func__, __LINE__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART) +#include +#include +#endif +#include +#include +#include +#include "cnss_common.h" +#include +#include +#include +#include +#include + +#define WLAN_VREG_NAME "vdd-wlan" +#define WLAN_VREG_DSRC_NAME "vdd-wlan-dsrc" +#define WLAN_VREG_IO_NAME "vdd-wlan-io" +#define WLAN_VREG_XTAL_NAME "vdd-wlan-xtal" +#define WLAN_GPIO_CAPTSF_NAME "qcom,cap-tsf-gpio" + +#define WLAN_VREG_IO_MAX 1800000 +#define WLAN_VREG_IO_MIN 1800000 +#define WLAN_VREG_XTAL_MAX 3465000 +#define WLAN_VREG_XTAL_MIN 1620000 +#define WLAN_VREG_XTAL_TYP 1800000 +#define POWER_ON_DELAY 4 + +/* Values for Dynamic Ramdump Collection*/ +#define CNSS_DUMP_FORMAT_VER 0x11 +#define CNSS_DUMP_MAGIC_VER_V2 0x42445953 +#define CNSS_DUMP_NAME "CNSS_WLAN_SDIO" +#define CNSS_PINCTRL_SLEEP_STATE "sleep" +#define CNSS_PINCTRL_ACTIVE_STATE "active" + +struct cnss_sdio_regulator { + struct regulator *wlan_io; + struct regulator *wlan_xtal; + struct regulator *wlan_vreg; + struct 
regulator *wlan_vreg_dsrc; +}; + +struct cnss_sdio_info { + struct cnss_sdio_wlan_driver *wdrv; + struct sdio_func *func; + struct mmc_card *card; + struct mmc_host *host; + struct device *dev; + const struct sdio_device_id *id; + bool skip_wlan_en_toggle; + bool cnss_hw_state; + struct cnss_cap_tsf_info cap_tsf_info; +}; + +#if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART) +struct cnss_ssr_info { + struct subsys_device *subsys; + struct subsys_desc subsysdesc; + void *subsys_handle; + struct ramdump_device *ramdump_dev; + unsigned long ramdump_size; + void *ramdump_addr; + phys_addr_t ramdump_phys; + struct msm_dump_data dump_data; + bool ramdump_dynamic; + char subsys_name[10]; +}; +#endif + +struct cnss_ramdump_info { + void *ramdump_dev; + unsigned long ramdump_size; + void *ramdump_va; + phys_addr_t ramdump_pa; + struct msm_dump_data dump_data; + char subsys_name[10]; + bool ramdump_dynamic; +}; + +struct cnss_wlan_pinctrl_info { + bool is_antenna_shared; + struct pinctrl *pinctrl; + struct pinctrl_state *sleep; + struct pinctrl_state *active; +}; + +struct cnss_sdio_bus_bandwidth { + struct msm_bus_scale_pdata *bus_scale_table; + u32 bus_client; + int current_bandwidth_vote; +}; + +static struct cnss_sdio_data { + struct cnss_sdio_regulator regulator; + struct platform_device *pdev; + struct cnss_sdio_info cnss_sdio_info; +#if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART) + struct cnss_ssr_info ssr_info; +#endif + struct pm_qos_request qos_request; + struct cnss_wlan_pinctrl_info pinctrl_info; + struct cnss_sdio_bus_bandwidth bus_bandwidth; + struct cnss_dev_platform_ops platform_ops; + struct notifier_block panic_nb; + struct cnss_ramdump_info ramdump_info; + struct rproc *rproc; +} *cnss_pdata; + +#define WLAN_RECOVERY_DELAY 1 +/* cnss sdio subsytem device name, required property */ +#define CNSS_SUBSYS_NAME_KEY "subsys-name" + +/* SDIO manufacturer ID and Codes */ +#define MANUFACTURER_ID_AR6320_BASE 0x500 +#define MANUFACTURER_ID_QCA9377_BASE 0x700 +#define 
MANUFACTURER_ID_QCA9379_BASE 0x800 +#define MANUFACTURER_CODE 0x271 + +static const struct sdio_device_id ar6k_id_table[] = { + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x1))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x2))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x3))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x4))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x5))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x6))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x7))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x8))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0x9))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xA))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xB))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xC))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xD))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xE))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6320_BASE | 0xF))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x1))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x2))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x3))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x4))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x5))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x6))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x7))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0x8))}, + {SDIO_DEVICE(MANUFACTURER_CODE, 
(MANUFACTURER_ID_QCA9377_BASE | 0x9))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xA))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xB))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xC))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xD))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xE))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9377_BASE | 0xF))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x0))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x1))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x2))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x3))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x4))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x5))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x6))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x7))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x8))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0x9))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xA))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xB))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xC))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xD))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xE))}, + {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_QCA9379_BASE | 0xF))}, + {}, +}; +MODULE_DEVICE_TABLE(sdio, ar6k_id_table); + +void cnss_sdio_request_pm_qos_type(int latency_type, u32 qos_val) +{ + if (!cnss_pdata) + return; + + pr_debug("PM QoS value: %d\n", qos_val); + cpu_latency_qos_add_request(&cnss_pdata->qos_request, qos_val); +} +EXPORT_SYMBOL_GPL(cnss_sdio_request_pm_qos_type); + +int 
cnss_sdio_request_bus_bandwidth(int bandwidth) +{ + int ret = 0; + struct cnss_sdio_bus_bandwidth *bus_bandwidth; + + if (!cnss_pdata) + return -ENODEV; + + bus_bandwidth = &cnss_pdata->bus_bandwidth; + if (!bus_bandwidth->bus_client) + return -EINVAL; +#ifdef CNSS_COMPLIE_ISSUE_FIX_LATER_IFNEEDED + switch (bandwidth) { + case CNSS_BUS_WIDTH_NONE: + case CNSS_BUS_WIDTH_LOW: + case CNSS_BUS_WIDTH_MEDIUM: + case CNSS_BUS_WIDTH_HIGH: + ret = legacy_bus_client_update_request(bus_bandwidth->bus_client, + bandwidth); + if (!ret) { + bus_bandwidth->current_bandwidth_vote = bandwidth; + } else { + pr_debug("could not set bus bandwidth %d, ret = %d\n", + bandwidth, ret); + } + break; + default: + pr_debug("Invalid request %d\n", bandwidth); + ret = -EINVAL; + } +#endif + return ret; +} + +void cnss_sdio_request_pm_qos(u32 qos_val) +{ + if (!cnss_pdata) + return; + + pr_debug("PM QoS value: %d\n", qos_val); + cpu_latency_qos_add_request(&cnss_pdata->qos_request, qos_val); +} +EXPORT_SYMBOL_GPL(cnss_sdio_request_pm_qos); + +void cnss_sdio_remove_pm_qos(void) +{ + if (!cnss_pdata) + return; + cpu_latency_qos_remove_request(&cnss_pdata->qos_request); + pr_debug("PM QoS removed\n"); +} +EXPORT_SYMBOL_GPL(cnss_sdio_remove_pm_qos); + +static int cnss_put_hw_resources(struct device *dev) +{ + int ret = -EINVAL; + struct cnss_sdio_info *info; + struct mmc_host *host; + + if (!cnss_pdata) + return ret; + + info = &cnss_pdata->cnss_sdio_info; + + if (info->skip_wlan_en_toggle) { + pr_debug("HW doesn't support wlan toggling\n"); + return 0; + } + + if (!info->cnss_hw_state) { + pr_debug("HW resources are already released\n"); + return 0; + } + + host = info->host; + + if (!host) { + pr_err("MMC host is invalid\n"); + return ret; + } +#ifdef CNSS_COMPLIE_ISSUE_FIX_LATER_IFNEEDED + ret = mmc_power_save_host(host); + if (ret) { + pr_err("Failed to Power Save Host err:%d\n", + ret); + return ret; + } +#endif + + if (cnss_pdata->regulator.wlan_vreg) + 
regulator_disable(cnss_pdata->regulator.wlan_vreg); + else + pr_debug("wlan_vreg regulator is invalid\n"); + + info->cnss_hw_state = false; + + return 0; +} + +static int cnss_get_hw_resources(struct device *dev) +{ + int ret = 0; + struct mmc_host *host; + struct cnss_sdio_info *info; + + if (!cnss_pdata) + return ret; + + info = &cnss_pdata->cnss_sdio_info; + + if (info->skip_wlan_en_toggle) { + pr_debug("HW doesn't support wlan toggling\n"); + return 0; + } + + if (info->cnss_hw_state) { + pr_debug("HW resources are already active\n"); + return 0; + } + + host = info->host; + + if (!host) { + pr_err("MMC Host is Invalid; Enumeration Failed\n"); + return ret; + } + + if (cnss_pdata->regulator.wlan_vreg) { + ret = regulator_enable(cnss_pdata->regulator.wlan_vreg); + if (ret) { + pr_err("Failed to enable wlan vreg\n"); + return ret; + } + } else { + pr_debug("wlan_vreg regulator is invalid\n"); + } +#ifdef CNSS_COMPLIE_ISSUE_FIX_LATER_IFNEEDED + ret = mmc_power_restore_host(host); + if (ret) { + pr_err("Failed to restore host power ret:%d\n", + ret); + if (cnss_pdata->regulator.wlan_vreg) + regulator_disable(cnss_pdata->regulator.wlan_vreg); + return ret; + } +#endif + info->cnss_hw_state = true; + return ret; +} + +#if IS_ENABLED(CONFIG_MSM_SUBSYSTEM_RESTART) +static int cnss_sdio_shutdown(const struct subsys_desc *subsys, bool force_stop) +{ + struct cnss_sdio_info *cnss_info; + struct cnss_sdio_wlan_driver *wdrv; + int ret = 0; + + if (!cnss_pdata) + return -ENODEV; + cnss_info = &cnss_pdata->cnss_sdio_info; + wdrv = cnss_info->wdrv; + if (!wdrv) + return 0; + if (!wdrv->shutdown) + return 0; + wdrv->shutdown(cnss_info->func); + ret = cnss_put_hw_resources(cnss_info->dev); + if (ret) + pr_err("Failed to put hw resources\n"); + return ret; +} + +static int cnss_sdio_powerup(const struct subsys_desc *subsys) +{ + struct cnss_sdio_info *cnss_info; + struct cnss_sdio_wlan_driver *wdrv; + int ret = 0; + + if (!cnss_pdata) + return -ENODEV; + cnss_info = 
&cnss_pdata->cnss_sdio_info; + wdrv = cnss_info->wdrv; + if (!wdrv) + return 0; + if (!wdrv->reinit) + return 0; + ret = cnss_get_hw_resources(cnss_info->dev); + if (ret) { + pr_err("Failed to power up HW\n"); + return ret; + } + ret = wdrv->reinit(cnss_info->func, cnss_info->id); + if (ret) + pr_err("wlan reinit error=%d\n", ret); + return ret; +} + +static void cnss_sdio_crash_shutdown(const struct subsys_desc *subsys) +{ + struct cnss_sdio_info *cnss_info; + struct cnss_sdio_wlan_driver *wdrv; + + if (!cnss_pdata) + return; + cnss_info = &cnss_pdata->cnss_sdio_info; + wdrv = cnss_info->wdrv; + if (wdrv && wdrv->crash_shutdown) + wdrv->crash_shutdown(cnss_info->func); +} + +static int cnss_sdio_ramdump(int enable, const struct subsys_desc *subsys) +{ + struct cnss_ssr_info *ssr_info; + struct ramdump_segment segment; + int ret; + + if (!cnss_pdata) + return -ENODEV; + + if (!cnss_pdata->ssr_info.ramdump_size) + return -ENOENT; + + if (!enable) + return 0; + ssr_info = &cnss_pdata->ssr_info; + memset(&segment, 0, sizeof(segment)); + segment.v_address = ssr_info->ramdump_addr; + segment.size = ssr_info->ramdump_size; + ret = do_ramdump(ssr_info->ramdump_dev, &segment, 1); + if (ret) + pr_err("do_ramdump failed error=%d\n", ret); + return ret; +} + +static int cnss_subsys_init(void) +{ + struct cnss_ssr_info *ssr_info; + int ret = 0; + + if (!cnss_pdata) + return -ENODEV; + + ssr_info = &cnss_pdata->ssr_info; + ssr_info->subsysdesc.name = ssr_info->subsys_name; + ssr_info->subsysdesc.owner = THIS_MODULE; + ssr_info->subsysdesc.shutdown = cnss_sdio_shutdown; + ssr_info->subsysdesc.powerup = cnss_sdio_powerup; + ssr_info->subsysdesc.ramdump = cnss_sdio_ramdump; + ssr_info->subsysdesc.crash_shutdown = cnss_sdio_crash_shutdown; + ssr_info->subsysdesc.dev = &cnss_pdata->pdev->dev; + ssr_info->subsys = subsys_register(&ssr_info->subsysdesc); + if (IS_ERR(ssr_info->subsys)) { + ret = PTR_ERR(ssr_info->subsys); + ssr_info->subsys = NULL; + dev_err(&cnss_pdata->pdev->dev, 
"Failed to subsys_register error=%d\n", + ret); + goto err_subsys_reg; + } + ssr_info->subsys_handle = subsystem_get(ssr_info->subsysdesc.name); + if (IS_ERR(ssr_info->subsys_handle)) { + ret = PTR_ERR(ssr_info->subsys_handle); + ssr_info->subsys_handle = NULL; + dev_err(&cnss_pdata->pdev->dev, "Failed to subsystem_get error=%d\n", + ret); + goto err_subsys_get; + } + return 0; +err_subsys_get: + subsys_unregister(ssr_info->subsys); + ssr_info->subsys = NULL; +err_subsys_reg: + return ret; +} + +static void cnss_subsys_exit(void) +{ + struct cnss_ssr_info *ssr_info; + + if (!cnss_pdata) + return; + + ssr_info = &cnss_pdata->ssr_info; + if (ssr_info->subsys_handle) + subsystem_put(ssr_info->subsys_handle); + ssr_info->subsys_handle = NULL; + if (ssr_info->subsys) + subsys_unregister(ssr_info->subsys); + ssr_info->subsys = NULL; +} + +static void cnss_ramdump_cleanup(void) +{ + struct cnss_ssr_info *ssr_info; + struct device *dev; + + if (!cnss_pdata) + return; + + dev = &cnss_pdata->pdev->dev; + ssr_info = &cnss_pdata->ssr_info; + if (ssr_info->ramdump_addr) { + if (ssr_info->ramdump_dynamic) + dma_free_coherent(dev, ssr_info->ramdump_size, + ssr_info->ramdump_addr, + ssr_info->ramdump_phys); + else + iounmap(ssr_info->ramdump_addr); + } + + ssr_info->ramdump_addr = NULL; + if (ssr_info->ramdump_dev) + destroy_ramdump_device(ssr_info->ramdump_dev); + ssr_info->ramdump_dev = NULL; +} + +void *cnss_sdio_get_virt_ramdump_mem(unsigned long *size) +{ + if (!cnss_pdata || !cnss_pdata->pdev) + return NULL; + + *size = cnss_pdata->ssr_info.ramdump_size; + + return cnss_pdata->ssr_info.ramdump_addr; +} + +void cnss_sdio_device_self_recovery(void) +{ + cnss_sdio_shutdown(NULL, false); + msleep(WLAN_RECOVERY_DELAY); + cnss_sdio_powerup(NULL); +} + +void cnss_sdio_device_crashed(void) +{ + struct cnss_ssr_info *ssr_info; + + if (!cnss_pdata) + return; + ssr_info = &cnss_pdata->ssr_info; + if (ssr_info->subsys) { + subsys_set_crash_status(ssr_info->subsys, true); + 
subsystem_restart_dev(ssr_info->subsys); + } +} + +static void cnss_sdio_recovery_work_handler(struct work_struct *recovery) +{ + cnss_sdio_device_self_recovery(); +} + +DECLARE_WORK(cnss_sdio_recovery_work, cnss_sdio_recovery_work_handler); + +void cnss_sdio_schedule_recovery_work(void) +{ + schedule_work(&cnss_sdio_recovery_work); +} + +/** + * cnss_get_restart_level() - cnss get restart level API + * + * Wlan sdio function driver uses this API to get the current + * subsystem restart level. + * + * Return: CNSS_RESET_SOC - "SYSTEM", restart system + * CNSS_RESET_SUBSYS_COUPLED - "RELATED",restart subsystem + */ +int cnss_get_restart_level(void) +{ + struct cnss_ssr_info *ssr_info; + int level = 0; + + if (!cnss_pdata) + return CNSS_RESET_SOC; + ssr_info = &cnss_pdata->ssr_info; + if (!ssr_info->subsys) + return CNSS_RESET_SOC; + + level = subsys_get_restart_level(ssr_info->subsys); + + switch (level) { + case RESET_SOC: + return CNSS_RESET_SOC; + case RESET_SUBSYS_COUPLED: + return CNSS_RESET_SUBSYS_COUPLED; + default: + return CNSS_RESET_SOC; + } +} +EXPORT_SYMBOL_GPL(cnss_get_restart_level); + +#else + +/** + * cnss_get_restart_level() - cnss get restart level API + * + * Wlan sdio function driver uses this API to get the current + * subsystem restart level. 
+ * + * Return: CNSS_RESET_SOC - "SYSTEM", restart system + * CNSS_RESET_SUBSYS_COUPLED - "RELATED",restart subsystem + */ + +int cnss_get_restart_level(void) +{ + if (!cnss_pdata) + return CNSS_RESET_SOC; + if (cnss_pdata->rproc->recovery_disabled) + return CNSS_RESET_SOC; + else + return CNSS_RESET_SUBSYS_COUPLED; +} +EXPORT_SYMBOL_GPL(cnss_get_restart_level); + +static int cnss_sdio_shutdown(void) +{ + struct cnss_sdio_info *cnss_info; + struct cnss_sdio_wlan_driver *wdrv; + int ret = 0; + + if (!cnss_pdata) + return -ENODEV; + cnss_info = &cnss_pdata->cnss_sdio_info; + wdrv = cnss_info->wdrv; + if (!wdrv) + return 0; + if (!wdrv->shutdown) + return 0; + wdrv->shutdown(cnss_info->func); + ret = cnss_put_hw_resources(cnss_info->dev); + if (ret) + pr_err("Failed to put hw resources\n"); + return ret; +} + +static int cnss_sdio_powerup(void) +{ + struct cnss_sdio_info *cnss_info; + struct cnss_sdio_wlan_driver *wdrv; + int ret = 0; + + if (!cnss_pdata) + return -ENODEV; + cnss_info = &cnss_pdata->cnss_sdio_info; + wdrv = cnss_info->wdrv; + if (!wdrv) + return 0; + if (!wdrv->reinit) + return 0; + ret = cnss_get_hw_resources(cnss_info->dev); + if (ret) { + pr_err("Failed to power up HW\n"); + return ret; + } + ret = wdrv->reinit(cnss_info->func, cnss_info->id); + if (ret) + pr_err("wlan reinit error=%d\n", ret); + return ret; +} + +void cnss_sdio_device_self_recovery(void) +{ + cnss_sdio_shutdown(); + msleep(WLAN_RECOVERY_DELAY); + cnss_sdio_powerup(); +} + +static void cnss_sdio_recovery_work_handler(struct work_struct *recovery) +{ + cnss_sdio_device_self_recovery(); +} + +DECLARE_WORK(cnss_sdio_recovery_work, cnss_sdio_recovery_work_handler); + +void cnss_sdio_schedule_recovery_work(void) +{ + schedule_work(&cnss_sdio_recovery_work); +} + +void cnss_sdio_device_crashed(void) +{ + if (!cnss_pdata) + return; + if (cnss_pdata->rproc->recovery_disabled) + panic("subsys-restart: Resetting the SoC wlan crashed\n"); + cnss_sdio_schedule_recovery_work(); +} + +static 
void cnss_sdio_crash_shutdown(void)
+{
+	struct cnss_sdio_info *cnss_info;
+	struct cnss_sdio_wlan_driver *wdrv;
+
+	if (!cnss_pdata)
+		return;
+
+	cnss_info = &cnss_pdata->cnss_sdio_info;
+	wdrv = cnss_info->wdrv;
+	/* Forward the crash notification to the WLAN driver, if one attached. */
+	if (wdrv && wdrv->crash_shutdown)
+		wdrv->crash_shutdown(cnss_info->func);
+}
+
+/* Panic notifier: give the WLAN driver a last chance to quiesce the chip. */
+static int cnss_panic_handler(struct notifier_block *nb, unsigned long action,
+			      void *data)
+{
+	cnss_sdio_crash_shutdown();
+	return NOTIFY_DONE;
+}
+
+/* rproc start/stop hooks: power is managed via the SDIO path, so no-ops. */
+static int cnss_sdio_up(struct rproc *rproc)
+{
+	return 0;
+}
+
+static int cnss_sdio_down(struct rproc *rproc)
+{
+	return 0;
+}
+
+/**
+ * cnss_subsys_init() - register panic notifier and remoteproc instance
+ *
+ * Reads the subsystem name from DT (CNSS_SUBSYS_NAME_KEY), registers a
+ * panic notifier that quiesces the WLAN chip, and adds an rproc instance
+ * so recovery can be driven through the remoteproc framework.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int cnss_subsys_init(void)
+{
+	int ret;
+	const char *name;
+	struct device *dev;
+
+	static const struct rproc_ops cnss_ops = {
+		.start = cnss_sdio_up,
+		.stop = cnss_sdio_down,
+	};
+
+	const struct rproc_ops *ops = &cnss_ops;
+
+	if (!cnss_pdata)
+		return -ENODEV;
+
+	dev = &cnss_pdata->pdev->dev;
+	ret = of_property_read_string(dev->of_node, CNSS_SUBSYS_NAME_KEY,
+				      &name);
+	if (ret) {
+		pr_err("cnss missing DT key '%s'\n", CNSS_SUBSYS_NAME_KEY);
+		/* Fix: must bail out here; 'name' is uninitialized on failure
+		 * and was previously passed to rproc_alloc() below.
+		 */
+		return -ENODEV;
+	}
+
+	cnss_pdata->panic_nb.notifier_call = cnss_panic_handler;
+	ret = atomic_notifier_chain_register(&panic_notifier_list,
+					     &cnss_pdata->panic_nb);
+	if (ret) {
+		pr_err("Failed to register panic handler\n");
+		return -EINVAL;
+	}
+	cnss_pdata->rproc = rproc_alloc(&cnss_pdata->pdev->dev, name, ops,
+					NULL, 0);
+	if (!cnss_pdata->rproc) {
+		pr_err("Failed to allocate rproc\n");
+		ret = -ENOMEM;
+		goto unregister_nb;
+	}
+	cnss_pdata->rproc->recovery_disabled = true;
+	ret = rproc_add(cnss_pdata->rproc);
+	if (ret) {
+		pr_info("rproc_add failed: %d\n", ret);
+		goto free_rproc;
+	}
+
+	return 0;
+free_rproc:
+	rproc_free(cnss_pdata->rproc);
+	cnss_pdata->rproc = NULL;
+unregister_nb:
+	/* Fix: undo the panic-notifier registration and propagate the error
+	 * (the original returned 0 from the rproc_add failure path, hiding
+	 * the failure from the probe caller).
+	 */
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &cnss_pdata->panic_nb);
+	return ret;
+}
+
+void cnss_subsys_exit(void)
+{
+	int ret;
+
+	/* Guard against exit without (successful) init, matching the NULL
+	 * check every other entry point in this driver performs.
+	 */
+	if (!cnss_pdata)
+		return;
+
+	ret = atomic_notifier_chain_unregister(&panic_notifier_list,
+					       &cnss_pdata->panic_nb);
+	if (ret)
+		pr_err("Failed to unregister panic handler\n");
+	if (cnss_pdata->rproc) {
+		/* rproc_add() succeeded in init; per the remoteproc API the
+		 * instance must be unregistered before it is freed.
+		 */
+		rproc_del(cnss_pdata->rproc);
+		rproc_free(cnss_pdata->rproc);
+		cnss_pdata->rproc = NULL;
+	}
+}
+
+static int cnss_configure_dump_table(struct cnss_ramdump_info
*ramdump_info) +{ + struct msm_dump_entry dump_entry; + int ret; + + ramdump_info->dump_data.addr = ramdump_info->ramdump_pa; + ramdump_info->dump_data.len = ramdump_info->ramdump_size; + ramdump_info->dump_data.version = CNSS_DUMP_FORMAT_VER; + ramdump_info->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2; + strscpy(ramdump_info->dump_data.name, CNSS_DUMP_NAME, + sizeof(ramdump_info->dump_data.name)); + + dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN; + dump_entry.addr = virt_to_phys(&ramdump_info->dump_data); + + ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry); + if (ret) + pr_err("Dump table setup failed: %d\n", ret); + + return ret; +} + +static int cnss_configure_ramdump(void) +{ + struct cnss_ramdump_info *ramdump_info; + int ret = 0; + struct resource *res; + const char *name; + u32 ramdump_size = 0; + struct device *dev; + + if (!cnss_pdata) + return -ENODEV; + + dev = &cnss_pdata->pdev->dev; + ramdump_info = &cnss_pdata->ramdump_info; + ret = of_property_read_string(dev->of_node, CNSS_SUBSYS_NAME_KEY, + &name); + if (ret) { + pr_err("cnss missing DT key '%s'\n", CNSS_SUBSYS_NAME_KEY); + ret = -ENODEV; + goto err_subsys_name_query; + } + strscpy(ramdump_info->subsys_name, name, sizeof(ramdump_info->subsys_name)); + if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic", + &ramdump_size) == 0) { + ramdump_info->ramdump_va = + dma_alloc_coherent(dev, ramdump_size, + &ramdump_info->ramdump_pa, + GFP_KERNEL); + if (ramdump_info->ramdump_va) + ramdump_info->ramdump_size = ramdump_size; + ramdump_info->ramdump_dynamic = true; + } else { + res = platform_get_resource_byname(cnss_pdata->pdev, + IORESOURCE_MEM, "ramdump"); + if (res) { + ramdump_info->ramdump_pa = res->start; + ramdump_size = resource_size(res); + ramdump_info->ramdump_va = ioremap(ramdump_info->ramdump_pa, + ramdump_size); + if (ramdump_info->ramdump_va) + ramdump_info->ramdump_size = ramdump_size; + ramdump_info->ramdump_dynamic = false; + } + } + + pr_info("ramdump addr: %p, phys: 
%pa subsys:'%s'\n", + ramdump_info->ramdump_va, &ramdump_info->ramdump_pa, + ramdump_info->subsys_name); + + if (ramdump_info->ramdump_size == 0) { + pr_info("CNSS ramdump will not be collected\n"); + return 0; + } + + if (ramdump_info->ramdump_dynamic) { + ret = cnss_configure_dump_table(ramdump_info); + if (ret) + goto err_configure_dump_table; + } + + ramdump_info->ramdump_dev = qcom_create_ramdump_device(ramdump_info->subsys_name, + dev); + if (!ramdump_info->ramdump_dev) { + ret = -ENOMEM; + pr_err("ramdump dev create failed: error=%d\n", ret); + goto err_configure_dump_table; + } + + return 0; + +err_configure_dump_table: + if (ramdump_info->ramdump_dynamic) + dma_free_coherent(dev, ramdump_info->ramdump_size, + ramdump_info->ramdump_va, + ramdump_info->ramdump_pa); + else + iounmap(ramdump_info->ramdump_va); + + ramdump_info->ramdump_va = NULL; + ramdump_info->ramdump_size = 0; +err_subsys_name_query: + return ret; +} + +static void cnss_ramdump_cleanup(void) +{ + struct device *dev; + struct cnss_ramdump_info *ramdump_info; + + if (!cnss_pdata) + return; + + dev = &cnss_pdata->pdev->dev; + ramdump_info = &cnss_pdata->ramdump_info; + if (ramdump_info->ramdump_va) { + if (ramdump_info->ramdump_dynamic) + dma_free_coherent(dev, ramdump_info->ramdump_size, + ramdump_info->ramdump_va, + ramdump_info->ramdump_pa); + else + iounmap(ramdump_info->ramdump_va); + } + + ramdump_info->ramdump_va = NULL; + if (ramdump_info->ramdump_dev) + qcom_destroy_ramdump_device(ramdump_info->ramdump_dev); + ramdump_info->ramdump_dev = NULL; +} + +void *cnss_sdio_get_virt_ramdump_mem(unsigned long *size) +{ + if (!cnss_pdata || !cnss_pdata->pdev) + return NULL; + + *size = cnss_pdata->ramdump_info.ramdump_size; + + return cnss_pdata->ramdump_info.ramdump_va; +} +#endif + +static inline int cnss_get_tsf_cap_irq(struct device *dev) +{ + int irq = -EINVAL; + int gpio; + + if (!dev) + return -ENODEV; + + gpio = of_get_named_gpio(dev->of_node, WLAN_GPIO_CAPTSF_NAME, 0); + if (gpio >= 0) 
+ irq = gpio_to_irq(gpio); + + return irq; +} + +static int cnss_sdio_register_tsf_captured_handler(irq_handler_t handler, + void *ctx) +{ + struct cnss_cap_tsf_info *tsf_info; + + if (!cnss_pdata) + return -ENODEV; + + tsf_info = &cnss_pdata->cnss_sdio_info.cap_tsf_info; + if (tsf_info->irq_num < 0) + return -EOPNOTSUPP; + + tsf_info->irq_handler = handler; + tsf_info->context = ctx; + return 0; +} + +static int cnss_sdio_unregister_tsf_captured_handler(void *ctx) +{ + struct cnss_cap_tsf_info *tsf_info; + + if (!cnss_pdata) + return -ENODEV; + + tsf_info = &cnss_pdata->cnss_sdio_info.cap_tsf_info; + if (tsf_info->irq_num < 0) + return -EOPNOTSUPP; + + if (ctx == tsf_info->context) { + tsf_info->irq_handler = NULL; + tsf_info->context = NULL; + } + return 0; +} + +static irqreturn_t cnss_sdio_tsf_captured_handler(int irq, void *ctx) +{ + struct cnss_cap_tsf_info *tsf_info; + + if (!cnss_pdata) + return IRQ_HANDLED; + + tsf_info = &cnss_pdata->cnss_sdio_info.cap_tsf_info; + if (tsf_info->irq_num < 0 || tsf_info->irq_num != irq || + !tsf_info->irq_handler || !tsf_info->context) + return IRQ_HANDLED; + + return tsf_info->irq_handler(irq, tsf_info->context); +} + +static void cnss_sdio_tsf_init(struct device *dev, + struct cnss_cap_tsf_info *tsf_info) +{ + int ret, irq; + + tsf_info->irq_num = -EINVAL; + tsf_info->irq_handler = NULL; + tsf_info->context = NULL; + + irq = cnss_get_tsf_cap_irq(dev); + if (irq < 0) { + dev_err(dev, "%s: fail to get irq: %d\n", __func__, irq); + return; + } + + ret = request_irq(irq, cnss_sdio_tsf_captured_handler, + IRQF_SHARED | IRQF_TRIGGER_RISING, dev_name(dev), + (void *)tsf_info); + dev_err(dev, "%s: request irq[%d] for dev: %s, result: %d\n", + __func__, irq, dev_name(dev), ret); + if (!ret) + tsf_info->irq_num = irq; +} + +static void cnss_sdio_tsf_deinit(struct cnss_cap_tsf_info *tsf_info) +{ + int irq = tsf_info->irq_num; + + if (irq < 0) + return; + + free_irq(irq, (void *)tsf_info); + + tsf_info->irq_num = -EINVAL; + 
tsf_info->irq_handler = NULL; + tsf_info->context = NULL; +} + +static void cnss_sdio_set_platform_ops(struct device *dev) +{ + struct cnss_dev_platform_ops *pf_ops = &cnss_pdata->platform_ops; + + pf_ops->power_up = cnss_sdio_power_up; + pf_ops->power_down = cnss_sdio_power_down; + pf_ops->device_crashed = cnss_sdio_device_crashed; + + pf_ops->get_virt_ramdump_mem = cnss_sdio_get_virt_ramdump_mem; + pf_ops->device_self_recovery = cnss_sdio_device_self_recovery; + pf_ops->schedule_recovery_work = cnss_sdio_schedule_recovery_work; + pf_ops->get_wlan_mac_address = cnss_sdio_get_wlan_mac_address; + pf_ops->set_wlan_mac_address = cnss_sdio_set_wlan_mac_address; + pf_ops->request_bus_bandwidth = cnss_sdio_request_bus_bandwidth; + pf_ops->register_tsf_captured_handler = + cnss_sdio_register_tsf_captured_handler; + pf_ops->unregister_tsf_captured_handler = + cnss_sdio_unregister_tsf_captured_handler; + dev->platform_data = pf_ops; +} + +static int cnss_sdio_wlan_inserted(struct sdio_func *func, + const struct sdio_device_id *id) +{ + struct cnss_sdio_info *info; + + if (!cnss_pdata) + return -ENODEV; + + info = &cnss_pdata->cnss_sdio_info; + + info->func = func; + info->card = func->card; + info->host = func->card->host; + info->id = id; + info->dev = &func->dev; + cnss_sdio_set_platform_ops(info->dev); + + cnss_put_hw_resources(cnss_pdata->cnss_sdio_info.dev); + + pr_info("SDIO Device is Probed\n"); + return 0; +} + +static void cnss_sdio_wlan_removed(struct sdio_func *func) +{ + struct cnss_sdio_info *info; + + if (!cnss_pdata) + return; + + info = &cnss_pdata->cnss_sdio_info; + + info->host = NULL; + info->card = NULL; + info->func = NULL; + info->id = NULL; +} + +#if defined(CONFIG_PM) +static int cnss_sdio_wlan_suspend(struct device *dev) +{ + struct cnss_sdio_wlan_driver *wdrv; +#ifdef CNSS_COMPLIE_ISSUE_FIX_LATER_IFNEEDED + struct cnss_sdio_bus_bandwidth *bus_bandwidth; +#endif + struct sdio_func *func; + + int error = 0; + + if (!cnss_pdata) + return -ENODEV; + 
+#ifdef CNSS_COMPLIE_ISSUE_FIX_LATER_IFNEEDED + bus_bandwidth = &cnss_pdata->bus_bandwidth; + if (bus_bandwidth->bus_client) { + legacy_bus_client_update_request(bus_bandwidth->bus_client, + CNSS_BUS_WIDTH_NONE); + } +#endif + func = cnss_pdata->cnss_sdio_info.func; + wdrv = cnss_pdata->cnss_sdio_info.wdrv; + if (!wdrv) { + /* This can happen when no wlan driver loaded (no register to + * platform driver). + */ + sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); + pr_debug("wlan driver not registered\n"); + return 0; + } + if (wdrv->suspend) { + error = wdrv->suspend(dev); + if (error) + pr_err("wlan suspend failed error=%d\n", error); + } + + return error; +} + +static int cnss_sdio_wlan_resume(struct device *dev) +{ + struct cnss_sdio_wlan_driver *wdrv; +#ifdef CNSS_COMPLIE_ISSUE_FIX_LATER_IFNEEDED + struct cnss_sdio_bus_bandwidth *bus_bandwidth; +#endif + int error = 0; + + if (!cnss_pdata) + return -ENODEV; + +#ifdef CNSS_COMPLIE_ISSUE_FIX_LATER_IFNEEDED + bus_bandwidth = &cnss_pdata->bus_bandwidth; + if (bus_bandwidth->bus_client) { + legacy_bus_client_update_request(bus_bandwidth->bus_client, + bus_bandwidth->current_bandwidth_vote); + } +#endif + + wdrv = cnss_pdata->cnss_sdio_info.wdrv; + if (!wdrv) { + /* This can happen when no wlan driver loaded (no register to + * platform driver). 
+ */ + pr_debug("wlan driver not registered\n"); + return 0; + } + if (wdrv->resume) { + error = wdrv->resume(dev); + if (error) + pr_err("wlan resume failed error=%d\n", error); + } + return error; +} +#endif + +#if defined(CONFIG_PM) +static const struct dev_pm_ops cnss_ar6k_device_pm_ops = { + .suspend = cnss_sdio_wlan_suspend, + .resume = cnss_sdio_wlan_resume, +}; +#endif /* CONFIG_PM */ + +static struct sdio_driver cnss_ar6k_driver = { + .name = "cnss_ar6k_wlan", + .id_table = ar6k_id_table, + .probe = cnss_sdio_wlan_inserted, + .remove = cnss_sdio_wlan_removed, +#if defined(CONFIG_PM) + .drv = { + .pm = &cnss_ar6k_device_pm_ops, + } +#endif +}; + +static int cnss_set_pinctrl_state(struct cnss_sdio_data *pdata, bool state) +{ + struct cnss_wlan_pinctrl_info *info = &pdata->pinctrl_info; + + if (!info->is_antenna_shared) + return 0; + + if (!info->pinctrl) + return -EIO; + + return state ? pinctrl_select_state(info->pinctrl, info->active) : + pinctrl_select_state(info->pinctrl, info->sleep); +} + +int cnss_sdio_configure_spdt(bool state) +{ + if (!cnss_pdata) + return -ENODEV; + + return cnss_set_pinctrl_state(cnss_pdata, state); +} +EXPORT_SYMBOL_GPL(cnss_sdio_configure_spdt); + +/** + * cnss_sdio_wlan_register_driver() - cnss wlan register API + * @driver: sdio wlan driver interface from wlan driver. + * + * wlan sdio function driver uses this API to register callback + * functions to cnss_sido platform driver. The callback will + * be invoked by corresponding wrapper function of this cnss + * platform driver. 
+ */
+int cnss_sdio_wlan_register_driver(struct cnss_sdio_wlan_driver *driver)
+{
+	struct cnss_sdio_info *cnss_info;
+	struct device *dev;
+	int ret = -EINVAL;
+
+	if (!cnss_pdata)
+		return -ENODEV;
+
+	cnss_info = &cnss_pdata->cnss_sdio_info;
+	dev = cnss_info->dev;
+
+	/* Only one WLAN driver may be attached at a time. */
+	if (cnss_info->wdrv) {
+		pr_debug("wdrv already existed\n");
+		return ret;
+	}
+
+	if (!driver)
+		return ret;
+
+	ret = cnss_get_hw_resources(dev);
+	if (ret) {
+		pr_err("Failed to restore power err:%d\n", ret);
+		return ret;
+	}
+
+	ret = cnss_set_pinctrl_state(cnss_pdata, PINCTRL_ACTIVE);
+	if (ret) {
+		pr_err("Fail to set pinctrl to active state\n");
+		cnss_put_hw_resources(dev);
+		goto put_hw;
+	}
+
+	/* The HW resources are released in unregister logic if probe fails */
+	if (driver->probe)
+		ret = driver->probe(cnss_info->func, cnss_info->id);
+	if (ret) {
+		pr_err("wlan probe failed error=%d\n", ret);
+		/*
+		 * Check memory leak in skb pre-alloc memory pool
+		 * Reset the skb memory pool
+		 */
+		goto pinctrl_sleep;
+	}
+
+	cnss_info->wdrv = driver;
+
+	return ret;
+
+pinctrl_sleep:
+	cnss_set_pinctrl_state(cnss_pdata, PINCTRL_SLEEP);
+put_hw:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cnss_sdio_wlan_register_driver);
+
+/**
+ * cnss_sdio_wlan_unregister_driver() - cnss wlan unregister API
+ * @driver: sdio wlan driver interface from wlan driver.
+ *
+ * wlan sdio function driver uses this API to detach it from cnss_sdio
+ * platform driver.
+ */ +void +cnss_sdio_wlan_unregister_driver(struct cnss_sdio_wlan_driver *driver) +{ + struct cnss_sdio_info *cnss_info; +#ifdef CNSS_COMPLIE_ISSUE_FIX_LATER_IFNEEDED + struct cnss_sdio_bus_bandwidth *bus_bandwidth; +#endif + + if (!cnss_pdata) + return; + +#ifdef CNSS_COMPLIE_ISSUE_FIX_LATER_IFNEEDED + bus_bandwidth = &cnss_pdata->bus_bandwidth; + if (bus_bandwidth->bus_client) { + legacy_bus_client_update_request(bus_bandwidth->bus_client, + CNSS_BUS_WIDTH_NONE); + } +#endif + + cnss_info = &cnss_pdata->cnss_sdio_info; + if (!cnss_info->wdrv) { + pr_err("driver not registered\n"); + return; + } + + if (!driver) + return; + + if (!driver->remove) + return; + + driver->remove(cnss_info->func); + + cnss_info->wdrv = NULL; + cnss_set_pinctrl_state(cnss_pdata, PINCTRL_SLEEP); + cnss_put_hw_resources(cnss_info->dev); +} +EXPORT_SYMBOL_GPL(cnss_sdio_wlan_unregister_driver); + +/** + * cnss_wlan_query_oob_status() - cnss wlan query oob status API + * + * Wlan sdio function driver uses this API to check whether oob is + * supported in platform driver. + * + * Return: 0 means oob is supported, others means unsupported. + */ +int cnss_wlan_query_oob_status(void) +{ + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cnss_wlan_query_oob_status); + +/** + * cnss_wlan_register_oob_irq_handler() - cnss wlan register oob callback API + * @handler: oob callback function pointer which registered to platform driver. + * @pm_oob : parameter which registered to platform driver. + * + * Wlan sdio function driver uses this API to register oob callback + * function to platform driver. + * + * Return: 0 means register successfully, others means failure. + */ +int cnss_wlan_register_oob_irq_handler(oob_irq_handler_t handler, void *pm_oob) +{ + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cnss_wlan_register_oob_irq_handler); + +/** + * cnss_wlan_unregister_oob_irq_handler() - unregister oob callback API + * @pm_oob: parameter which unregistered from platform driver. 
+ * + * Wlan sdio function driver uses this API to unregister oob callback + * function from platform driver. + * + * Return: 0 means unregister successfully, others means failure. + */ +int cnss_wlan_unregister_oob_irq_handler(void *pm_oob) +{ + return -EINVAL; +} +EXPORT_SYMBOL_GPL(cnss_wlan_unregister_oob_irq_handler); + +static void cnss_sdio_reset_platform_ops(void) +{ + struct cnss_dev_platform_ops *pf_ops = &cnss_pdata->platform_ops; + struct cnss_sdio_info *sdio_info = &cnss_pdata->cnss_sdio_info; + + memset(pf_ops, 0, sizeof(struct cnss_dev_platform_ops)); + if (sdio_info->dev) + sdio_info->dev->platform_data = NULL; +} + +static int cnss_sdio_wlan_init(void) +{ + int error = 0; + + error = sdio_register_driver(&cnss_ar6k_driver); + if (error) { + cnss_sdio_reset_platform_ops(); + pr_err("registered fail error=%d\n", error); + } else { + pr_debug("registered success\n"); + } + return error; +} + +static void cnss_sdio_wlan_exit(void) +{ + if (!cnss_pdata) + return; + + cnss_sdio_reset_platform_ops(); + sdio_unregister_driver(&cnss_ar6k_driver); +} + +static void cnss_sdio_deinit_bus_bandwidth(void) +{ +#ifdef CNSS_COMPLIE_ISSUE_FIX_LATER_IFNEEDED + struct cnss_sdio_bus_bandwidth *bus_bandwidth; + + bus_bandwidth = &cnss_pdata->bus_bandwidth; + if (bus_bandwidth->bus_client) { + legacy_bus_client_update_request(bus_bandwidth->bus_client, + CNSS_BUS_WIDTH_NONE); + legacy_bus_unregister_client(bus_bandwidth->bus_client); + } +#endif +} + +static int cnss_sdio_configure_wlan_enable_regulator(void) +{ + int error; + struct device *dev = &cnss_pdata->pdev->dev; + + if (of_get_property(cnss_pdata->pdev->dev.of_node, + WLAN_VREG_NAME "-supply", NULL)) { + cnss_pdata->regulator.wlan_vreg = + regulator_get(&cnss_pdata->pdev->dev, WLAN_VREG_NAME); + if (IS_ERR(cnss_pdata->regulator.wlan_vreg)) { + error = PTR_ERR(cnss_pdata->regulator.wlan_vreg); + dev_err(dev, "VDD-VREG get failed error=%d\n", error); + return error; + } + + error = 
regulator_enable(cnss_pdata->regulator.wlan_vreg); + if (error) { + dev_err(dev, "VDD-VREG enable failed error=%d\n", + error); + goto err_vdd_vreg_regulator; + } + } + + return 0; + +err_vdd_vreg_regulator: + regulator_put(cnss_pdata->regulator.wlan_vreg); + + return error; +} + +static int cnss_sdio_configure_wlan_enable_dsrc_regulator(void) +{ + int error; + struct device *dev = &cnss_pdata->pdev->dev; + + if (of_get_property(cnss_pdata->pdev->dev.of_node, + WLAN_VREG_DSRC_NAME "-supply", NULL)) { + cnss_pdata->regulator.wlan_vreg_dsrc = + regulator_get(&cnss_pdata->pdev->dev, WLAN_VREG_DSRC_NAME); + if (IS_ERR(cnss_pdata->regulator.wlan_vreg_dsrc)) { + error = PTR_ERR(cnss_pdata->regulator.wlan_vreg_dsrc); + dev_err(dev, "VDD-VREG-DSRC get failed error=%d\n", + error); + return error; + } + + error = regulator_enable(cnss_pdata->regulator.wlan_vreg_dsrc); + if (error) { + dev_err(dev, "VDD-VREG-DSRC enable failed error=%d\n", + error); + goto err_vdd_vreg_dsrc_regulator; + } + } + + return 0; + +err_vdd_vreg_dsrc_regulator: + regulator_put(cnss_pdata->regulator.wlan_vreg_dsrc); + + return error; +} + +static int cnss_sdio_configure_regulator(void) +{ + int error; + struct device *dev = &cnss_pdata->pdev->dev; + u32 vdd_xtal_min; + u32 vdd_xtal_max; + + if (of_get_property(cnss_pdata->pdev->dev.of_node, + WLAN_VREG_IO_NAME "-supply", NULL)) { + cnss_pdata->regulator.wlan_io = + regulator_get(&cnss_pdata->pdev->dev, WLAN_VREG_IO_NAME); + if (IS_ERR(cnss_pdata->regulator.wlan_io)) { + error = PTR_ERR(cnss_pdata->regulator.wlan_io); + dev_err(dev, "VDD-IO get failed error=%d\n", error); + return error; + } + + error = regulator_set_voltage(cnss_pdata->regulator.wlan_io, + WLAN_VREG_IO_MIN, + WLAN_VREG_IO_MAX); + if (error) { + dev_err(dev, "VDD-IO set failed error=%d\n", error); + goto err_vdd_io_regulator; + } else { + error = regulator_enable(cnss_pdata->regulator.wlan_io); + if (error) { + dev_err(dev, "VDD-IO enable failed error=%d\n", + error); + goto 
err_vdd_io_regulator; + } + } + } + + if (of_get_property(cnss_pdata->pdev->dev.of_node, + WLAN_VREG_XTAL_NAME "-supply", NULL)) { + cnss_pdata->regulator.wlan_xtal = + regulator_get(&cnss_pdata->pdev->dev, WLAN_VREG_XTAL_NAME); + if (IS_ERR(cnss_pdata->regulator.wlan_xtal)) { + error = PTR_ERR(cnss_pdata->regulator.wlan_xtal); + dev_err(dev, "VDD-XTAL get failed error=%d\n", error); + goto err_vdd_xtal_regulator; + } + + if (!of_property_read_u32(cnss_pdata->pdev->dev.of_node, + WLAN_VREG_XTAL_NAME "-min", + &vdd_xtal_min)) { + if (vdd_xtal_min < WLAN_VREG_XTAL_MIN || + vdd_xtal_min > WLAN_VREG_XTAL_MAX) + vdd_xtal_min = WLAN_VREG_XTAL_TYP; + } else { + vdd_xtal_min = WLAN_VREG_XTAL_TYP; + } + + if (!of_property_read_u32(cnss_pdata->pdev->dev.of_node, + WLAN_VREG_XTAL_NAME "-max", + &vdd_xtal_max)) { + if (vdd_xtal_max < WLAN_VREG_XTAL_MIN || + vdd_xtal_max > WLAN_VREG_XTAL_MAX) + vdd_xtal_max = WLAN_VREG_XTAL_TYP; + } else { + vdd_xtal_max = WLAN_VREG_XTAL_TYP; + } + + if (vdd_xtal_min > vdd_xtal_max) + vdd_xtal_min = vdd_xtal_max; + + error = regulator_set_voltage(cnss_pdata->regulator.wlan_xtal, + vdd_xtal_min, vdd_xtal_max); + if (error) { + dev_err(dev, "VDD-XTAL set failed error=%d\n", error); + goto err_vdd_xtal_regulator; + } else { + error = + regulator_enable(cnss_pdata->regulator.wlan_xtal); + if (error) { + dev_err(dev, "VDD-XTAL enable failed err=%d\n", + error); + goto err_vdd_xtal_regulator; + } + } + } + + return 0; + +err_vdd_xtal_regulator: + regulator_put(cnss_pdata->regulator.wlan_xtal); +err_vdd_io_regulator: + regulator_put(cnss_pdata->regulator.wlan_io); + return error; +} + +static void cnss_sdio_release_resource(void) +{ + if (cnss_pdata->regulator.wlan_xtal) + regulator_put(cnss_pdata->regulator.wlan_xtal); + if (cnss_pdata->regulator.wlan_vreg) + regulator_put(cnss_pdata->regulator.wlan_vreg); + if (cnss_pdata->regulator.wlan_io) + regulator_put(cnss_pdata->regulator.wlan_io); + if (cnss_pdata->regulator.wlan_vreg_dsrc) + 
regulator_put(cnss_pdata->regulator.wlan_vreg_dsrc); +} + +static int cnss_sdio_pinctrl_init(struct cnss_sdio_data *pdata, + struct platform_device *pdev) +{ + int ret = 0; + struct device *dev = &pdev->dev; + struct cnss_wlan_pinctrl_info *info = &pdata->pinctrl_info; + + if (!of_find_property(dev->of_node, "qcom,is-antenna-shared", NULL)) + return 0; + + info->is_antenna_shared = true; + info->pinctrl = devm_pinctrl_get(dev); + if ((IS_ERR_OR_NULL(info->pinctrl))) { + dev_err(dev, "%s: Failed to get pinctrl\n", __func__); + return PTR_ERR(info->pinctrl); + } + + info->sleep = pinctrl_lookup_state(info->pinctrl, + CNSS_PINCTRL_SLEEP_STATE); + if (IS_ERR_OR_NULL(info->sleep)) { + dev_err(dev, "%s: Fail to get sleep state for pin\n", __func__); + ret = PTR_ERR(info->sleep); + goto release_pinctrl; + } + + info->active = pinctrl_lookup_state(info->pinctrl, + CNSS_PINCTRL_ACTIVE_STATE); + if (IS_ERR_OR_NULL(info->active)) { + dev_err(dev, "%s: Fail to get active state for pin\n", + __func__); + ret = PTR_ERR(info->active); + goto release_pinctrl; + } + + ret = cnss_set_pinctrl_state(pdata, PINCTRL_SLEEP); + + if (ret) { + dev_err(dev, "%s: Fail to set pin in sleep state\n", __func__); + goto release_pinctrl; + } + + return ret; + +release_pinctrl: + devm_pinctrl_put(info->pinctrl); + info->is_antenna_shared = false; + return ret; +} + +static int cnss_sdio_init_bus_bandwidth(void) +{ + int ret = 0; +#ifdef CNSS_COMPLIE_ISSUE_FIX_LATER_IFNEEDED + struct cnss_sdio_bus_bandwidth *bus_bandwidth; + struct device *dev = &cnss_pdata->pdev->dev; + + bus_bandwidth = &cnss_pdata->bus_bandwidth; + bus_bandwidth->bus_scale_table = msm_bus_cl_get_pdata(cnss_pdata->pdev); + if (!bus_bandwidth->bus_scale_table) { + dev_err(dev, "Failed to get the bus scale platform data\n"); + ret = -EINVAL; + } + + bus_bandwidth->bus_client = + legacy_bus_register_client(bus_bandwidth->bus_scale_table); + if (!bus_bandwidth->bus_client) { + dev_err(dev, "Failed to register with bus_scale 
client\n"); + ret = -EINVAL; + } +#endif + return ret; +} + +static int cnss_sdio_probe(struct platform_device *pdev) +{ + int error; + struct device *dev = &pdev->dev; + struct cnss_sdio_info *info; + + if (pdev->dev.of_node) { + cnss_pdata = devm_kzalloc(&pdev->dev, + sizeof(*cnss_pdata), GFP_KERNEL); + if (!cnss_pdata) + return -ENOMEM; + } else { + cnss_pdata = pdev->dev.platform_data; + } + + if (!cnss_pdata) + return -EINVAL; + + cnss_pdata->pdev = pdev; + info = &cnss_pdata->cnss_sdio_info; + + error = cnss_sdio_pinctrl_init(cnss_pdata, pdev); + if (error) { + dev_err(&pdev->dev, "Fail to configure pinctrl err:%d\n", + error); + return error; + } + + error = cnss_sdio_configure_regulator(); + if (error) { + dev_err(&pdev->dev, "Failed to configure voltage regulator error=%d\n", + error); + return error; + } + + if (of_get_property(cnss_pdata->pdev->dev.of_node, + WLAN_VREG_NAME "-supply", NULL)) { + error = cnss_sdio_configure_wlan_enable_regulator(); + if (error) { + dev_err(&pdev->dev, + "Failed to enable wlan enable regulator error=%d\n", + error); + goto err_wlan_enable_regulator; + } + } + + if (of_get_property(cnss_pdata->pdev->dev.of_node, + WLAN_VREG_DSRC_NAME "-supply", NULL)) { + error = cnss_sdio_configure_wlan_enable_dsrc_regulator(); + if (error) { + dev_err(&pdev->dev, + "Failed to enable wlan dsrc enable regulator\n"); + goto err_wlan_dsrc_enable_regulator; + } + } + + info->skip_wlan_en_toggle = of_property_read_bool(dev->of_node, + "qcom,skip-wlan-en-toggle"); + info->cnss_hw_state = true; + + cnss_sdio_tsf_init(dev, &info->cap_tsf_info); + + error = cnss_sdio_wlan_init(); + if (error) { + dev_err(&pdev->dev, "cnss wlan init failed error=%d\n", error); + goto err_wlan_dsrc_enable_regulator; + } + + error = cnss_configure_ramdump(); + if (error) { + dev_err(&pdev->dev, "Failed to configure ramdump error=%d\n", + error); + goto err_ramdump_create; + } + + error = cnss_subsys_init(); + if (error) { + dev_err(&pdev->dev, "Failed to 
cnss_subsys_init error=%d\n", + error); + goto err_subsys_init; + } + + if (of_property_read_bool(pdev->dev.of_node, + "qcom,cnss-enable-bus-bandwidth")) { + error = cnss_sdio_init_bus_bandwidth(); + if (error) { + dev_err(&pdev->dev, "Failed to init bus bandwidth\n"); + goto err_bus_bandwidth_init; + } + } + dev_info(&pdev->dev, "CNSS SDIO Driver registered\n"); + return 0; + +err_bus_bandwidth_init: + cnss_subsys_exit(); +err_subsys_init: + cnss_ramdump_cleanup(); +err_ramdump_create: + cnss_sdio_wlan_exit(); +err_wlan_dsrc_enable_regulator: + info->cnss_hw_state = false; + regulator_put(cnss_pdata->regulator.wlan_vreg_dsrc); +err_wlan_enable_regulator: + regulator_put(cnss_pdata->regulator.wlan_xtal); + regulator_put(cnss_pdata->regulator.wlan_io); + cnss_pdata = NULL; + return error; +} + +static int cnss_sdio_remove(struct platform_device *pdev) +{ + struct cnss_sdio_info *info; + struct cnss_cap_tsf_info *tsf_info; + + if (!cnss_pdata) + return -ENODEV; + + info = &cnss_pdata->cnss_sdio_info; + tsf_info = &info->cap_tsf_info; + + cnss_sdio_tsf_deinit(tsf_info); + cnss_sdio_deinit_bus_bandwidth(); + cnss_sdio_wlan_exit(); + cnss_subsys_exit(); + cnss_ramdump_cleanup(); + cnss_put_hw_resources(info->dev); + cnss_sdio_release_resource(); + cnss_pdata = NULL; + return 0; +} + +int cnss_sdio_set_wlan_mac_address(const u8 *in, u32 len) +{ + return 0; +} + +u8 *cnss_sdio_get_wlan_mac_address(u32 *num) +{ + *num = 0; + return NULL; +} + +int cnss_sdio_power_down(struct device *dev) +{ + return 0; +} + +int cnss_sdio_power_up(struct device *dev) +{ + return 0; +} + +static const struct of_device_id cnss_sdio_dt_match[] = { + {.compatible = "qcom,cnss_sdio"}, + {} +}; +MODULE_DEVICE_TABLE(of, cnss_sdio_dt_match); + +static struct platform_driver cnss_sdio_driver = { + .probe = cnss_sdio_probe, + .remove = cnss_sdio_remove, + .driver = { + .name = "cnss_sdio", + .of_match_table = cnss_sdio_dt_match, + }, +}; + +static int __init cnss_sdio_init(void) +{ + return 
platform_driver_register(&cnss_sdio_driver); +} + +static void __exit cnss_sdio_exit(void) +{ + platform_driver_unregister(&cnss_sdio_driver); +} + +module_init(cnss_sdio_init); +module_exit(cnss_sdio_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/qcom/pinctrl-seraph.c b/drivers/pinctrl/qcom/pinctrl-seraph.c index af812962d029..c28d5b246199 100644 --- a/drivers/pinctrl/qcom/pinctrl-seraph.c +++ b/drivers/pinctrl/qcom/pinctrl-seraph.c @@ -255,30 +255,6 @@ static const struct pinctrl_pin_desc seraph_pins[] = { PINCTRL_PIN(129, "GPIO_129"), PINCTRL_PIN(130, "GPIO_130"), PINCTRL_PIN(131, "GPIO_131"), - PINCTRL_PIN(132, "GPIO_132"), - PINCTRL_PIN(133, "GPIO_133"), - PINCTRL_PIN(134, "GPIO_134"), - PINCTRL_PIN(135, "GPIO_135"), - PINCTRL_PIN(136, "GPIO_136"), - PINCTRL_PIN(137, "GPIO_137"), - PINCTRL_PIN(138, "GPIO_138"), - PINCTRL_PIN(139, "GPIO_139"), - PINCTRL_PIN(140, "GPIO_140"), - PINCTRL_PIN(141, "GPIO_141"), - PINCTRL_PIN(142, "GPIO_142"), - PINCTRL_PIN(143, "GPIO_143"), - PINCTRL_PIN(144, "GPIO_144"), - PINCTRL_PIN(145, "GPIO_145"), - PINCTRL_PIN(146, "GPIO_146"), - PINCTRL_PIN(147, "GPIO_147"), - PINCTRL_PIN(148, "GPIO_148"), - PINCTRL_PIN(149, "GPIO_149"), - PINCTRL_PIN(150, "GPIO_150"), - PINCTRL_PIN(151, "GPIO_151"), - PINCTRL_PIN(152, "GPIO_152"), - PINCTRL_PIN(153, "GPIO_153"), - PINCTRL_PIN(154, "GPIO_154"), - PINCTRL_PIN(155, "GPIO_155"), }; #define DECLARE_MSM_GPIO_PINS(pin) \ @@ -415,36 +391,13 @@ DECLARE_MSM_GPIO_PINS(128); DECLARE_MSM_GPIO_PINS(129); DECLARE_MSM_GPIO_PINS(130); DECLARE_MSM_GPIO_PINS(131); -DECLARE_MSM_GPIO_PINS(132); -DECLARE_MSM_GPIO_PINS(133); -DECLARE_MSM_GPIO_PINS(134); -DECLARE_MSM_GPIO_PINS(135); -DECLARE_MSM_GPIO_PINS(136); -DECLARE_MSM_GPIO_PINS(137); -DECLARE_MSM_GPIO_PINS(138); -DECLARE_MSM_GPIO_PINS(139); -DECLARE_MSM_GPIO_PINS(140); -DECLARE_MSM_GPIO_PINS(141); -DECLARE_MSM_GPIO_PINS(142); -DECLARE_MSM_GPIO_PINS(143); -DECLARE_MSM_GPIO_PINS(144); -DECLARE_MSM_GPIO_PINS(145); -DECLARE_MSM_GPIO_PINS(146); 
-DECLARE_MSM_GPIO_PINS(147); -DECLARE_MSM_GPIO_PINS(148); -DECLARE_MSM_GPIO_PINS(149); -DECLARE_MSM_GPIO_PINS(150); -DECLARE_MSM_GPIO_PINS(151); -DECLARE_MSM_GPIO_PINS(152); -DECLARE_MSM_GPIO_PINS(153); -DECLARE_MSM_GPIO_PINS(154); -DECLARE_MSM_GPIO_PINS(155); enum seraph_functions { msm_mux_gpio, msm_mux_RESOUT_GPIO_N, msm_mux_aoss_cti, + msm_mux_aoss_ts, msm_mux_atest_char0, msm_mux_atest_char1, msm_mux_atest_char2, @@ -460,36 +413,40 @@ enum seraph_functions { msm_mux_audio_ref_clk, msm_mux_cam_asc_mclk4, msm_mux_cam_mclk, - msm_mux_cci01_async_in0, - msm_mux_cci01_async_in1, - msm_mux_cci01_async_in2, - msm_mux_cci01_timer0, - msm_mux_cci01_timer1, - msm_mux_cci01_timer2, - msm_mux_cci01_timer3, - msm_mux_cci01_timer4, - msm_mux_cci0_i2c, + msm_mux_cci0_async_in0, + msm_mux_cci0_async_in1, + msm_mux_cci0_async_in2, msm_mux_cci0_i2c_scl0, + msm_mux_cci0_i2c_scl1, + msm_mux_cci0_i2c_scl2, + msm_mux_cci0_i2c_scl3, msm_mux_cci0_i2c_sda0, - msm_mux_cci1_i2c, - msm_mux_cci1_i2c_scl2, - msm_mux_cci1_i2c_sda2, - msm_mux_cci23_async_in0, - msm_mux_cci23_async_in1, - msm_mux_cci23_async_in2, - msm_mux_cci23_timer0, - msm_mux_cci23_timer1, - msm_mux_cci23_timer2, - msm_mux_cci23_timer3, - msm_mux_cci23_timer4, - msm_mux_cci2_i2c_scl4, - msm_mux_cci2_i2c_scl5, - msm_mux_cci2_i2c_sda4, - msm_mux_cci2_i2c_sda5, - msm_mux_cci3_i2c_scl6, - msm_mux_cci3_i2c_scl7, - msm_mux_cci3_i2c_sda6, - msm_mux_cci3_i2c_sda7, + msm_mux_cci0_i2c_sda1, + msm_mux_cci0_i2c_sda2, + msm_mux_cci0_i2c_sda3, + msm_mux_cci0_timer0, + msm_mux_cci0_timer1, + msm_mux_cci0_timer2, + msm_mux_cci0_timer3_mira, + msm_mux_cci0_timer3_mirb, + msm_mux_cci0_timer4_mira, + msm_mux_cci0_timer4_mirb, + msm_mux_cci1_async_in0, + msm_mux_cci1_async_in1, + msm_mux_cci1_async_in2, + msm_mux_cci1_i2c_scl4, + msm_mux_cci1_i2c_scl5, + msm_mux_cci1_i2c_scl6, + msm_mux_cci1_i2c_scl7, + msm_mux_cci1_i2c_sda4, + msm_mux_cci1_i2c_sda5, + msm_mux_cci1_i2c_sda6, + msm_mux_cci1_i2c_sda7, + msm_mux_cci1_timer0, + 
msm_mux_cci1_timer1, + msm_mux_cci1_timer2, + msm_mux_cci1_timer3, + msm_mux_cci1_timer4, msm_mux_dbg_out_clk, msm_mux_ddr_bist_complete, msm_mux_ddr_bist_fail, @@ -498,12 +455,24 @@ enum seraph_functions { msm_mux_ddr_pxi0, msm_mux_dp0_hot, msm_mux_gcc_gp1, + msm_mux_gcc_gp10_clk, + msm_mux_gcc_gp11_clk, msm_mux_gcc_gp2, msm_mux_gcc_gp3, + msm_mux_gcc_gp4_clk, + msm_mux_gcc_gp5_clk, + msm_mux_gcc_gp6_clk, + msm_mux_gcc_gp7_clk, + msm_mux_gcc_gp8_clk, + msm_mux_gcc_gp9_clk, + msm_mux_host2wlan_sol_mira, + msm_mux_host2wlan_sol_mirb, msm_mux_i2s0_data0, msm_mux_i2s0_data1, msm_mux_i2s0_sck, msm_mux_i2s0_ws, + msm_mux_i3c_s_scl, + msm_mux_i3c_s_sda, msm_mux_ibi_i3c, msm_mux_jitter_bist, msm_mux_mdp_vsync, @@ -514,6 +483,7 @@ enum seraph_functions { msm_mux_mdp_vsync_e, msm_mux_pcie0_clk_req_n, msm_mux_pcie1_clk_req_n, + msm_mux_pcie1_rst_n, msm_mux_phase_flag0, msm_mux_phase_flag1, msm_mux_phase_flag10, @@ -552,6 +522,16 @@ enum seraph_functions { msm_mux_prng_rosc1, msm_mux_prng_rosc2, msm_mux_prng_rosc3, + msm_mux_pwm_0, + msm_mux_pwm_1, + msm_mux_pwm_2, + msm_mux_pwm_3, + msm_mux_pwm_4, + msm_mux_pwm_5, + msm_mux_pwm_6, + msm_mux_pwm_7, + msm_mux_pwm_8, + msm_mux_pwm_9, msm_mux_qdss_cti, msm_mux_qdss_gpio_traceclk, msm_mux_qdss_gpio_tracectl, @@ -582,7 +562,6 @@ enum seraph_functions { msm_mux_qup0_se0_l1, msm_mux_qup0_se0_l2, msm_mux_qup0_se0_l3, - msm_mux_qup0_se0_l4, msm_mux_qup0_se1_l0, msm_mux_qup0_se1_l1, msm_mux_qup0_se1_l2, @@ -591,6 +570,7 @@ enum seraph_functions { msm_mux_qup0_se2_l1, msm_mux_qup0_se2_l2, msm_mux_qup0_se2_l3, + msm_mux_qup0_se2_l4, msm_mux_qup0_se3_l0, msm_mux_qup0_se3_l1, msm_mux_qup0_se3_l2, @@ -600,7 +580,8 @@ enum seraph_functions { msm_mux_qup0_se4_l1, msm_mux_qup0_se4_l2, msm_mux_qup0_se4_l3, - msm_mux_qup0_se4_l4, + msm_mux_qup0_se4_l4_mira, + msm_mux_qup0_se4_l4_mirb, msm_mux_qup0_se5_l0, msm_mux_qup0_se5_l1, msm_mux_qup0_se5_l2, @@ -623,25 +604,30 @@ enum seraph_functions { msm_mux_qup1_se3_l3, msm_mux_qup1_se4_l0, 
msm_mux_qup1_se4_l1, - msm_mux_qup1_se4_l2, - msm_mux_qup1_se4_l3, - msm_mux_qup1_se5_l0, - msm_mux_qup1_se5_l1, + msm_mux_qup1_se4_l2_mira, + msm_mux_qup1_se4_l2_mirb, + msm_mux_qup1_se4_l3_mira, + msm_mux_qup1_se4_l3_mirb, + msm_mux_qup1_se5_l0_mira, + msm_mux_qup1_se5_l0_mirb, + msm_mux_qup1_se5_l1_mira, + msm_mux_qup1_se5_l1_mirb, msm_mux_qup1_se5_l2, msm_mux_qup1_se5_l3, - msm_mux_sys_throttle_mira, - msm_mux_sys_throttle_mirb, + msm_mux_sdc2_clk, + msm_mux_sdc2_cmd, + msm_mux_sdc2_fb_clk, msm_mux_tb_trig_sdc1, + msm_mux_tb_trig_sdc2, msm_mux_tgu_ch0_trigout, msm_mux_tmess_prng0, msm_mux_tmess_prng1, msm_mux_tmess_prng2, msm_mux_tmess_prng3, + msm_mux_tsense_mirnat_RESERVED, msm_mux_tsense_pwm1, - msm_mux_tsense_pwm2, - msm_mux_tsense_pwm3, msm_mux_usb0_hs, - msm_mux_usb0_phy, + msm_mux_usb0_phy_ps, msm_mux_vsense_trigger_mirnat, msm_mux_wcn_sw, msm_mux_wcn_sw_ctrl, @@ -671,422 +657,493 @@ static const char *const gpio_groups[] = { "gpio114", "gpio115", "gpio116", "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122", "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128", "gpio129", "gpio130", "gpio131", - "gpio132", "gpio133", "gpio134", "gpio135", "gpio136", "gpio137", - "gpio138", "gpio139", "gpio140", "gpio141", "gpio142", "gpio143", - "gpio144", "gpio145", "gpio146", "gpio147", "gpio148", "gpio149", - "gpio150", "gpio151", "gpio152", "gpio153", "gpio154", "gpio155", }; static const char *const RESOUT_GPIO_N_groups[] = { - "gpio101", + "gpio63", }; static const char *const aoss_cti_groups[] = { - "gpio0", "gpio1", "gpio2", "gpio3", + "gpio61", "gpio62", "gpio67", "gpio68", +}; + +static const char *const aoss_ts_groups[] = { + "gpio0", "gpio1", "gpio2", "gpio3", "gpio28", "gpio29", + "gpio30", "gpio69", "gpio70", "gpio71", "gpio72", }; static const char *const atest_char0_groups[] = { - "gpio65", + "gpio57", }; static const char *const atest_char1_groups[] = { - "gpio66", + "gpio58", }; static const char *const atest_char2_groups[] = 
{ - "gpio67", -}; - -static const char *const atest_char3_groups[] = { - "gpio68", -}; - -static const char *const atest_char_start_groups[] = { - "gpio77", -}; - -static const char *const atest_usb0_groups[] = { - "gpio129", -}; - -static const char *const atest_usb00_groups[] = { - "gpio72", -}; - -static const char *const atest_usb01_groups[] = { - "gpio73", -}; - -static const char *const atest_usb02_groups[] = { - "gpio74", -}; - -static const char *const atest_usb03_groups[] = { - "gpio75", -}; - -static const char *const audio_ext_mclk0_groups[] = { - "gpio104", -}; - -static const char *const audio_ext_mclk1_groups[] = { - "gpio103", -}; - -static const char *const audio_ref_clk_groups[] = { - "gpio103", -}; - -static const char *const cam_asc_mclk4_groups[] = { - "gpio73", -}; - -static const char *const cam_mclk_groups[] = { - "gpio69", "gpio70", "gpio71", "gpio72", "gpio74", "gpio75", -}; - -static const char *const cci01_async_in0_groups[] = { - "gpio82", -}; - -static const char *const cci01_async_in1_groups[] = { - "gpio80", -}; - -static const char *const cci01_async_in2_groups[] = { - "gpio81", -}; - -static const char *const cci01_timer0_groups[] = { - "gpio77", -}; - -static const char *const cci01_timer1_groups[] = { - "gpio78", -}; - -static const char *const cci01_timer2_groups[] = { - "gpio79", -}; - -static const char *const cci01_timer3_groups[] = { - "gpio80", -}; - -static const char *const cci01_timer4_groups[] = { - "gpio81", -}; - -static const char *const cci0_i2c_groups[] = { - "gpio87", "gpio88", "gpio111", "gpio112", -}; - -static const char *const cci0_i2c_scl0_groups[] = { - "gpio86", -}; - -static const char *const cci0_i2c_sda0_groups[] = { - "gpio85", -}; - -static const char *const cci1_i2c_groups[] = { - "gpio83", "gpio84", "gpio113", "gpio114", -}; - -static const char *const cci1_i2c_scl2_groups[] = { - "gpio90", -}; - -static const char *const cci1_i2c_sda2_groups[] = { - "gpio89", -}; - -static const char *const 
cci23_async_in0_groups[] = { - "gpio116", -}; - -static const char *const cci23_async_in1_groups[] = { - "gpio117", -}; - -static const char *const cci23_async_in2_groups[] = { - "gpio118", -}; - -static const char *const cci23_timer0_groups[] = { - "gpio104", -}; - -static const char *const cci23_timer1_groups[] = { - "gpio105", -}; - -static const char *const cci23_timer2_groups[] = { - "gpio106", -}; - -static const char *const cci23_timer3_groups[] = { - "gpio107", -}; - -static const char *const cci23_timer4_groups[] = { - "gpio108", -}; - -static const char *const cci2_i2c_scl4_groups[] = { - "gpio92", -}; - -static const char *const cci2_i2c_scl5_groups[] = { - "gpio109", -}; - -static const char *const cci2_i2c_sda4_groups[] = { - "gpio91", -}; - -static const char *const cci2_i2c_sda5_groups[] = { - "gpio110", -}; - -static const char *const cci3_i2c_scl6_groups[] = { - "gpio79", -}; - -static const char *const cci3_i2c_scl7_groups[] = { - "gpio81", -}; - -static const char *const cci3_i2c_sda6_groups[] = { - "gpio78", -}; - -static const char *const cci3_i2c_sda7_groups[] = { - "gpio80", -}; - -static const char *const dbg_out_clk_groups[] = { - "gpio75", -}; - -static const char *const ddr_bist_complete_groups[] = { - "gpio44", -}; - -static const char *const ddr_bist_fail_groups[] = { - "gpio40", -}; - -static const char *const ddr_bist_start_groups[] = { - "gpio41", -}; - -static const char *const ddr_bist_stop_groups[] = { - "gpio45", -}; - -static const char *const ddr_pxi0_groups[] = { - "gpio54", "gpio55", -}; - -static const char *const dp0_hot_groups[] = { - "gpio45", "gpio103", -}; - -static const char *const gcc_gp1_groups[] = { - "gpio130", "gpio149", -}; - -static const char *const gcc_gp2_groups[] = { - "gpio91", "gpio131", -}; - -static const char *const gcc_gp3_groups[] = { - "gpio92", "gpio132", -}; - -static const char *const i2s0_data0_groups[] = { - "gpio106", -}; - -static const char *const i2s0_data1_groups[] = { - "gpio107", -}; - 
-static const char *const i2s0_sck_groups[] = { - "gpio105", -}; - -static const char *const i2s0_ws_groups[] = { - "gpio108", -}; - -static const char *const ibi_i3c_groups[] = { - "gpio0", "gpio1", "gpio4", "gpio5", "gpio20", "gpio21", -}; - -static const char *const jitter_bist_groups[] = { - "gpio73", -}; - -static const char *const mdp_vsync_groups[] = { - "gpio49", "gpio50", "gpio97", "gpio98", -}; - -static const char *const mdp_vsync0_out_groups[] = { - "gpio49", -}; - -static const char *const mdp_vsync1_out_groups[] = { - "gpio49", -}; - -static const char *const mdp_vsync2_out_groups[] = { - "gpio50", -}; - -static const char *const mdp_vsync3_out_groups[] = { - "gpio50", -}; - -static const char *const mdp_vsync_e_groups[] = { - "gpio88", -}; - -static const char *const pcie0_clk_req_n_groups[] = { - "gpio56", -}; - -static const char *const pcie1_clk_req_n_groups[] = { "gpio59", }; -static const char *const phase_flag0_groups[] = { - "gpio155", +static const char *const atest_char3_groups[] = { + "gpio60", }; -static const char *const phase_flag1_groups[] = { - "gpio141", +static const char *const atest_char_start_groups[] = { + "gpio40", }; -static const char *const phase_flag10_groups[] = { - "gpio137", +static const char *const atest_usb0_groups[] = { + "gpio94", }; -static const char *const phase_flag11_groups[] = { - "gpio136", +static const char *const atest_usb00_groups[] = { + "gpio88", }; -static const char *const phase_flag12_groups[] = { - "gpio134", +static const char *const atest_usb01_groups[] = { + "gpio93", }; -static const char *const phase_flag13_groups[] = { - "gpio125", +static const char *const atest_usb02_groups[] = { + "gpio90", }; -static const char *const phase_flag14_groups[] = { - "gpio144", +static const char *const atest_usb03_groups[] = { + "gpio91", }; -static const char *const phase_flag15_groups[] = { - "gpio142", +static const char *const audio_ext_mclk0_groups[] = { + "gpio30", }; -static const char *const 
phase_flag16_groups[] = { - "gpio139", +static const char *const audio_ext_mclk1_groups[] = { + "gpio70", }; -static const char *const phase_flag17_groups[] = { - "gpio138", +static const char *const audio_ref_clk_groups[] = { + "gpio70", }; -static const char *const phase_flag18_groups[] = { +static const char *const cam_asc_mclk4_groups[] = { + "gpio49", +}; + +static const char *const cam_mclk_groups[] = { + "gpio45", "gpio46", "gpio47", "gpio48", "gpio50", "gpio125", + "gpio126", +}; + +static const char *const cci0_async_in0_groups[] = { + "gpio56", +}; + +static const char *const cci0_async_in1_groups[] = { + "gpio60", +}; + +static const char *const cci0_async_in2_groups[] = { + "gpio59", +}; + +static const char *const cci0_i2c_scl0_groups[] = { + "gpio58", +}; + +static const char *const cci0_i2c_scl1_groups[] = { + "gpio60", +}; + +static const char *const cci0_i2c_scl2_groups[] = { + "gpio128", +}; + +static const char *const cci0_i2c_scl3_groups[] = { "gpio130", }; -static const char *const phase_flag19_groups[] = { - "gpio150", +static const char *const cci0_i2c_sda0_groups[] = { + "gpio57", }; -static const char *const phase_flag2_groups[] = { - "gpio154", +static const char *const cci0_i2c_sda1_groups[] = { + "gpio59", }; -static const char *const phase_flag20_groups[] = { - "gpio151", +static const char *const cci0_i2c_sda2_groups[] = { + "gpio127", }; -static const char *const phase_flag21_groups[] = { - "gpio131", -}; - -static const char *const phase_flag22_groups[] = { - "gpio124", -}; - -static const char *const phase_flag23_groups[] = { - "gpio152", -}; - -static const char *const phase_flag24_groups[] = { - "gpio120", -}; - -static const char *const phase_flag25_groups[] = { - "gpio119", -}; - -static const char *const phase_flag26_groups[] = { - "gpio117", -}; - -static const char *const phase_flag27_groups[] = { - "gpio118", -}; - -static const char *const phase_flag28_groups[] = { - "gpio153", -}; - -static const char *const 
phase_flag29_groups[] = { - "gpio148", -}; - -static const char *const phase_flag3_groups[] = { - "gpio147", -}; - -static const char *const phase_flag30_groups[] = { - "gpio146", -}; - -static const char *const phase_flag31_groups[] = { - "gpio145", -}; - -static const char *const phase_flag4_groups[] = { - "gpio149", -}; - -static const char *const phase_flag5_groups[] = { +static const char *const cci0_i2c_sda3_groups[] = { "gpio129", }; +static const char *const cci0_timer0_groups[] = { + "gpio51", +}; + +static const char *const cci0_timer1_groups[] = { + "gpio52", +}; + +static const char *const cci0_timer2_groups[] = { + "gpio53", +}; + +static const char *const cci0_timer3_mira_groups[] = { + "gpio54", +}; + +static const char *const cci0_timer3_mirb_groups[] = { + "gpio42", +}; + +static const char *const cci0_timer4_mira_groups[] = { + "gpio55", +}; + +static const char *const cci0_timer4_mirb_groups[] = { + "gpio43", +}; + +static const char *const cci1_async_in0_groups[] = { + "gpio65", +}; + +static const char *const cci1_async_in1_groups[] = { + "gpio50", +}; + +static const char *const cci1_async_in2_groups[] = { + "gpio51", +}; + +static const char *const cci1_i2c_scl4_groups[] = { + "gpio62", +}; + +static const char *const cci1_i2c_scl5_groups[] = { + "gpio113", +}; + +static const char *const cci1_i2c_scl6_groups[] = { + "gpio53", +}; + +static const char *const cci1_i2c_scl7_groups[] = { + "gpio55", +}; + +static const char *const cci1_i2c_sda4_groups[] = { + "gpio61", +}; + +static const char *const cci1_i2c_sda5_groups[] = { + "gpio112", +}; + +static const char *const cci1_i2c_sda6_groups[] = { + "gpio52", +}; + +static const char *const cci1_i2c_sda7_groups[] = { + "gpio54", +}; + +static const char *const cci1_timer0_groups[] = { + "gpio56", +}; + +static const char *const cci1_timer1_groups[] = { + "gpio65", +}; + +static const char *const cci1_timer2_groups[] = { + "gpio41", +}; + +static const char *const cci1_timer3_groups[] = { + 
"gpio123", +}; + +static const char *const cci1_timer4_groups[] = { + "gpio44", +}; + +static const char *const dbg_out_clk_groups[] = { + "gpio80", +}; + +static const char *const ddr_bist_complete_groups[] = { + "gpio43", +}; + +static const char *const ddr_bist_fail_groups[] = { + "gpio41", +}; + +static const char *const ddr_bist_start_groups[] = { + "gpio42", +}; + +static const char *const ddr_bist_stop_groups[] = { + "gpio44", +}; + +static const char *const ddr_pxi0_groups[] = { + "gpio34", "gpio35", +}; + +static const char *const dp0_hot_groups[] = { + "gpio45", "gpio63", +}; + +static const char *const gcc_gp1_groups[] = { + "gpio61", "gpio98", +}; + +static const char *const gcc_gp10_clk_groups[] = { + "gpio96", +}; + +static const char *const gcc_gp11_clk_groups[] = { + "gpio97", +}; + +static const char *const gcc_gp2_groups[] = { + "gpio62", "gpio99", +}; + +static const char *const gcc_gp3_groups[] = { + "gpio39", "gpio100", +}; + +static const char *const gcc_gp4_clk_groups[] = { + "gpio102", +}; + +static const char *const gcc_gp5_clk_groups[] = { + "gpio103", +}; + +static const char *const gcc_gp6_clk_groups[] = { + "gpio104", +}; + +static const char *const gcc_gp7_clk_groups[] = { + "gpio106", +}; + +static const char *const gcc_gp8_clk_groups[] = { + "gpio107", +}; + +static const char *const gcc_gp9_clk_groups[] = { + "gpio95", +}; + +static const char *const host2wlan_sol_mira_groups[] = { + "gpio73", +}; + +static const char *const host2wlan_sol_mirb_groups[] = { + "gpio55", +}; + +static const char *const i2s0_data0_groups[] = { + "gpio16", +}; + +static const char *const i2s0_data1_groups[] = { + "gpio17", +}; + +static const char *const i2s0_sck_groups[] = { + "gpio18", +}; + +static const char *const i2s0_ws_groups[] = { + "gpio19", +}; + +static const char *const i3c_s_scl_groups[] = { + "gpio5", +}; + +static const char *const i3c_s_sda_groups[] = { + "gpio4", +}; + +static const char *const ibi_i3c_groups[] = { + "gpio4", "gpio5", 
"gpio20", "gpio21", "gpio22", "gpio23", + "gpio61", "gpio62", +}; + +static const char *const jitter_bist_groups[] = { + "gpio64", +}; + +static const char *const mdp_vsync_groups[] = { + "gpio12", "gpio13", "gpio34", "gpio35", +}; + +static const char *const mdp_vsync0_out_groups[] = { + "gpio34", +}; + +static const char *const mdp_vsync1_out_groups[] = { + "gpio34", +}; + +static const char *const mdp_vsync2_out_groups[] = { + "gpio35", +}; + +static const char *const mdp_vsync3_out_groups[] = { + "gpio35", +}; + +static const char *const mdp_vsync_e_groups[] = { + "gpio30", +}; + +static const char *const pcie0_clk_req_n_groups[] = { + "gpio38", +}; + +static const char *const pcie1_clk_req_n_groups[] = { + "gpio40", +}; + +static const char *const pcie1_rst_n_groups[] = { + "gpio7", +}; + +static const char *const phase_flag0_groups[] = { + "gpio92", +}; + +static const char *const phase_flag1_groups[] = { + "gpio89", +}; + +static const char *const phase_flag10_groups[] = { + "gpio51", +}; + +static const char *const phase_flag11_groups[] = { + "gpio56", +}; + +static const char *const phase_flag12_groups[] = { + "gpio33", +}; + +static const char *const phase_flag13_groups[] = { + "gpio34", +}; + +static const char *const phase_flag14_groups[] = { + "gpio35", +}; + +static const char *const phase_flag15_groups[] = { + "gpio24", +}; + +static const char *const phase_flag16_groups[] = { + "gpio25", +}; + +static const char *const phase_flag17_groups[] = { + "gpio26", +}; + +static const char *const phase_flag18_groups[] = { + "gpio40", +}; + +static const char *const phase_flag19_groups[] = { + "gpio43", +}; + +static const char *const phase_flag2_groups[] = { + "gpio8", +}; + +static const char *const phase_flag20_groups[] = { + "gpio44", +}; + +static const char *const phase_flag21_groups[] = { + "gpio57", +}; + +static const char *const phase_flag22_groups[] = { + "gpio58", +}; + +static const char *const phase_flag23_groups[] = { + "gpio59", +}; + +static 
const char *const phase_flag24_groups[] = { + "gpio60", +}; + +static const char *const phase_flag25_groups[] = { + "gpio71", +}; + +static const char *const phase_flag26_groups[] = { + "gpio72", +}; + +static const char *const phase_flag27_groups[] = { + "gpio27", +}; + +static const char *const phase_flag28_groups[] = { + "gpio52", +}; + +static const char *const phase_flag29_groups[] = { + "gpio75", +}; + +static const char *const phase_flag3_groups[] = { + "gpio9", +}; + +static const char *const phase_flag30_groups[] = { + "gpio76", +}; + +static const char *const phase_flag31_groups[] = { + "gpio77", +}; + +static const char *const phase_flag4_groups[] = { + "gpio10", +}; + +static const char *const phase_flag5_groups[] = { + "gpio11", +}; + static const char *const phase_flag6_groups[] = { - "gpio135", + "gpio14", }; static const char *const phase_flag7_groups[] = { - "gpio133", + "gpio15", }; static const char *const phase_flag8_groups[] = { - "gpio143", + "gpio19", }; static const char *const phase_flag9_groups[] = { - "gpio140", + "gpio105", }; static const char *const pll_bist_sync_groups[] = { @@ -1094,7 +1151,7 @@ static const char *const pll_bist_sync_groups[] = { }; static const char *const pll_clk_aux_groups[] = { - "gpio97", + "gpio4", }; static const char *const prng_rosc0_groups[] = { @@ -1102,7 +1159,7 @@ static const char *const prng_rosc0_groups[] = { }; static const char *const prng_rosc1_groups[] = { - "gpio64", + "gpio73", }; static const char *const prng_rosc2_groups[] = { @@ -1113,163 +1170,203 @@ static const char *const prng_rosc3_groups[] = { "gpio66", }; -static const char *const qdss_cti_groups[] = { - "gpio27", "gpio31", "gpio77", "gpio78", "gpio82", "gpio83", - "gpio146", "gpio151", -}; - -static const char *const qdss_gpio_traceclk_groups[] = { - "gpio128", -}; - -static const char *const qdss_gpio_tracectl_groups[] = { - "gpio127", -}; - -static const char *const qdss_gpio_tracedata0_groups[] = { - "gpio38", -}; - -static const 
char *const qdss_gpio_tracedata1_groups[] = { - "gpio39", -}; - -static const char *const qdss_gpio_tracedata10_groups[] = { - "gpio130", -}; - -static const char *const qdss_gpio_tracedata11_groups[] = { - "gpio131", -}; - -static const char *const qdss_gpio_tracedata12_groups[] = { - "gpio132", -}; - -static const char *const qdss_gpio_tracedata13_groups[] = { - "gpio133", -}; - -static const char *const qdss_gpio_tracedata14_groups[] = { - "gpio129", -}; - -static const char *const qdss_gpio_tracedata15_groups[] = { - "gpio126", -}; - -static const char *const qdss_gpio_tracedata2_groups[] = { - "gpio68", -}; - -static const char *const qdss_gpio_tracedata3_groups[] = { - "gpio69", -}; - -static const char *const qdss_gpio_tracedata4_groups[] = { - "gpio62", -}; - -static const char *const qdss_gpio_tracedata5_groups[] = { - "gpio63", -}; - -static const char *const qdss_gpio_tracedata6_groups[] = { - "gpio40", -}; - -static const char *const qdss_gpio_tracedata7_groups[] = { - "gpio41", -}; - -static const char *const qdss_gpio_tracedata8_groups[] = { - "gpio42", -}; - -static const char *const qdss_gpio_tracedata9_groups[] = { - "gpio43", -}; - -static const char *const qspi0_clk_groups[] = { - "gpio35", -}; - -static const char *const qspi0_cs0_n_groups[] = { - "gpio36", -}; - -static const char *const qspi0_cs1_n_groups[] = { - "gpio38", -}; - -static const char *const qspi0_data0_groups[] = { - "gpio32", -}; - -static const char *const qspi0_data1_groups[] = { - "gpio37", -}; - -static const char *const qspi0_data2_groups[] = { - "gpio33", -}; - -static const char *const qspi0_data3_groups[] = { - "gpio34", -}; - -static const char *const qup0_se0_l0_groups[] = { - "gpio0", -}; - -static const char *const qup0_se0_l1_groups[] = { - "gpio1", -}; - -static const char *const qup0_se0_l2_groups[] = { +static const char *const pwm_0_groups[] = { "gpio2", }; -static const char *const qup0_se0_l3_groups[] = { +static const char *const pwm_1_groups[] = { "gpio3", 
}; -static const char *const qup0_se0_l4_groups[] = { - "gpio93", -}; - -static const char *const qup0_se1_l0_groups[] = { - "gpio2", -}; - -static const char *const qup0_se1_l1_groups[] = { - "gpio3", -}; - -static const char *const qup0_se1_l2_groups[] = { - "gpio61", -}; - -static const char *const qup0_se1_l3_groups[] = { - "gpio62", -}; - -static const char *const qup0_se2_l0_groups[] = { - "gpio22", -}; - -static const char *const qup0_se2_l1_groups[] = { - "gpio23", -}; - -static const char *const qup0_se2_l2_groups[] = { +static const char *const pwm_2_groups[] = { "gpio12", }; -static const char *const qup0_se2_l3_groups[] = { +static const char *const pwm_3_groups[] = { "gpio13", }; +static const char *const pwm_4_groups[] = { + "gpio16", +}; + +static const char *const pwm_5_groups[] = { + "gpio17", +}; + +static const char *const pwm_6_groups[] = { + "gpio28", +}; + +static const char *const pwm_7_groups[] = { + "gpio29", +}; + +static const char *const pwm_8_groups[] = { + "gpio18", +}; + +static const char *const pwm_9_groups[] = { + "gpio19", +}; + +static const char *const qdss_cti_groups[] = { + "gpio43", "gpio44", "gpio61", "gpio62", "gpio65", "gpio66", + "gpio69", "gpio70", "gpio116", "gpio117", +}; + +static const char *const qdss_gpio_traceclk_groups[] = { + "gpio73", +}; + +static const char *const qdss_gpio_tracectl_groups[] = { + "gpio74", +}; + +static const char *const qdss_gpio_tracedata0_groups[] = { + "gpio33", +}; + +static const char *const qdss_gpio_tracedata1_groups[] = { + "gpio32", +}; + +static const char *const qdss_gpio_tracedata10_groups[] = { + "gpio24", +}; + +static const char *const qdss_gpio_tracedata11_groups[] = { + "gpio39", +}; + +static const char *const qdss_gpio_tracedata12_groups[] = { + "gpio38", +}; + +static const char *const qdss_gpio_tracedata13_groups[] = { + "gpio37", +}; + +static const char *const qdss_gpio_tracedata14_groups[] = { + "gpio15", +}; + +static const char *const qdss_gpio_tracedata15_groups[] 
= { + "gpio14", +}; + +static const char *const qdss_gpio_tracedata2_groups[] = { + "gpio31", +}; + +static const char *const qdss_gpio_tracedata3_groups[] = { + "gpio53", +}; + +static const char *const qdss_gpio_tracedata4_groups[] = { + "gpio52", +}; + +static const char *const qdss_gpio_tracedata5_groups[] = { + "gpio60", +}; + +static const char *const qdss_gpio_tracedata6_groups[] = { + "gpio59", +}; + +static const char *const qdss_gpio_tracedata7_groups[] = { + "gpio27", +}; + +static const char *const qdss_gpio_tracedata8_groups[] = { + "gpio26", +}; + +static const char *const qdss_gpio_tracedata9_groups[] = { + "gpio25", +}; + +static const char *const qspi0_clk_groups[] = { + "gpio2", +}; + +static const char *const qspi0_cs0_n_groups[] = { + "gpio3", +}; + +static const char *const qspi0_cs1_n_groups[] = { + "gpio68", +}; + +static const char *const qspi0_data0_groups[] = { + "gpio41", +}; + +static const char *const qspi0_data1_groups[] = { + "gpio42", +}; + +static const char *const qspi0_data2_groups[] = { + "gpio63", +}; + +static const char *const qspi0_data3_groups[] = { + "gpio67", +}; + +static const char *const qup0_se0_l0_groups[] = { + "gpio22", +}; + +static const char *const qup0_se0_l1_groups[] = { + "gpio23", +}; + +static const char *const qup0_se0_l2_groups[] = { + "gpio12", +}; + +static const char *const qup0_se0_l3_groups[] = { + "gpio13", +}; + +static const char *const qup0_se1_l0_groups[] = { + "gpio67", +}; + +static const char *const qup0_se1_l1_groups[] = { + "gpio68", +}; + +static const char *const qup0_se1_l2_groups[] = { + "gpio41", +}; + +static const char *const qup0_se1_l3_groups[] = { + "gpio42", +}; + +static const char *const qup0_se2_l0_groups[] = { + "gpio0", +}; + +static const char *const qup0_se2_l1_groups[] = { + "gpio1", +}; + +static const char *const qup0_se2_l2_groups[] = { + "gpio2", +}; + +static const char *const qup0_se2_l3_groups[] = { + "gpio3", +}; + +static const char *const qup0_se2_l4_groups[] = { 
+ "gpio63", +}; + static const char *const qup0_se3_l0_groups[] = { "gpio16", }; @@ -1287,7 +1384,7 @@ static const char *const qup0_se3_l3_groups[] = { }; static const char *const qup0_se3_l4_groups[] = { - "gpio41", + "gpio30", }; static const char *const qup0_se4_l0_groups[] = { @@ -1306,40 +1403,44 @@ static const char *const qup0_se4_l3_groups[] = { "gpio23", }; -static const char *const qup0_se4_l4_groups[] = { - "gpio94", +static const char *const qup0_se4_l4_mira_groups[] = { + "gpio64", +}; + +static const char *const qup0_se4_l4_mirb_groups[] = { + "gpio12", }; static const char *const qup0_se5_l0_groups[] = { - "gpio95", + "gpio65", }; static const char *const qup0_se5_l1_groups[] = { - "gpio96", + "gpio66", }; static const char *const qup0_se5_l2_groups[] = { - "gpio97", + "gpio67", }; static const char *const qup0_se5_l3_groups[] = { - "gpio98", + "gpio68", }; static const char *const qup1_se0_l0_groups[] = { - "gpio4", + "gpio61", }; static const char *const qup1_se0_l1_groups[] = { - "gpio5", + "gpio62", }; static const char *const qup1_se0_l2_groups[] = { - "gpio63", + "gpio43", }; static const char *const qup1_se0_l3_groups[] = { - "gpio64", + "gpio44", }; static const char *const qup1_se1_l0_groups[] = { @@ -1375,19 +1476,19 @@ static const char *const qup1_se2_l3_groups[] = { }; static const char *const qup1_se3_l0_groups[] = { - "gpio109", + "gpio71", }; static const char *const qup1_se3_l1_groups[] = { - "gpio110", + "gpio72", }; static const char *const qup1_se3_l2_groups[] = { - "gpio35", + "gpio28", }; static const char *const qup1_se3_l3_groups[] = { - "gpio36", + "gpio29", }; static const char *const qup1_se4_l0_groups[] = { @@ -1398,20 +1499,36 @@ static const char *const qup1_se4_l1_groups[] = { "gpio5", }; -static const char *const qup1_se4_l2_groups[] = { +static const char *const qup1_se4_l2_mira_groups[] = { "gpio6", }; -static const char *const qup1_se4_l3_groups[] = { +static const char *const qup1_se4_l2_mirb_groups[] = { + 
"gpio119", +}; + +static const char *const qup1_se4_l3_mira_groups[] = { "gpio7", }; -static const char *const qup1_se5_l0_groups[] = { - "gpio14", +static const char *const qup1_se4_l3_mirb_groups[] = { + "gpio120", }; -static const char *const qup1_se5_l1_groups[] = { - "gpio15", +static const char *const qup1_se5_l0_mira_groups[] = { + "gpio108", +}; + +static const char *const qup1_se5_l0_mirb_groups[] = { + "gpio10", +}; + +static const char *const qup1_se5_l1_mira_groups[] = { + "gpio109", +}; + +static const char *const qup1_se5_l1_mirb_groups[] = { + "gpio11", }; static const char *const qup1_se5_l2_groups[] = { @@ -1422,18 +1539,26 @@ static const char *const qup1_se5_l3_groups[] = { "gpio15", }; -static const char *const sys_throttle_mira_groups[] = { - "gpio95", +static const char *const sdc2_clk_groups[] = { + "gpio73", }; -static const char *const sys_throttle_mirb_groups[] = { - "gpio96", +static const char *const sdc2_cmd_groups[] = { + "gpio32", +}; + +static const char *const sdc2_fb_clk_groups[] = { + "gpio74", }; static const char *const tb_trig_sdc1_groups[] = { "gpio88", }; +static const char *const tb_trig_sdc2_groups[] = { + "gpio90", +}; + static const char *const tgu_ch0_trigout_groups[] = { "gpio51", }; @@ -1443,7 +1568,7 @@ static const char *const tmess_prng0_groups[] = { }; static const char *const tmess_prng1_groups[] = { - "gpio64", + "gpio62", }; static const char *const tmess_prng2_groups[] = { @@ -1454,36 +1579,32 @@ static const char *const tmess_prng3_groups[] = { "gpio66", }; +static const char *const tsense_mirnat_RESERVED_groups[] = { + "gpio53", +}; + static const char *const tsense_pwm1_groups[] = { - "gpio60", -}; - -static const char *const tsense_pwm2_groups[] = { - "gpio60", -}; - -static const char *const tsense_pwm3_groups[] = { - "gpio60", + "gpio74", }; static const char *const usb0_hs_groups[] = { "gpio76", }; -static const char *const usb0_phy_groups[] = { - "gpio99", "gpio100", +static const char *const 
usb0_phy_ps_groups[] = { + "gpio69", }; static const char *const vsense_trigger_mirnat_groups[] = { - "gpio72", + "gpio80", }; static const char *const wcn_sw_groups[] = { - "gpio31", + "gpio70", }; static const char *const wcn_sw_ctrl_groups[] = { - "gpio30", + "gpio33", }; @@ -1491,6 +1612,7 @@ static const struct msm_function seraph_functions[] = { FUNCTION(gpio), FUNCTION(RESOUT_GPIO_N), FUNCTION(aoss_cti), + FUNCTION(aoss_ts), FUNCTION(atest_char0), FUNCTION(atest_char1), FUNCTION(atest_char2), @@ -1506,36 +1628,40 @@ static const struct msm_function seraph_functions[] = { FUNCTION(audio_ref_clk), FUNCTION(cam_asc_mclk4), FUNCTION(cam_mclk), - FUNCTION(cci01_async_in0), - FUNCTION(cci01_async_in1), - FUNCTION(cci01_async_in2), - FUNCTION(cci01_timer0), - FUNCTION(cci01_timer1), - FUNCTION(cci01_timer2), - FUNCTION(cci01_timer3), - FUNCTION(cci01_timer4), - FUNCTION(cci0_i2c), + FUNCTION(cci0_async_in0), + FUNCTION(cci0_async_in1), + FUNCTION(cci0_async_in2), FUNCTION(cci0_i2c_scl0), + FUNCTION(cci0_i2c_scl1), + FUNCTION(cci0_i2c_scl2), + FUNCTION(cci0_i2c_scl3), FUNCTION(cci0_i2c_sda0), - FUNCTION(cci1_i2c), - FUNCTION(cci1_i2c_scl2), - FUNCTION(cci1_i2c_sda2), - FUNCTION(cci23_async_in0), - FUNCTION(cci23_async_in1), - FUNCTION(cci23_async_in2), - FUNCTION(cci23_timer0), - FUNCTION(cci23_timer1), - FUNCTION(cci23_timer2), - FUNCTION(cci23_timer3), - FUNCTION(cci23_timer4), - FUNCTION(cci2_i2c_scl4), - FUNCTION(cci2_i2c_scl5), - FUNCTION(cci2_i2c_sda4), - FUNCTION(cci2_i2c_sda5), - FUNCTION(cci3_i2c_scl6), - FUNCTION(cci3_i2c_scl7), - FUNCTION(cci3_i2c_sda6), - FUNCTION(cci3_i2c_sda7), + FUNCTION(cci0_i2c_sda1), + FUNCTION(cci0_i2c_sda2), + FUNCTION(cci0_i2c_sda3), + FUNCTION(cci0_timer0), + FUNCTION(cci0_timer1), + FUNCTION(cci0_timer2), + FUNCTION(cci0_timer3_mira), + FUNCTION(cci0_timer3_mirb), + FUNCTION(cci0_timer4_mira), + FUNCTION(cci0_timer4_mirb), + FUNCTION(cci1_async_in0), + FUNCTION(cci1_async_in1), + FUNCTION(cci1_async_in2), + 
FUNCTION(cci1_i2c_scl4), + FUNCTION(cci1_i2c_scl5), + FUNCTION(cci1_i2c_scl6), + FUNCTION(cci1_i2c_scl7), + FUNCTION(cci1_i2c_sda4), + FUNCTION(cci1_i2c_sda5), + FUNCTION(cci1_i2c_sda6), + FUNCTION(cci1_i2c_sda7), + FUNCTION(cci1_timer0), + FUNCTION(cci1_timer1), + FUNCTION(cci1_timer2), + FUNCTION(cci1_timer3), + FUNCTION(cci1_timer4), FUNCTION(dbg_out_clk), FUNCTION(ddr_bist_complete), FUNCTION(ddr_bist_fail), @@ -1544,12 +1670,24 @@ static const struct msm_function seraph_functions[] = { FUNCTION(ddr_pxi0), FUNCTION(dp0_hot), FUNCTION(gcc_gp1), + FUNCTION(gcc_gp10_clk), + FUNCTION(gcc_gp11_clk), FUNCTION(gcc_gp2), FUNCTION(gcc_gp3), + FUNCTION(gcc_gp4_clk), + FUNCTION(gcc_gp5_clk), + FUNCTION(gcc_gp6_clk), + FUNCTION(gcc_gp7_clk), + FUNCTION(gcc_gp8_clk), + FUNCTION(gcc_gp9_clk), + FUNCTION(host2wlan_sol_mira), + FUNCTION(host2wlan_sol_mirb), FUNCTION(i2s0_data0), FUNCTION(i2s0_data1), FUNCTION(i2s0_sck), FUNCTION(i2s0_ws), + FUNCTION(i3c_s_scl), + FUNCTION(i3c_s_sda), FUNCTION(ibi_i3c), FUNCTION(jitter_bist), FUNCTION(mdp_vsync), @@ -1560,6 +1698,7 @@ static const struct msm_function seraph_functions[] = { FUNCTION(mdp_vsync_e), FUNCTION(pcie0_clk_req_n), FUNCTION(pcie1_clk_req_n), + FUNCTION(pcie1_rst_n), FUNCTION(phase_flag0), FUNCTION(phase_flag1), FUNCTION(phase_flag10), @@ -1598,6 +1737,16 @@ static const struct msm_function seraph_functions[] = { FUNCTION(prng_rosc1), FUNCTION(prng_rosc2), FUNCTION(prng_rosc3), + FUNCTION(pwm_0), + FUNCTION(pwm_1), + FUNCTION(pwm_2), + FUNCTION(pwm_3), + FUNCTION(pwm_4), + FUNCTION(pwm_5), + FUNCTION(pwm_6), + FUNCTION(pwm_7), + FUNCTION(pwm_8), + FUNCTION(pwm_9), FUNCTION(qdss_cti), FUNCTION(qdss_gpio_traceclk), FUNCTION(qdss_gpio_tracectl), @@ -1628,7 +1777,6 @@ static const struct msm_function seraph_functions[] = { FUNCTION(qup0_se0_l1), FUNCTION(qup0_se0_l2), FUNCTION(qup0_se0_l3), - FUNCTION(qup0_se0_l4), FUNCTION(qup0_se1_l0), FUNCTION(qup0_se1_l1), FUNCTION(qup0_se1_l2), @@ -1637,6 +1785,7 @@ static const struct 
msm_function seraph_functions[] = { FUNCTION(qup0_se2_l1), FUNCTION(qup0_se2_l2), FUNCTION(qup0_se2_l3), + FUNCTION(qup0_se2_l4), FUNCTION(qup0_se3_l0), FUNCTION(qup0_se3_l1), FUNCTION(qup0_se3_l2), @@ -1646,7 +1795,8 @@ static const struct msm_function seraph_functions[] = { FUNCTION(qup0_se4_l1), FUNCTION(qup0_se4_l2), FUNCTION(qup0_se4_l3), - FUNCTION(qup0_se4_l4), + FUNCTION(qup0_se4_l4_mira), + FUNCTION(qup0_se4_l4_mirb), FUNCTION(qup0_se5_l0), FUNCTION(qup0_se5_l1), FUNCTION(qup0_se5_l2), @@ -1669,25 +1819,30 @@ static const struct msm_function seraph_functions[] = { FUNCTION(qup1_se3_l3), FUNCTION(qup1_se4_l0), FUNCTION(qup1_se4_l1), - FUNCTION(qup1_se4_l2), - FUNCTION(qup1_se4_l3), - FUNCTION(qup1_se5_l0), - FUNCTION(qup1_se5_l1), + FUNCTION(qup1_se4_l2_mira), + FUNCTION(qup1_se4_l2_mirb), + FUNCTION(qup1_se4_l3_mira), + FUNCTION(qup1_se4_l3_mirb), + FUNCTION(qup1_se5_l0_mira), + FUNCTION(qup1_se5_l0_mirb), + FUNCTION(qup1_se5_l1_mira), + FUNCTION(qup1_se5_l1_mirb), FUNCTION(qup1_se5_l2), FUNCTION(qup1_se5_l3), - FUNCTION(sys_throttle_mira), - FUNCTION(sys_throttle_mirb), + FUNCTION(sdc2_clk), + FUNCTION(sdc2_cmd), + FUNCTION(sdc2_fb_clk), FUNCTION(tb_trig_sdc1), + FUNCTION(tb_trig_sdc2), FUNCTION(tgu_ch0_trigout), FUNCTION(tmess_prng0), FUNCTION(tmess_prng1), FUNCTION(tmess_prng2), FUNCTION(tmess_prng3), + FUNCTION(tsense_mirnat_RESERVED), FUNCTION(tsense_pwm1), - FUNCTION(tsense_pwm2), - FUNCTION(tsense_pwm3), FUNCTION(usb0_hs), - FUNCTION(usb0_phy), + FUNCTION(usb0_phy_ps), FUNCTION(vsense_trigger_mirnat), FUNCTION(wcn_sw), FUNCTION(wcn_sw_ctrl), @@ -1699,316 +1854,277 @@ static const struct msm_function seraph_functions[] = { * Clients would not be able to request these dummy pin groups. 
*/ static const struct msm_pingroup seraph_groups[] = { - [0] = PINGROUP(0, qup0_se0_l0, ibi_i3c, aoss_cti, NA, NA, NA, NA, NA, - NA, NA, NA, 0x84010, 0), - [1] = PINGROUP(1, qup0_se0_l1, ibi_i3c, aoss_cti, NA, NA, NA, NA, NA, - NA, NA, NA, 0x84010, 1), - [2] = PINGROUP(2, qup0_se0_l2, qup0_se1_l0, aoss_cti, NA, NA, NA, NA, + [0] = PINGROUP(0, qup0_se2_l0, aoss_ts, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84010, 0), + [1] = PINGROUP(1, qup0_se2_l1, aoss_ts, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84010, 1), + [2] = PINGROUP(2, qup0_se2_l2, qspi0_clk, aoss_ts, pwm_0, NA, NA, NA, NA, NA, NA, NA, 0x84010, 2), - [3] = PINGROUP(3, qup0_se0_l3, qup0_se1_l1, aoss_cti, NA, NA, NA, NA, + [3] = PINGROUP(3, qup0_se2_l3, qspi0_cs0_n, aoss_ts, pwm_1, NA, NA, NA, NA, NA, NA, NA, 0x84010, 3), - [4] = PINGROUP(4, qup1_se4_l0, qup1_se0_l0, ibi_i3c, ibi_i3c, NA, NA, + [4] = PINGROUP(4, qup1_se4_l0, ibi_i3c, i3c_s_sda, pll_clk_aux, NA, NA, NA, NA, NA, NA, NA, 0x84000, 11), - [5] = PINGROUP(5, qup1_se4_l1, qup1_se0_l1, ibi_i3c, ibi_i3c, NA, NA, - NA, NA, NA, NA, NA, 0x84000, 12), - [6] = PINGROUP(6, qup1_se4_l2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84000, 13), - [7] = PINGROUP(7, qup1_se4_l3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84000, 14), - [8] = PINGROUP(8, qup1_se2_l0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0, -1), - [9] = PINGROUP(9, qup1_se2_l1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0, -1), - [10] = PINGROUP(10, qup1_se2_l2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0, -1), - [11] = PINGROUP(11, qup1_se2_l3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84000, 15), - [12] = PINGROUP(12, qup0_se2_l2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84010, 4), - [13] = PINGROUP(13, qup0_se2_l3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84010, 5), - [14] = PINGROUP(14, qup1_se5_l2, qup1_se5_l0, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0, -1), - [15] = PINGROUP(15, qup1_se5_l3, qup1_se5_l1, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0x84004, 0), - [16] = PINGROUP(16, 
qup0_se3_l0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84010, 6), - [17] = PINGROUP(17, qup0_se3_l1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84010, 7), - [18] = PINGROUP(18, qup0_se3_l2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84010, 8), - [19] = PINGROUP(19, qup0_se3_l3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84010, 9), + [5] = PINGROUP(5, qup1_se4_l1, ibi_i3c, i3c_s_scl, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84000, 12), + [6] = PINGROUP(6, qup1_se4_l2_mira, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84000, 13), + [7] = PINGROUP(7, qup1_se4_l3_mira, pcie1_rst_n, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84000, 14), + [8] = PINGROUP(8, qup1_se2_l0, phase_flag2, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0, -1), + [9] = PINGROUP(9, qup1_se2_l1, phase_flag3, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0, -1), + [10] = PINGROUP(10, qup1_se2_l2, qup1_se5_l0_mirb, phase_flag4, NA, NA, + NA, NA, NA, NA, NA, NA, 0, -1), + [11] = PINGROUP(11, qup1_se2_l3, qup1_se5_l1_mirb, phase_flag5, NA, NA, + NA, NA, NA, NA, NA, NA, 0x84000, 15), + [12] = PINGROUP(12, qup0_se0_l2, qup0_se4_l4_mirb, mdp_vsync, pwm_2, NA, + NA, NA, NA, NA, NA, NA, 0x84010, 4), + [13] = PINGROUP(13, qup0_se0_l3, mdp_vsync, pwm_3, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84010, 5), + [14] = PINGROUP(14, qup1_se5_l2, phase_flag6, NA, qdss_gpio_tracedata15, + NA, NA, NA, NA, NA, NA, NA, 0, -1), + [15] = PINGROUP(15, qup1_se5_l3, phase_flag7, NA, qdss_gpio_tracedata14, + NA, NA, NA, NA, NA, NA, NA, 0x84004, 0), + [16] = PINGROUP(16, qup0_se3_l0, i2s0_data0, pwm_4, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84010, 6), + [17] = PINGROUP(17, qup0_se3_l1, i2s0_data1, pwm_5, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84010, 7), + [18] = PINGROUP(18, qup0_se3_l2, i2s0_sck, pwm_8, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84010, 8), + [19] = PINGROUP(19, qup0_se3_l3, i2s0_ws, pwm_9, phase_flag8, NA, NA, + NA, NA, NA, NA, NA, 0x84010, 9), [20] = PINGROUP(20, qup0_se4_l0, ibi_i3c, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84010, 10), [21] = 
PINGROUP(21, qup0_se4_l1, ibi_i3c, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84010, 11), - [22] = PINGROUP(22, qup0_se4_l2, qup0_se2_l0, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0x84010, 12), - [23] = PINGROUP(23, qup0_se4_l3, qup0_se2_l1, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0x84010, 13), - [24] = PINGROUP(24, qup1_se1_l0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + [22] = PINGROUP(22, qup0_se4_l2, qup0_se0_l0, ibi_i3c, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84010, 12), + [23] = PINGROUP(23, qup0_se4_l3, qup0_se0_l1, ibi_i3c, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84010, 13), + [24] = PINGROUP(24, qup1_se1_l0, phase_flag15, NA, + qdss_gpio_tracedata10, NA, NA, NA, NA, NA, NA, NA, 0x84004, 1), - [25] = PINGROUP(25, qup1_se1_l1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0, -1), - [26] = PINGROUP(26, qup1_se1_l2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0, -1), - [27] = PINGROUP(27, qup1_se1_l3, qdss_cti, NA, NA, NA, NA, NA, NA, NA, - NA, NA, 0x84004, 2), - [28] = PINGROUP(28, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84004, - 3), - [29] = PINGROUP(29, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84004, - 4), - [30] = PINGROUP(30, wcn_sw_ctrl, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84010, 14), - [31] = PINGROUP(31, wcn_sw, qdss_cti, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0x84004, 5), - [32] = PINGROUP(32, qspi0_data0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84004, 6), - [33] = PINGROUP(33, qspi0_data2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84004, 7), - [34] = PINGROUP(34, qspi0_data3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84010, 15), - [35] = PINGROUP(35, qspi0_clk, qup1_se3_l2, NA, NA, NA, NA, NA, NA, NA, - NA, NA, 0x84014, 0), - [36] = PINGROUP(36, qspi0_cs0_n, qup1_se3_l3, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0, -1), - [37] = PINGROUP(37, qspi0_data1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84004, 8), - [38] = PINGROUP(38, qspi0_cs1_n, NA, qdss_gpio_tracedata0, NA, NA, NA, - NA, NA, NA, NA, NA, 0x84004, 9), - [39] = PINGROUP(39, NA, 
qdss_gpio_tracedata1, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0x84004, 10), - [40] = PINGROUP(40, ddr_bist_fail, qdss_gpio_tracedata6, NA, NA, NA, NA, - NA, NA, NA, NA, NA, 0x84004, 11), - [41] = PINGROUP(41, qup0_se3_l4, ddr_bist_start, NA, - qdss_gpio_tracedata7, NA, NA, NA, NA, NA, NA, NA, - 0x84014, 1), - [42] = PINGROUP(42, qdss_gpio_tracedata8, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0x84014, 2), - [43] = PINGROUP(43, NA, qdss_gpio_tracedata9, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0x84004, 12), - [44] = PINGROUP(44, ddr_bist_complete, NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, 0x84004, 13), - [45] = PINGROUP(45, dp0_hot, ddr_bist_stop, NA, NA, NA, NA, NA, NA, NA, - NA, NA, 0x84004, 14), - [46] = PINGROUP(46, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), - [47] = PINGROUP(47, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84004, - 15), - [48] = PINGROUP(48, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84008, - 0), - [49] = PINGROUP(49, mdp_vsync, mdp_vsync0_out, mdp_vsync1_out, NA, NA, - NA, NA, NA, NA, NA, NA, 0, -1), - [50] = PINGROUP(50, mdp_vsync, mdp_vsync2_out, mdp_vsync3_out, NA, NA, - NA, NA, NA, NA, NA, NA, 0x84008, 1), - [51] = PINGROUP(51, tgu_ch0_trigout, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0x84008, 2), - [52] = PINGROUP(52, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), - [53] = PINGROUP(53, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), - [54] = PINGROUP(54, NA, ddr_pxi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84008, 3), - [55] = PINGROUP(55, ddr_pxi0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84008, 4), - [56] = PINGROUP(56, pcie0_clk_req_n, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0x84008, 5), - [57] = PINGROUP(57, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84008, - 6), - [58] = PINGROUP(58, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84008, - 7), - [59] = PINGROUP(59, pcie1_clk_req_n, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0x84008, 8), - [60] = PINGROUP(60, tsense_pwm1, tsense_pwm2, tsense_pwm3, NA, NA, NA, - 
NA, NA, NA, NA, NA, 0x84008, 9), - [61] = PINGROUP(61, qup0_se1_l2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84008, 10), - [62] = PINGROUP(62, qup0_se1_l3, NA, qdss_gpio_tracedata4, NA, NA, NA, - NA, NA, NA, NA, NA, 0, -1), - [63] = PINGROUP(63, qup1_se0_l2, NA, qdss_gpio_tracedata5, NA, NA, NA, - NA, NA, NA, NA, NA, 0x84014, 3), - [64] = PINGROUP(64, qup1_se0_l3, prng_rosc1, tmess_prng1, NA, NA, NA, - NA, NA, NA, NA, NA, 0x84014, 4), - [65] = PINGROUP(65, prng_rosc2, tmess_prng2, NA, atest_char0, NA, NA, - NA, NA, NA, NA, NA, 0x84014, 5), - [66] = PINGROUP(66, prng_rosc3, tmess_prng3, NA, atest_char1, NA, NA, - NA, NA, NA, NA, NA, 0x84014, 6), - [67] = PINGROUP(67, NA, atest_char2, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84014, 7), - [68] = PINGROUP(68, NA, qdss_gpio_tracedata2, atest_char3, NA, NA, NA, - NA, NA, NA, NA, NA, 0x84014, 8), - [69] = PINGROUP(69, cam_mclk, qdss_gpio_tracedata3, NA, NA, NA, NA, NA, - NA, NA, NA, NA, 0x84014, 9), - [70] = PINGROUP(70, cam_mclk, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84014, 10), - [71] = PINGROUP(71, cam_mclk, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84008, 11), - [72] = PINGROUP(72, cam_mclk, NA, vsense_trigger_mirnat, atest_usb00, - NA, NA, NA, NA, NA, NA, NA, 0x84008, 12), - [73] = PINGROUP(73, cam_asc_mclk4, jitter_bist, atest_usb01, NA, NA, NA, - NA, NA, NA, NA, NA, 0, -1), - [74] = PINGROUP(74, cam_mclk, NA, atest_usb02, NA, NA, NA, NA, NA, NA, - NA, NA, 0x84008, 13), - [75] = PINGROUP(75, cam_mclk, dbg_out_clk, NA, atest_usb03, NA, NA, NA, - NA, NA, NA, NA, 0x84014, 11), - [76] = PINGROUP(76, usb0_hs, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84014, 12), - [77] = PINGROUP(77, cci01_timer0, qdss_cti, NA, atest_char_start, NA, - NA, NA, NA, NA, NA, NA, 0x84014, 13), - [78] = PINGROUP(78, cci01_timer1, cci3_i2c_sda6, qdss_cti, NA, NA, NA, - NA, NA, NA, NA, NA, 0x84014, 14), - [79] = PINGROUP(79, cci01_timer2, cci3_i2c_scl6, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0x84014, 15), - [80] = PINGROUP(80, 
cci01_timer3, cci3_i2c_sda7, cci01_async_in1, NA, + [25] = PINGROUP(25, qup1_se1_l1, phase_flag16, NA, qdss_gpio_tracedata9, NA, NA, NA, NA, NA, NA, NA, 0, -1), - [81] = PINGROUP(81, cci01_timer4, cci3_i2c_scl7, cci01_async_in2, NA, - NA, NA, NA, NA, NA, NA, NA, 0x84018, 0), - [82] = PINGROUP(82, cci01_async_in0, qdss_cti, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0x84018, 1), - [83] = PINGROUP(83, cci1_i2c, qdss_cti, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [84] = PINGROUP(84, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84018, 2), - [85] = PINGROUP(85, cci0_i2c_sda0, prng_rosc0, tmess_prng0, NA, NA, NA, - NA, NA, NA, NA, NA, 0, -1), - [86] = PINGROUP(86, cci0_i2c_scl0, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0x84018, 3), - [87] = PINGROUP(87, cci0_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84018, 4), - [88] = PINGROUP(88, cci0_i2c, mdp_vsync_e, tb_trig_sdc1, NA, NA, NA, NA, - NA, NA, NA, NA, 0x84018, 5), - [89] = PINGROUP(89, cci1_i2c_sda2, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [90] = PINGROUP(90, cci1_i2c_scl2, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [91] = PINGROUP(91, cci2_i2c_sda4, gcc_gp2, NA, NA, NA, NA, NA, NA, NA, - NA, NA, 0x84018, 6), - [92] = PINGROUP(92, cci2_i2c_scl4, gcc_gp3, NA, NA, NA, NA, NA, NA, NA, - NA, NA, 0x84018, 7), - [93] = PINGROUP(93, qup0_se0_l4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0, -1), - [94] = PINGROUP(94, qup0_se4_l4, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0, -1), - [95] = PINGROUP(95, qup0_se5_l0, sys_throttle_mira, NA, NA, NA, NA, NA, - NA, NA, NA, NA, 0x84018, 8), - [96] = PINGROUP(96, qup0_se5_l1, sys_throttle_mirb, NA, NA, NA, NA, NA, - NA, NA, NA, NA, 0x84018, 9), - [97] = PINGROUP(97, qup0_se5_l2, mdp_vsync, pll_clk_aux, NA, NA, NA, NA, - NA, NA, NA, NA, 0x84018, 10), - [98] = PINGROUP(98, qup0_se5_l3, mdp_vsync, NA, NA, NA, NA, NA, NA, NA, - NA, NA, 0x84018, 11), - [99] = PINGROUP(99, usb0_phy, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, + [26] = PINGROUP(26, 
qup1_se1_l2, phase_flag17, NA, qdss_gpio_tracedata8, + NA, NA, NA, NA, NA, NA, NA, 0, -1), + [27] = PINGROUP(27, qup1_se1_l3, phase_flag27, NA, qdss_gpio_tracedata7, + NA, NA, NA, NA, NA, NA, NA, 0x84004, 2), + [28] = PINGROUP(28, qup1_se3_l2, aoss_ts, pwm_6, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84004, 3), + [29] = PINGROUP(29, qup1_se3_l3, aoss_ts, pwm_7, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84004, 4), + [30] = PINGROUP(30, qup0_se3_l4, mdp_vsync_e, aoss_ts, audio_ext_mclk0, + aoss_ts, NA, NA, NA, NA, NA, NA, 0x84010, 14), + [31] = PINGROUP(31, NA, qdss_gpio_tracedata2, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84004, 5), + [32] = PINGROUP(32, sdc2_cmd, qdss_gpio_tracedata1, NA, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84004, 6), + [33] = PINGROUP(33, wcn_sw_ctrl, phase_flag12, NA, qdss_gpio_tracedata0, + NA, NA, NA, NA, NA, NA, NA, 0x84004, 7), + [34] = PINGROUP(34, mdp_vsync, mdp_vsync0_out, mdp_vsync1_out, + phase_flag13, NA, ddr_pxi0, NA, NA, NA, NA, NA, 0x84010, + 15), + [35] = PINGROUP(35, mdp_vsync, mdp_vsync2_out, mdp_vsync3_out, + phase_flag14, NA, ddr_pxi0, NA, NA, NA, NA, NA, 0x84014, + 0), + [36] = PINGROUP(36, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [37] = PINGROUP(37, NA, qdss_gpio_tracedata13, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84004, 8), + [38] = PINGROUP(38, pcie0_clk_req_n, NA, qdss_gpio_tracedata12, NA, NA, + NA, NA, NA, NA, NA, NA, 0x84004, 9), + [39] = PINGROUP(39, NA, gcc_gp3, qdss_gpio_tracedata11, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84004, 10), + [40] = PINGROUP(40, pcie1_clk_req_n, NA, phase_flag18, NA, + atest_char_start, NA, NA, NA, NA, NA, NA, 0x84004, 11), + [41] = PINGROUP(41, qup0_se1_l2, qspi0_data0, cci1_timer2, + ddr_bist_fail, NA, NA, NA, NA, NA, NA, NA, 0x84014, 1), + [42] = PINGROUP(42, qup0_se1_l3, qspi0_data1, cci0_timer3_mirb, + ddr_bist_start, NA, NA, NA, NA, NA, NA, NA, 0x84014, 2), + [43] = PINGROUP(43, qup1_se0_l2, cci0_timer4_mirb, ddr_bist_complete, + phase_flag19, NA, qdss_cti, NA, NA, NA, NA, NA, 0x84004, + 
12), + [44] = PINGROUP(44, qup1_se0_l3, cci1_timer4, ddr_bist_stop, + phase_flag20, NA, qdss_cti, NA, NA, NA, NA, NA, 0x84004, + 13), + [45] = PINGROUP(45, cam_mclk, dp0_hot, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84004, 14), + [46] = PINGROUP(46, cam_mclk, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), - [100] = PINGROUP(100, usb0_phy, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + [47] = PINGROUP(47, cam_mclk, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84004, 15), + [48] = PINGROUP(48, cam_mclk, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84008, 0), + [49] = PINGROUP(49, cam_asc_mclk4, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [50] = PINGROUP(50, cam_mclk, cci1_async_in1, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84008, 1), + [51] = PINGROUP(51, cci0_timer0, cci1_async_in2, phase_flag10, NA, + tgu_ch0_trigout, NA, NA, NA, NA, NA, NA, 0x84008, 2), + [52] = PINGROUP(52, cci0_timer1, cci1_i2c_sda6, phase_flag28, NA, + qdss_gpio_tracedata4, NA, NA, NA, NA, NA, NA, 0, -1), + [53] = PINGROUP(53, cci0_timer2, cci1_i2c_scl6, NA, + qdss_gpio_tracedata3, tsense_mirnat_RESERVED, NA, NA, + NA, NA, NA, NA, 0, -1), + [54] = PINGROUP(54, cci0_timer3_mira, cci1_i2c_sda7, NA, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84008, 3), + [55] = PINGROUP(55, cci0_timer4_mira, cci1_i2c_scl7, host2wlan_sol_mirb, + NA, NA, NA, NA, NA, NA, NA, NA, 0x84008, 4), + [56] = PINGROUP(56, cci1_timer0, cci0_async_in0, phase_flag11, NA, NA, + NA, NA, NA, NA, NA, NA, 0x84008, 5), + [57] = PINGROUP(57, cci0_i2c_sda0, phase_flag21, NA, atest_char0, NA, + NA, NA, NA, NA, NA, NA, 0x84008, 6), + [58] = PINGROUP(58, cci0_i2c_scl0, phase_flag22, NA, atest_char1, NA, + NA, NA, NA, NA, NA, NA, 0x84008, 7), + [59] = PINGROUP(59, cci0_i2c_sda1, cci0_async_in2, phase_flag23, NA, + qdss_gpio_tracedata6, atest_char2, NA, NA, NA, NA, NA, + 0x84008, 8), + [60] = PINGROUP(60, cci0_i2c_scl1, cci0_async_in1, phase_flag24, NA, + qdss_gpio_tracedata5, atest_char3, NA, NA, NA, NA, NA, + 0x84008, 9), + [61] = PINGROUP(61, 
cci1_i2c_sda4, qup1_se0_l0, ibi_i3c, gcc_gp1, + aoss_cti, NA, qdss_cti, NA, NA, NA, NA, 0x84008, 10), + [62] = PINGROUP(62, cci1_i2c_scl4, qup1_se0_l1, ibi_i3c, tmess_prng1, + gcc_gp2, aoss_cti, NA, qdss_cti, NA, NA, NA, 0, -1), + [63] = PINGROUP(63, qup0_se2_l4, qspi0_data2, RESOUT_GPIO_N, dp0_hot, + NA, NA, NA, NA, NA, NA, NA, 0x84014, 3), + [64] = PINGROUP(64, qup0_se4_l4_mira, jitter_bist, NA, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84014, 4), + [65] = PINGROUP(65, qup0_se5_l0, cci1_async_in0, prng_rosc2, + tmess_prng2, cci1_timer1, NA, qdss_cti, NA, NA, NA, NA, + 0x84014, 5), + [66] = PINGROUP(66, qup0_se5_l1, prng_rosc3, tmess_prng3, NA, qdss_cti, + NA, NA, NA, NA, NA, NA, 0x84014, 6), + [67] = PINGROUP(67, qup0_se5_l2, qup0_se1_l0, qspi0_data3, NA, aoss_cti, + NA, NA, NA, NA, NA, NA, 0x84014, 7), + [68] = PINGROUP(68, qup0_se5_l3, qup0_se1_l1, qspi0_cs1_n, aoss_cti, NA, + NA, NA, NA, NA, NA, NA, 0x84014, 8), + [69] = PINGROUP(69, usb0_phy_ps, aoss_ts, qdss_cti, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84014, 9), + [70] = PINGROUP(70, audio_ext_mclk1, audio_ref_clk, aoss_ts, wcn_sw, + qdss_cti, NA, NA, NA, NA, NA, NA, 0x84014, 10), + [71] = PINGROUP(71, qup1_se3_l0, aoss_ts, phase_flag25, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84008, 11), + [72] = PINGROUP(72, qup1_se3_l1, aoss_ts, aoss_ts, phase_flag26, NA, NA, + NA, NA, NA, NA, NA, 0x84008, 12), + [73] = PINGROUP(73, host2wlan_sol_mira, sdc2_clk, prng_rosc1, + qdss_gpio_traceclk, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [74] = PINGROUP(74, sdc2_fb_clk, qdss_gpio_tracectl, tsense_pwm1, NA, + NA, NA, NA, NA, NA, NA, NA, 0x84008, 13), + [75] = PINGROUP(75, phase_flag29, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84014, 11), + [76] = PINGROUP(76, usb0_hs, phase_flag30, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84014, 12), + [77] = PINGROUP(77, phase_flag31, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84014, 13), + [78] = PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84014, + 14), + [79] = PINGROUP(79, NA, NA, NA, 
NA, NA, NA, NA, NA, NA, NA, NA, 0x84014, + 15), + [80] = PINGROUP(80, dbg_out_clk, vsense_trigger_mirnat, NA, NA, NA, NA, + NA, NA, NA, NA, NA, 0, -1), + [81] = PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84018, + 0), + [82] = PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84018, + 1), + [83] = PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [84] = PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84018, + 2), + [85] = PINGROUP(85, prng_rosc0, tmess_prng0, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0, -1), + [86] = PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84018, + 3), + [87] = PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84018, + 4), + [88] = PINGROUP(88, tb_trig_sdc1, NA, atest_usb00, NA, NA, NA, NA, NA, + NA, NA, NA, 0x84018, 5), + [89] = PINGROUP(89, phase_flag1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [90] = PINGROUP(90, tb_trig_sdc2, atest_usb02, NA, NA, NA, NA, NA, NA, + NA, NA, NA, 0, -1), + [91] = PINGROUP(91, atest_usb03, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84018, 6), + [92] = PINGROUP(92, phase_flag0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84018, 7), + [93] = PINGROUP(93, NA, atest_usb01, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [94] = PINGROUP(94, atest_usb0, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [95] = PINGROUP(95, gcc_gp9_clk, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84018, 8), + [96] = PINGROUP(96, gcc_gp10_clk, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84018, 9), + [97] = PINGROUP(97, gcc_gp11_clk, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84018, 10), + [98] = PINGROUP(98, gcc_gp1, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x84018, 11), + [99] = PINGROUP(99, gcc_gp2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, + -1), + [100] = PINGROUP(100, gcc_gp3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0x84018, 12), - [101] = PINGROUP(101, RESOUT_GPIO_N, NA, NA, NA, NA, NA, NA, NA, NA, NA, + [101] = PINGROUP(101, NA, NA, NA, NA, NA, 
NA, NA, NA, NA, NA, NA, 0, + -1), + [102] = PINGROUP(102, gcc_gp4_clk, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84018, 13), + [103] = PINGROUP(103, gcc_gp5_clk, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84018, 14), + [104] = PINGROUP(104, pll_bist_sync, gcc_gp6_clk, NA, NA, NA, NA, NA, + NA, NA, NA, NA, 0x84018, 15), + [105] = PINGROUP(105, phase_flag9, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), - [102] = PINGROUP(102, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84018, 13), - [103] = PINGROUP(103, dp0_hot, audio_ext_mclk1, audio_ref_clk, NA, NA, - NA, NA, NA, NA, NA, NA, 0x84018, 14), - [104] = PINGROUP(104, audio_ext_mclk0, cci23_timer0, pll_bist_sync, NA, - NA, NA, NA, NA, NA, NA, NA, 0x84018, 15), - [105] = PINGROUP(105, i2s0_sck, cci23_timer1, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0, -1), - [106] = PINGROUP(106, i2s0_data0, cci23_timer2, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0x8401C, 0), - [107] = PINGROUP(107, i2s0_data1, cci23_timer3, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0x8401C, 1), - [108] = PINGROUP(108, i2s0_ws, cci23_timer4, NA, NA, NA, NA, NA, NA, NA, + [106] = PINGROUP(106, gcc_gp7_clk, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x8401C, 0), + [107] = PINGROUP(107, gcc_gp8_clk, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x8401C, 1), + [108] = PINGROUP(108, qup1_se5_l0_mira, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), - [109] = PINGROUP(109, qup1_se3_l0, cci2_i2c_scl5, NA, NA, NA, NA, NA, - NA, NA, NA, NA, 0, -1), - [110] = PINGROUP(110, qup1_se3_l1, cci2_i2c_sda5, NA, NA, NA, NA, NA, - NA, NA, NA, NA, 0, -1), - [111] = PINGROUP(111, cci0_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0, -1), - [112] = PINGROUP(112, cci0_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0x84008, 14), - [113] = PINGROUP(113, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0, -1), - [114] = PINGROUP(114, cci1_i2c, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, - 0, -1), + [109] = PINGROUP(109, qup1_se5_l1_mira, NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0, -1), 
+ [110] = PINGROUP(110, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, + -1), + [111] = PINGROUP(111, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, + -1), + [112] = PINGROUP(112, cci1_i2c_sda5, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x84008, 14), + [113] = PINGROUP(113, cci1_i2c_scl5, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [114] = PINGROUP(114, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, + -1), [115] = PINGROUP(115, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), - [116] = PINGROUP(116, cci23_async_in0, NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, 0, -1), - [117] = PINGROUP(117, cci23_async_in1, phase_flag26, NA, NA, NA, NA, NA, - NA, NA, NA, NA, 0, -1), - [118] = PINGROUP(118, cci23_async_in2, phase_flag27, NA, NA, NA, NA, NA, - NA, NA, NA, NA, 0x8401C, 2), - [119] = PINGROUP(119, phase_flag25, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0x84008, 15), - [120] = PINGROUP(120, phase_flag24, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0x8400C, 0), + [116] = PINGROUP(116, qdss_cti, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [117] = PINGROUP(117, qdss_cti, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [118] = PINGROUP(118, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x8401C, 2), + [119] = PINGROUP(119, qup1_se4_l2_mirb, NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0x84008, 15), + [120] = PINGROUP(120, qup1_se4_l3_mirb, NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, 0x8400C, 0), [121] = PINGROUP(121, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), [122] = PINGROUP(122, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), - [123] = PINGROUP(123, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, + [123] = PINGROUP(123, cci1_timer3, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0, -1), + [124] = PINGROUP(124, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), - [124] = PINGROUP(124, NA, phase_flag22, NA, NA, NA, NA, NA, NA, NA, NA, + [125] = PINGROUP(125, cam_mclk, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x8400C, 1), + [126] = PINGROUP(126, 
cam_mclk, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0, -1), + [127] = PINGROUP(127, cci0_i2c_sda2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), - [125] = PINGROUP(125, phase_flag13, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0x8400C, 1), - [126] = PINGROUP(126, qdss_gpio_tracedata15, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0, -1), - [127] = PINGROUP(127, qdss_gpio_tracectl, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0, -1), - [128] = PINGROUP(128, qdss_gpio_traceclk, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0, -1), - [129] = PINGROUP(129, phase_flag5, qdss_gpio_tracedata14, atest_usb0, - NA, NA, NA, NA, NA, NA, NA, NA, 0x8400C, 2), - [130] = PINGROUP(130, gcc_gp1, phase_flag18, qdss_gpio_tracedata10, NA, - NA, NA, NA, NA, NA, NA, NA, 0, -1), - [131] = PINGROUP(131, gcc_gp2, phase_flag21, qdss_gpio_tracedata11, NA, - NA, NA, NA, NA, NA, NA, NA, 0x8401C, 3), - [132] = PINGROUP(132, gcc_gp3, qdss_gpio_tracedata12, NA, NA, NA, NA, - NA, NA, NA, NA, NA, 0, -1), - [133] = PINGROUP(133, phase_flag7, qdss_gpio_tracedata13, NA, NA, NA, - NA, NA, NA, NA, NA, NA, 0, -1), - [134] = PINGROUP(134, phase_flag12, NA, NA, NA, NA, NA, NA, NA, NA, NA, + [128] = PINGROUP(128, cci0_i2c_scl2, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), - [135] = PINGROUP(135, phase_flag6, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [136] = PINGROUP(136, phase_flag11, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [137] = PINGROUP(137, phase_flag10, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [138] = PINGROUP(138, phase_flag17, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [139] = PINGROUP(139, phase_flag16, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [140] = PINGROUP(140, phase_flag9, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [141] = PINGROUP(141, phase_flag1, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [142] = PINGROUP(142, phase_flag15, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [143] = PINGROUP(143, phase_flag8, NA, NA, NA, NA, NA, 
NA, NA, NA, NA, - NA, 0, -1), - [144] = PINGROUP(144, phase_flag14, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [145] = PINGROUP(145, phase_flag31, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [146] = PINGROUP(146, qdss_cti, phase_flag30, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0, -1), - [147] = PINGROUP(147, phase_flag3, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [148] = PINGROUP(148, phase_flag29, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [149] = PINGROUP(149, gcc_gp1, phase_flag4, NA, NA, NA, NA, NA, NA, NA, - NA, NA, 0, -1), - [150] = PINGROUP(150, phase_flag19, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [151] = PINGROUP(151, qdss_cti, phase_flag20, NA, NA, NA, NA, NA, NA, - NA, NA, NA, 0, -1), - [152] = PINGROUP(152, phase_flag23, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [153] = PINGROUP(153, phase_flag28, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [154] = PINGROUP(154, phase_flag2, NA, NA, NA, NA, NA, NA, NA, NA, NA, - NA, 0, -1), - [155] = PINGROUP(155, phase_flag0, NA, NA, NA, NA, NA, NA, NA, NA, NA, + [129] = PINGROUP(129, cci0_i2c_sda3, NA, NA, NA, NA, NA, NA, NA, NA, NA, + NA, 0x8400C, 2), + [130] = PINGROUP(130, cci0_i2c_scl3, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, 0, -1), + [131] = PINGROUP(131, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, + 0x8401C, 3), }; static struct pinctrl_qup seraph_qup_regs[] = { @@ -2039,7 +2155,7 @@ static const struct msm_pinctrl_soc_data seraph_pinctrl = { .nfunctions = ARRAY_SIZE(seraph_functions), .groups = seraph_groups, .ngroups = ARRAY_SIZE(seraph_groups), - .ngpios = 156, + .ngpios = 132, .qup_regs = seraph_qup_regs, .nqup_regs = ARRAY_SIZE(seraph_qup_regs), .wakeirq_map = seraph_pdc_map, diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index 9af27a694527..97846f3794a1 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -295,6 +295,18 @@ config BATTERY_BQ27XXX_DT_UPDATES_NVM general-purpose kernels, as this 
can cause misconfiguration of a smart battery with embedded NVM/flash. +config BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM + bool "BQ27xxx resistance table update of NVM/flash data memory" + depends on BATTERY_BQ27XXX_DT_UPDATES_NVM + help + Say Y here to enable devicetree monitored-battery resistance table config + and Qmax-cell0 value in the NVM/flash data memory. Only enable this option + when calibrated resistance table and Qmax-Cell0 parameters for the battery + in-use are updated in DT. If the Battery specific data is not available + in DT, then this config should not be set to Y. Not for general-purpose + kernels, as this can cause misconfiguration of a smart battery with + embedded NVM/flash. + config BATTERY_DA9030 tristate "DA9030 battery driver" depends on PMIC_DA903X diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c index 967beb3142ff..08fecc67cf08 100644 --- a/drivers/power/supply/bq256xx_charger.c +++ b/drivers/power/supply/bq256xx_charger.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #define BQ256XX_MANUFACTURER "Texas Instruments" @@ -145,6 +147,8 @@ #define BQ256XX_REG_RST BIT(7) +#define BQ256XX_MAX_INPUT_VOLTAGE_UV 5400000 + /** * struct bq256xx_init_data - * @ichg: fast charge current @@ -216,6 +220,7 @@ enum bq256xx_id { * @charger: power supply registered for the charger * @battery: power supply registered for the battery * @lock: mutex lock structure + * @irq_lock: mutex lock structure for irq * * @usb2_phy: usb_phy identifier * @usb3_phy: usb_phy identifier @@ -229,6 +234,9 @@ enum bq256xx_id { * @chip_info: device variant information * @state: device status and faults * @watchdog_timer: watchdog timer value in milliseconds + * + * @irq_waiting: flag for status of irq waiting + * @resume_completed: suspend/resume flag */ struct bq256xx_device { struct i2c_client *client; @@ -236,6 +244,8 @@ struct bq256xx_device { struct power_supply *charger; struct power_supply *battery; 
struct mutex lock; + struct mutex irq_lock; + struct regmap *regmap; struct usb_phy *usb2_phy; @@ -252,6 +262,11 @@ struct bq256xx_device { int watchdog_timer; /* extcon for VBUS / ID notification to USB*/ struct extcon_dev *extcon; + + bool irq_waiting; + bool resume_completed; + /* debug_board_gpio to detect the debug board*/ + int debug_board_gpio; }; /** @@ -1170,6 +1185,15 @@ static irqreturn_t bq256xx_irq_handler_thread(int irq, void *private) struct bq256xx_state state; int ret; + mutex_lock(&bq->irq_lock); + bq->irq_waiting = true; + if (!bq->resume_completed) { + pr_debug("IRQ triggered before device-resume\n"); + disable_irq_nosync(irq); + mutex_unlock(&bq->irq_lock); + return IRQ_HANDLED; + } + ret = bq256xx_get_state(bq, &state); if (ret < 0) goto irq_out; @@ -1184,6 +1208,8 @@ static irqreturn_t bq256xx_irq_handler_thread(int irq, void *private) power_supply_changed(bq->charger); irq_out: + bq->irq_waiting = false; + mutex_unlock(&bq->irq_lock); return IRQ_HANDLED; } @@ -1524,6 +1550,30 @@ static int bq256xx_power_supply_init(struct bq256xx_device *bq, return 0; } +static int bq256xx_debug_board_detect(struct bq256xx_device *bq) +{ + int ret = 0; + + if (!of_find_property(bq->dev->of_node, "debugboard-detect-gpio", NULL)) + return ret; + + bq->debug_board_gpio = of_get_named_gpio(bq->dev->of_node, + "debugboard-detect-gpio", 0); + if (IS_ERR(&bq->debug_board_gpio)) { + ret = PTR_ERR(&bq->debug_board_gpio); + dev_err(bq->dev, "Failed to initialize debugboard_detecte gpio\n"); + return ret; + } + gpio_direction_input(bq->debug_board_gpio); + if (gpio_get_value(bq->debug_board_gpio)) { + bq->init_data.vindpm = BQ256XX_MAX_INPUT_VOLTAGE_UV; + dev_info(bq->dev, + "debug_board detected, setting vindpm to %d\n", bq->init_data.vindpm); + } + + return ret; +} + static int bq256xx_hw_init(struct bq256xx_device *bq) { struct power_supply_battery_info *bat_info; @@ -1579,6 +1629,10 @@ static int bq256xx_hw_init(struct bq256xx_device *bq) 
bat_info->constant_charge_voltage_max_uv; } + ret = bq256xx_debug_board_detect(bq); + if (ret) + return ret; + ret = bq->chip_info->bq256xx_set_vindpm(bq, bq->init_data.vindpm); if (ret) return ret; @@ -1661,8 +1715,10 @@ static int bq256xx_probe(struct i2c_client *client, bq->client = client; bq->dev = dev; bq->chip_info = &bq256xx_chip_info_tbl[id->driver_data]; + bq->resume_completed = true; mutex_init(&bq->lock); + mutex_init(&bq->irq_lock); strncpy(bq->model_name, id->name, I2C_NAME_SIZE); @@ -1701,18 +1757,6 @@ static int bq256xx_probe(struct i2c_client *client, usb_register_notifier(bq->usb3_phy, &bq->usb_nb); } - if (client->irq) { - ret = devm_request_threaded_irq(dev, client->irq, NULL, - bq256xx_irq_handler_thread, - IRQF_TRIGGER_FALLING | - IRQF_ONESHOT, - dev_name(&client->dev), bq); - if (ret < 0) { - dev_err(dev, "get irq fail: %d\n", ret); - return ret; - } - } - ret = bq256xx_power_supply_init(bq, &psy_cfg, dev); if (ret) { dev_err(dev, "Failed to register power supply\n"); @@ -1747,6 +1791,23 @@ static int bq256xx_probe(struct i2c_client *client, extcon_set_state_sync(bq->extcon, EXTCON_USB, !!state.vbus_gd); + if (client->irq) { + ret = devm_request_threaded_irq(dev, client->irq, NULL, + bq256xx_irq_handler_thread, + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + dev_name(&client->dev), bq); + if (ret < 0) { + dev_err(dev, "get irq fail: %d\n", ret); + return ret; + } + + enable_irq_wake(client->irq); + } + + dev_dbg(dev, "bq256xx successfully probed. 
charger=0x%x\n", + state.vbus_gd); + return ret; } @@ -1786,11 +1847,92 @@ static const struct acpi_device_id bq256xx_acpi_match[] = { }; MODULE_DEVICE_TABLE(acpi, bq256xx_acpi_match); + +static int bq256xx_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct bq256xx_device *bq = i2c_get_clientdata(client); + + mutex_lock(&bq->irq_lock); + bq->resume_completed = false; + mutex_unlock(&bq->irq_lock); + + return 0; +} + +static int bq256xx_suspend_noirq(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct bq256xx_device *bq = i2c_get_clientdata(client); + + if (bq->irq_waiting) { + dev_err_ratelimited(dev, "Aborting suspend, an interrupt was detected while suspending\n"); + return -EBUSY; + } + + return 0; +} + +static int bq256xx_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct bq256xx_device *bq = i2c_get_clientdata(client); + + mutex_lock(&bq->irq_lock); + bq->resume_completed = true; + mutex_unlock(&bq->irq_lock); + if (bq->irq_waiting) { + /* irq was pending, call the handler */ + bq256xx_irq_handler_thread(client->irq, bq); + enable_irq(client->irq); + } + + return 0; +} + +static int bq256xx_restore(struct device *dev) +{ + int ret = 0; + struct i2c_client *client = to_i2c_client(dev); + struct bq256xx_device *bq = i2c_get_clientdata(client); + + if (client->irq > 0) { + disable_irq_nosync(client->irq); + devm_free_irq(dev, client->irq, bq); + /* + * Set extcon state depending upon USB connect/disconnect state + * on hibernation exit + */ + bq256xx_irq_handler_thread(client->irq, bq); + ret = devm_request_threaded_irq(dev, client->irq, NULL, + bq256xx_irq_handler_thread, + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + dev_name(&client->dev), bq); + if (ret < 0) { + dev_err(dev, "get irq fail: %d\n", ret); + return ret; + } + + enable_irq_wake(client->irq); + } + + return ret; +} + +static const struct dev_pm_ops bq256xx_pm_ops = { + .suspend = bq256xx_suspend, 
+ .suspend_noirq = bq256xx_suspend_noirq, + .resume = bq256xx_resume, + .restore = bq256xx_restore, +}; + static struct i2c_driver bq256xx_driver = { .driver = { .name = "bq256xx-charger", .of_match_table = bq256xx_of_match, .acpi_match_table = bq256xx_acpi_match, + .pm = &bq256xx_pm_ops, }, .probe = bq256xx_probe, .id_table = bq256xx_i2c_ids, diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 4a5371a3a531..ca4f7d26684b 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -866,6 +866,25 @@ enum bq27xxx_dm_reg_id { BQ27XXX_DM_DESIGN_CAPACITY = 0, BQ27XXX_DM_DESIGN_ENERGY, BQ27XXX_DM_TERMINATE_VOLTAGE, +#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM + BQ27XXX_DM_TAPER_RATE, + BQ27XXX_DM_QMAX, + BQ27XXX_RAM_R_A0_0, + BQ27XXX_RAM_R_A0_1, + BQ27XXX_RAM_R_A0_2, + BQ27XXX_RAM_R_A0_3, + BQ27XXX_RAM_R_A0_4, + BQ27XXX_RAM_R_A0_5, + BQ27XXX_RAM_R_A0_6, + BQ27XXX_RAM_R_A0_7, + BQ27XXX_RAM_R_A0_8, + BQ27XXX_RAM_R_A0_9, + BQ27XXX_RAM_R_A0_10, + BQ27XXX_RAM_R_A0_11, + BQ27XXX_RAM_R_A0_12, + BQ27XXX_RAM_R_A0_13, + BQ27XXX_RAM_R_A0_14, +#endif }; #define bq27000_dm_regs NULL @@ -920,6 +939,25 @@ static struct bq27xxx_dm_reg bq27421_dm_regs[] = { [BQ27XXX_DM_DESIGN_CAPACITY] = { 82, 10, 2, 0, 8000 }, [BQ27XXX_DM_DESIGN_ENERGY] = { 82, 12, 2, 0, 32767 }, [BQ27XXX_DM_TERMINATE_VOLTAGE] = { 82, 16, 2, 2500, 3700 }, +#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM + [BQ27XXX_DM_TAPER_RATE] = { 82, 27, 2, 0, 2000 }, /* Taper rate */ + [BQ27XXX_DM_QMAX] = { 82, 0, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_0] = { 89, 0, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_1] = { 89, 2, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_2] = { 89, 4, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_3] = { 89, 6, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_4] = { 89, 8, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_5] = { 89, 10, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_6] = { 89, 12, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_7] = { 89, 14, 2, 0, 32767 }, + 
[BQ27XXX_RAM_R_A0_8] = { 89, 16, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_9] = { 89, 18, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_10] = { 89, 20, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_11] = { 89, 22, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_12] = { 89, 24, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_13] = { 89, 26, 2, 0, 32767 }, + [BQ27XXX_RAM_R_A0_14] = { 89, 28, 2, 0, 32767 }, +#endif }; static struct bq27xxx_dm_reg bq27425_dm_regs[] = { @@ -1058,10 +1096,30 @@ static const char * const bq27xxx_dm_reg_name[] = { [BQ27XXX_DM_DESIGN_CAPACITY] = "design-capacity", [BQ27XXX_DM_DESIGN_ENERGY] = "design-energy", [BQ27XXX_DM_TERMINATE_VOLTAGE] = "terminate-voltage", +#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM + [BQ27XXX_DM_TAPER_RATE] = "Taper-rate", + [BQ27XXX_DM_QMAX] = "QMAX-Cell", + [BQ27XXX_RAM_R_A0_0] = "R_A0_0", + [BQ27XXX_RAM_R_A0_1] = "R_A0_1", + [BQ27XXX_RAM_R_A0_2] = "R_A0_2", + [BQ27XXX_RAM_R_A0_3] = "R_A0_3", + [BQ27XXX_RAM_R_A0_4] = "R_A0_4", + [BQ27XXX_RAM_R_A0_5] = "R_A0_5", + [BQ27XXX_RAM_R_A0_6] = "R_A0_6", + [BQ27XXX_RAM_R_A0_7] = "R_A0_7", + [BQ27XXX_RAM_R_A0_8] = "R_A0_8", + [BQ27XXX_RAM_R_A0_9] = "R_A0_9", + [BQ27XXX_RAM_R_A0_10] = "R_A0_10", + [BQ27XXX_RAM_R_A0_11] = "R_A0_11", + [BQ27XXX_RAM_R_A0_12] = "R_A0_12", + [BQ27XXX_RAM_R_A0_13] = "R_A0_13", + [BQ27XXX_RAM_R_A0_14] = "R_A0_14", +#endif + }; -static bool bq27xxx_dt_to_nvm = true; +static bool bq27xxx_dt_to_nvm; module_param_named(dt_monitored_battery_updates_nvm, bq27xxx_dt_to_nvm, bool, 0444); MODULE_PARM_DESC(dt_monitored_battery_updates_nvm, "Devicetree monitored-battery config updates data memory on NVM/flash chips.\n" @@ -1390,7 +1448,8 @@ static int bq27xxx_battery_write_dm_block(struct bq27xxx_device_info *di, BQ27XXX_MSLEEP(1); - ret = bq27xxx_write_block(di, BQ27XXX_DM_DATA, buf->data, BQ27XXX_DM_SZ); + ret = bq27xxx_write_block(di, BQ27XXX_DM_DATA, buf->data, + (BQ27XXX_DM_SZ-1)); if (ret < 0) goto out; @@ -1431,6 +1490,10 @@ static void bq27xxx_battery_set_config(struct bq27xxx_device_info *di, 
struct bq27xxx_dm_buf bd = BQ27XXX_DM_BUF(di, BQ27XXX_DM_DESIGN_CAPACITY); struct bq27xxx_dm_buf bt = BQ27XXX_DM_BUF(di, BQ27XXX_DM_TERMINATE_VOLTAGE); bool updated; +#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM + struct bq27xxx_dm_buf rt = BQ27XXX_DM_BUF(di, BQ27XXX_RAM_R_A0_0); + u32 i, taper_rate; +#endif if (bq27xxx_battery_unseal(di) < 0) return; @@ -1438,13 +1501,30 @@ static void bq27xxx_battery_set_config(struct bq27xxx_device_info *di, if (info->charge_full_design_uah != -EINVAL && info->energy_full_design_uwh != -EINVAL) { bq27xxx_battery_read_dm_block(di, &bd); - /* assume design energy & capacity are in same block */ + + /* assume design energy, taper_rate & capacity are in same block */ bq27xxx_battery_update_dm_block(di, &bd, BQ27XXX_DM_DESIGN_CAPACITY, info->charge_full_design_uah / 1000); bq27xxx_battery_update_dm_block(di, &bd, BQ27XXX_DM_DESIGN_ENERGY, info->energy_full_design_uwh / 1000); + +#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM + bq27xxx_battery_read_dm_block(di, &rt); + /* update Taper rate based on the capacity and term current */ + taper_rate = (u32)((info->charge_full_design_uah * 10) / + info->charge_term_current_ua); + bq27xxx_battery_update_dm_block(di, &bd, BQ27XXX_DM_TAPER_RATE, + taper_rate); + /* update the QMAX-CELL0 and resistance table */ + bq27xxx_battery_update_dm_block(di, &bd, BQ27XXX_DM_QMAX, + di->qmax_cell0); + for (i = 0 ; i < 15; i++) + bq27xxx_battery_update_dm_block(di, &rt, + (i + BQ27XXX_RAM_R_A0_0), + di->resist_table[i]); +#endif } if (info->voltage_min_design_uv != -EINVAL) { @@ -1461,6 +1541,19 @@ static void bq27xxx_battery_set_config(struct bq27xxx_device_info *di, bq27xxx_battery_write_dm_block(di, &bd); bq27xxx_battery_write_dm_block(di, &bt); +#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM + bq27xxx_battery_write_dm_block(di, &rt); + + bq27xxx_battery_read_dm_block(di, &bd); + for (i = 0; i < BQ27XXX_DM_SZ; i++) + dev_dbg(di->dev, "BQ27xxx: DM_NVM[%d]: 0x%04x\n", i, 
bd.data[i]); + + bq27xxx_battery_read_dm_block(di, &rt); + for (i = 0; i < BQ27XXX_DM_SZ; i++) + dev_dbg(di->dev, "BQ27xxx: Resisiatnce table DM_NVM[%d]:0x%04x\n", + i, rt.data[i]); +#endif + bq27xxx_battery_seal(di); if (updated && !(di->opts & BQ27XXX_O_CFGUP)) { diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c index 0713a52a2510..ec3a8873b037 100644 --- a/drivers/power/supply/bq27xxx_battery_i2c.c +++ b/drivers/power/supply/bq27xxx_battery_i2c.c @@ -136,6 +136,59 @@ static int bq27xxx_battery_i2c_bulk_write(struct bq27xxx_device_info *di, return 0; } +#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM +static int bq27xx_parse_dt(struct bq27xxx_device_info *di, + struct device *dev, + struct device_node *battery_np) +{ + int ret; + int rc; + + ret = of_property_read_u32(battery_np, "qmax-cell0", &di->qmax_cell0); + if (ret) { + dev_err(dev, "Undefined Qmax-Cell0\n"); + return ret; + } + + rc = of_property_count_elems_of_size(battery_np, "resist-table", + sizeof(u32)); + + if (rc != BQ27XXX_RESISTANCE_TABLE_LENGTH) { + dev_err(dev, "Invalid number of elements in resist-table\n"); + return -EINVAL; + } + + ret = of_property_read_u32_array(battery_np, "resist-table", + di->resist_table, BQ27XXX_RESISTANCE_TABLE_LENGTH); + if (ret) + dev_err(dev, "Undefined resistance table\n"); + + return ret; +} +#endif + +static int bq27xxx_restore(struct device *dev) +{ + int ret = 0; + struct i2c_client *client = to_i2c_client(dev); + struct bq27xxx_device_info *di = i2c_get_clientdata(client); + + if (client->irq > 0) { + disable_irq_nosync(client->irq); + devm_free_irq(dev, client->irq, di); + ret = request_threaded_irq(client->irq, + NULL, bq27xxx_battery_irq_handler_thread, + IRQF_ONESHOT, + di->name, di); + } + + return ret; +} + +static const struct dev_pm_ops bq27xxx_pm_ops = { + .restore = bq27xxx_restore, +}; + static int bq27xxx_battery_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ 
-143,6 +196,9 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client, int ret; char *name; int num; +#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM + struct device_node *battery_np_rt; +#endif /* Get new ID for the new battery device */ mutex_lock(&battery_mutex); @@ -169,6 +225,17 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client, di->bus.read_bulk = bq27xxx_battery_i2c_bulk_read; di->bus.write_bulk = bq27xxx_battery_i2c_bulk_write; +#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM + battery_np_rt = of_parse_phandle(client->dev.of_node, + "bat-resist-table", 0); + if (!battery_np_rt) + return -ENODEV; + ret = bq27xx_parse_dt(di, di->dev, battery_np_rt); + of_node_put(battery_np_rt); + if (ret) + return -EINVAL; +#endif + ret = bq27xxx_battery_setup(di); if (ret) goto err_failed; @@ -295,6 +362,7 @@ static struct i2c_driver bq27xxx_battery_i2c_driver = { .driver = { .name = "bq27xxx-battery", .of_match_table = of_match_ptr(bq27xxx_battery_i2c_of_match_table), + .pm = &bq27xxx_pm_ops, }, .probe = bq27xxx_battery_i2c_probe, .remove = bq27xxx_battery_i2c_remove, diff --git a/drivers/powercap/Kconfig b/drivers/powercap/Kconfig index 93b651f97cd6..78d795414ff3 100644 --- a/drivers/powercap/Kconfig +++ b/drivers/powercap/Kconfig @@ -77,4 +77,16 @@ config DTPM_DEVFREQ help This enables support for device power limitation based on energy model. + +config QCOM_POWER_TELEMETRY + tristate "Qualcomm Technologies, Inc. Power Telemetry Hardware driver" + depends on SPMI && NVMEM_SPMI_SDAM + default n + help + This enables the Qualcomm Technologies, Inc. power telemetry + hardware device driver. It measures the power consumption + data of different PMIC regulators or bucks in different + modes. It exposes these data to userspace clients via + the powercap sysfs interface. 
+ endif diff --git a/drivers/powercap/Makefile b/drivers/powercap/Makefile index 72b860c85cef..6988e0b34aad 100644 --- a/drivers/powercap/Makefile +++ b/drivers/powercap/Makefile @@ -8,3 +8,5 @@ obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o obj-$(CONFIG_IDLE_INJECT) += idle_inject.o obj-$(CONFIG_QCOM_EPM) += qti_epm_hardware.o qti_epm_hardware-y += qti_epm_hw.o qti_epm_interface.o +obj-$(CONFIG_QCOM_POWER_TELEMETRY) += qcom_power_telemetry.o +qcom_power_telemetry-y += qti_power_telemetry.o qti_power_telemetry_interface.o diff --git a/drivers/powercap/qti_power_telemetry.c b/drivers/powercap/qti_power_telemetry.c new file mode 100644 index 000000000000..b1ce67cfda60 --- /dev/null +++ b/drivers/powercap/qti_power_telemetry.c @@ -0,0 +1,515 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#define pr_fmt(fmt) "qti_qpt: %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qti_power_telemetry.h" + +#define QPT_CONFIG_SDAM_BASE_OFF 0x45 +#define QPT_DATA_SDAM_BASE_OFF 0x45 +#define QPT_CH_ENABLE_MASK BIT(7) +#define QPT_SID_MASK GENMASK(3, 0) +#define QPT_GANG_NUM_MASK 0x70 +#define QPT_DATA_BYTE_SIZE 2 +#define QPT_DATA_TO_POWER_UW 1500L /* 1 LSB = 1.5 mW */ + +#define QPT_GET_POWER_UW_FROM_ADC(adc) ((adc) * QPT_DATA_TO_POWER_UW) +#define QPT_SDAM_SAMPLING_MS 1280 + +static int qpt_sdam_nvmem_read(struct qpt_priv *qpt, struct qpt_sdam *sdam, + uint16_t offset, size_t bytes, void *data) +{ + int rc = 0; + + mutex_lock(&sdam->lock); + rc = nvmem_device_read(sdam->nvmem, offset, bytes, data); + mutex_unlock(&sdam->lock); + if (rc < 0) + dev_err(qpt->dev, + "Failed to read sdam[%d] off:%#x,size:%ld rc=%d\n", + sdam->id, offset, bytes, rc); + return rc; +} + +static int qti_qpt_read_rtc_time(struct qpt_priv *qpt, u64 *rtc_ts) +{ + int rc = -1; + + rc = 
qpt_sdam_nvmem_read(qpt, &qpt->sdam[DATA_AVG_SDAM], + QPT_DATA_SDAM_BASE_OFF + DATA_SDAM_RTC0, 4, &rtc_ts); + if (rc < 0) + return rc; + + return 0; +} + +static void qpt_channel_avg_data_update(struct qpt_device *qpt_dev, + uint8_t lsb, uint8_t msb, u64 ts) +{ + mutex_lock(&qpt_dev->lock); + qpt_dev->last_data = (msb << 8) | lsb; + qpt_dev->last_data_uw = QPT_GET_POWER_UW_FROM_ADC(qpt_dev->last_data); + mutex_unlock(&qpt_dev->lock); + QPT_DBG(qpt_dev->priv, "qpt[%s]: power:%lluuw msb:0x%x lsb:0x%x", + qpt_dev->name, qpt_dev->last_data_uw, msb, lsb); +} + +static int qti_qpt_read_seq_count(struct qpt_priv *qpt, int *seq_count) +{ + int rc = -1; + + rc = qpt_sdam_nvmem_read(qpt, &qpt->sdam[DATA_AVG_SDAM], + QPT_DATA_SDAM_BASE_OFF + DATA_SDAM_SEQ_START, 1, &seq_count); + if (rc < 0) + return rc; + + return 0; +} + +static int qti_qpt_read_all_data(struct qpt_priv *qpt, uint16_t offset, size_t size) +{ + uint8_t data_sdam_avg[DATA_SDAM_POWER_MSB_CH48 + 1] = {0}; + int seq_count = 0; + int rc = 0; + struct qpt_device *qpt_dev; + int seq_count_start = -1; + + rc = qti_qpt_read_seq_count(qpt, &seq_count); + if (rc < 0) + return rc; + + do { + seq_count_start = seq_count; + rc = qpt_sdam_nvmem_read(qpt, &qpt->sdam[DATA_AVG_SDAM], offset, + size, data_sdam_avg); + if (rc < 0) + return rc; + + rc = qti_qpt_read_seq_count(qpt, &seq_count); + if (rc < 0) + return rc; + + } while (seq_count < seq_count_start); + + qpt->hw_read_ts = ktime_get(); + qti_qpt_read_rtc_time(qpt, &qpt->rtc_ts); + list_for_each_entry(qpt_dev, &qpt->qpt_dev_head, qpt_node) { + if (!qpt_dev->enabled) + continue; + if (qpt_dev->data_offset >= (offset + size)) + continue; + qpt_channel_avg_data_update(qpt_dev, + data_sdam_avg[qpt_dev->data_offset], + data_sdam_avg[qpt_dev->data_offset + 1], + qpt->hw_read_ts); + } + QPT_DBG(qpt, "Time(us) to read all channel:%lldus & RTC Time:%lld", + ktime_to_us(ktime_sub(ktime_get(), qpt->hw_read_ts)), + qpt->rtc_ts); + + return 0; +} + +static void 
qti_qpt_get_power(struct qpt_device *qpt_dev, u64 *power_uw) +{ + mutex_lock(&qpt_dev->lock); + *power_uw = qpt_dev->last_data_uw; + mutex_unlock(&qpt_dev->lock); +} + +static int qti_qpt_read_data_update(struct qpt_priv *qpt) +{ + int rc = 0; + + mutex_lock(&qpt->hw_read_lock); + rc = qti_qpt_read_all_data(qpt, + QPT_DATA_SDAM_BASE_OFF + DATA_SDAM_POWER_LSB_CH1, + qpt->last_ch_offset + 2); + mutex_unlock(&qpt->hw_read_lock); + + if (rc < 0) + return rc; + + return 0; +} + +static irqreturn_t qpt_sdam_irq_handler(int irq, void *data) +{ + struct qpt_priv *qpt = data; + + qti_qpt_read_data_update(qpt); + + return IRQ_HANDLED; +} + +static int get_dt_index_from_ppid(struct qpt_device *qpt_dev) +{ + uint16_t ppid = 0, i = 0; + struct qpt_priv *qpt = qpt_dev->priv; + + if (!qpt_dev->enabled || !qpt->dt_reg_cnt) + return -EINVAL; + + ppid = qpt_dev->sid << 8 | qpt_dev->pid; + + for (i = 0; i < qpt->dt_reg_cnt; i++) { + if (ppid == qpt->reg_ppid_map[i]) + return i; + } + + return -ENODEV; +} + +static int qti_qpt_config_sdam_initialize(struct qpt_priv *qpt) +{ + uint8_t *config_sdam = NULL; + struct qpt_device *qpt_dev = NULL; + int rc = 0; + uint8_t conf_idx, data_idx; + + if (!qpt->sdam[CONFIG_SDAM].nvmem) { + dev_err(qpt->dev, "Invalid sdam nvmem\n"); + return -EINVAL; + } + + config_sdam = devm_kcalloc(qpt->dev, MAX_CONFIG_SDAM_DATA, + sizeof(*config_sdam), GFP_KERNEL); + if (!config_sdam) + return -ENOMEM; + + rc = qpt_sdam_nvmem_read(qpt, &qpt->sdam[CONFIG_SDAM], + QPT_CONFIG_SDAM_BASE_OFF, + MAX_CONFIG_SDAM_DATA, config_sdam); + if (rc < 0) + return rc; + + if (!(config_sdam[CONFIG_SDAM_QPT_MODE] & BIT(7))) { + dev_err(qpt->dev, "pmic qpt is in disabled state, reg:0x%x\n", + config_sdam[CONFIG_SDAM_QPT_MODE]); + return -ENODEV; + } + qpt->mode = config_sdam[CONFIG_SDAM_QPT_MODE] & BIT(0); + qpt->max_data = config_sdam[CONFIG_SDAM_MAX_DATA]; + qpt->config_sdam_data = config_sdam; + + /* logic to read number of channels and die_temps */ + for (conf_idx = 
CONFIG_SDAM_CONFIG_1, data_idx = 0; + conf_idx <= CONFIG_SDAM_CONFIG_48; + conf_idx += 2, data_idx += QPT_DATA_BYTE_SIZE) { + const char *reg_name; + + if (!(config_sdam[conf_idx] & QPT_CH_ENABLE_MASK)) + continue; + + qpt->num_reg++; + qpt_dev = devm_kzalloc(qpt->dev, sizeof(*qpt_dev), GFP_KERNEL); + if (!qpt_dev) + return -ENOMEM; + qpt_dev->enabled = true; + qpt_dev->sid = config_sdam[conf_idx] & QPT_SID_MASK; + qpt_dev->gang_num = config_sdam[conf_idx] & QPT_GANG_NUM_MASK; + qpt_dev->pid = config_sdam[conf_idx + 1]; + qpt_dev->priv = qpt; + qpt_dev->data_offset = data_idx; + mutex_init(&qpt_dev->lock); + /* Track the highest enabled channel so reads cover only live data. */ + if (data_idx > qpt->last_ch_offset) + qpt->last_ch_offset = data_idx; + + rc = get_dt_index_from_ppid(qpt_dev); + if (rc < 0 || rc >= qpt->dt_reg_cnt) { + dev_err(qpt->dev, "No matching channel ppid, rc:%d\n", + rc); + return rc; + } + /* NOTE(review): return value unchecked — reg_name may be left unset + * if the strings property is shorter than dt_reg_cnt; verify DT. */ + of_property_read_string_index(qpt->dev->of_node, + "qcom,reg-ppid-names", rc, &reg_name); + dev_dbg(qpt->dev, "%s: qpt channel:%s off:0x%x\n", __func__, + reg_name, data_idx); + strscpy(qpt_dev->name, reg_name, sizeof(qpt_dev->name)); + + list_add(&qpt_dev->qpt_node, &qpt->qpt_dev_head); + } + + return 0; +} + +/* Resolve one named SDAM nvmem cell device; tolerates probe deferral. */ +static int qpt_get_sdam_nvmem(struct device *dev, struct qpt_sdam *sdam, + char *sdam_name) +{ + int rc = 0; + + sdam->nvmem = devm_nvmem_device_get(dev, sdam_name); + if (IS_ERR(sdam->nvmem)) { + rc = PTR_ERR(sdam->nvmem); + if (rc != -EPROBE_DEFER) + dev_err(dev, "Failed to get nvmem device, rc=%d\n", + rc); + sdam->nvmem = NULL; + return rc; + } + + return rc; +} + +/* Look up both SDAMs (config + data) declared via nvmem-names. */ +static int qpt_parse_sdam_data(struct qpt_priv *qpt) +{ + int rc = 0; + char buf[20]; + + rc = of_property_count_strings(qpt->dev->of_node, "nvmem-names"); + if (rc < 0) { + dev_err(qpt->dev, "Could not find nvmem device\n"); + return rc; + } + if (rc != MAX_QPT_SDAM) { + dev_err(qpt->dev, "Invalid num of SDAMs:%d\n", rc); + return -EINVAL; + } + + qpt->num_sdams = rc; + qpt->sdam = devm_kcalloc(qpt->dev, qpt->num_sdams, + sizeof(*qpt->sdam), GFP_KERNEL); + if
(!qpt->sdam) + return -ENOMEM; + + /* Check for config sdam */ + qpt->sdam[0].id = CONFIG_SDAM; + scnprintf(buf, sizeof(buf), "qpt-config-sdam"); + mutex_init(&qpt->sdam[0].lock); + rc = qpt_get_sdam_nvmem(qpt->dev, &qpt->sdam[0], buf); + if (rc < 0) + return rc; + + /* Check data sdam */ + qpt->sdam[1].id = DATA_AVG_SDAM; + mutex_init(&qpt->sdam[1].lock); + scnprintf(buf, sizeof(buf), "qpt-data-sdam"); + rc = qpt_get_sdam_nvmem(qpt->dev, &qpt->sdam[1], buf); + if (rc < 0) + return rc; + + return 0; +} + +/* + * Genpd notifier: gate the SDAM IRQ on the power domain state, and drop + * stale cached readings if the domain stayed off longer than one sample + * period. Skipped entirely while a system suspend is in flight. + */ +static int qpt_pd_callback(struct notifier_block *nfb, + unsigned long action, void *v) +{ + struct qpt_priv *qpt = container_of(nfb, struct qpt_priv, genpd_nb); + ktime_t now; + s64 diff; + struct qpt_device *qpt_dev; + + if (atomic_read(&qpt->in_suspend)) + goto cb_exit; + + switch (action) { + case GENPD_NOTIFY_OFF: + if (qpt->irq_enabled) { + disable_irq_nosync(qpt->irq); + qpt->irq_enabled = false; + } + break; + case GENPD_NOTIFY_ON: + if (qpt->irq_enabled) + break; + now = ktime_get(); + diff = ktime_to_ms(ktime_sub(now, qpt->hw_read_ts)); + if (diff > QPT_SDAM_SAMPLING_MS) { + list_for_each_entry(qpt_dev, &qpt->qpt_dev_head, + qpt_node) { + qpt_dev->last_data = 0; + qpt_dev->last_data_uw = 0; + } + } + enable_irq(qpt->irq); + qpt->irq_enabled = true; + break; + default: + break; + } +cb_exit: + return NOTIFY_OK; +} + +/* Register for genpd on/off notifications; lowest priority runs last. */ +static int qti_qpt_pd_notifier_register(struct qpt_priv *qpt, struct device *dev) +{ + int ret; + + pm_runtime_enable(dev); + qpt->genpd_nb.notifier_call = qpt_pd_callback; + qpt->genpd_nb.priority = INT_MIN; + ret = dev_pm_genpd_add_notifier(dev, &qpt->genpd_nb); + if (ret) + pm_runtime_disable(dev); + return ret; +} + +/* Parse all DT inputs: ppid map, SDAMs, IRQ, optional power domain. */ +static int qpt_parse_dt(struct qpt_priv *qpt) +{ + struct platform_device *pdev; + int rc = 0; + struct device_node *np = qpt->dev->of_node; + + /* NOTE(review): of_find_device_by_node takes a reference on pdev that + * is never released on any path below — TODO confirm/put_device. */ + pdev = of_find_device_by_node(np); + if (!pdev) { + dev_err(qpt->dev, "Invalid pdev\n"); + return -ENODEV; + } + + rc = of_property_count_strings(np, "qcom,reg-ppid-names"); + if (rc
< 1 || rc >= QPT_POWER_CH_MAX) { + dev_err(qpt->dev, + "Invalid ppid name mapping count, rc=%d\n", rc); + return rc; + } + qpt->dt_reg_cnt = rc; + + /* Names and ids arrays must be the same length. */ + rc = of_property_count_elems_of_size(np, "qcom,reg-ppid-ids", + sizeof(u16)); + if (rc < 1 || rc >= QPT_POWER_CH_MAX || rc != qpt->dt_reg_cnt) { + dev_err(qpt->dev, + "Invalid ppid mapping count, rc = %d strings:%d\n", + rc, qpt->dt_reg_cnt); + return rc; + } + + rc = of_property_read_u16_array(np, "qcom,reg-ppid-ids", + qpt->reg_ppid_map, qpt->dt_reg_cnt); + if (rc < 0) { + dev_err(qpt->dev, + "Failed to read ppid mapping array, rc = %d\n", rc); + return rc; + } + + rc = qpt_parse_sdam_data(qpt); + if (rc < 0) + return rc; + + rc = platform_get_irq(pdev, 0); + if (rc <= 0) { + dev_err(qpt->dev, "Failed to get qpt irq, rc=%d\n", rc); + return -EINVAL; + } + qpt->irq = rc; + + /* Power-domain notifier is optional; only hook it when DT declares one. */ + if (of_find_property(np, "power-domains", NULL) && pdev->dev.pm_domain) { + rc = qti_qpt_pd_notifier_register(qpt, &pdev->dev); + if (rc) { + dev_err(qpt->dev, "Failed to register for pd notifier\n"); + return rc; + } + } + + return 0; +} + +/* One-time hardware bring-up: DT parse, config SDAM parse, IRQ request, + * then an initial cache fill for every channel. Idempotent via ->initialized. + */ +static int qti_qpt_hw_init(struct qpt_priv *qpt) +{ + int rc; + + if (qpt->initialized) + return 0; + + mutex_init(&qpt->hw_read_lock); + INIT_LIST_HEAD(&qpt->qpt_dev_head); + + rc = qpt_parse_dt(qpt); + if (rc < 0) { + dev_err(qpt->dev, "Failed to parse qpt rc=%d\n", rc); + return rc; + } + + rc = qti_qpt_config_sdam_initialize(qpt); + if (rc < 0) { + dev_err(qpt->dev, "Failed to parse config sdam rc=%d\n", rc); + return rc; + } + atomic_set(&qpt->in_suspend, 0); + + rc = devm_request_threaded_irq(qpt->dev, qpt->irq, + NULL, qpt_sdam_irq_handler, + IRQF_ONESHOT, "qti_qpt_irq", qpt); + if (rc < 0) { + dev_err(qpt->dev, + "Failed to request IRQ for qpt, rc=%d\n", rc); + return rc; + } + irq_set_status_flags(qpt->irq, IRQ_DISABLE_UNLAZY); + qpt->irq_enabled = true; + + qpt->initialized = true; + /* Update first reading for all channels */ + qti_qpt_read_data_update(qpt); + + return 0; +} + +static int
qti_qpt_suspend(struct qpt_priv *qpt) +{ + /* Block pd-notifier IRQ toggling, then quiesce the IRQ ourselves. */ + atomic_set(&qpt->in_suspend, 1); + + if (qpt->irq_enabled) { + disable_irq_nosync(qpt->irq); + qpt->irq_enabled = false; + } + + return 0; +} + +static int qti_qpt_resume(struct qpt_priv *qpt) +{ + struct qpt_device *qpt_dev = NULL; + ktime_t now; + s64 diff; + + /* Invalidate cached samples that are older than one sampling period. */ + now = ktime_get(); + diff = ktime_to_ms(ktime_sub(now, qpt->hw_read_ts)); + if (diff > QPT_SDAM_SAMPLING_MS) { + list_for_each_entry(qpt_dev, &qpt->qpt_dev_head, + qpt_node) { + qpt_dev->last_data = 0; + qpt_dev->last_data_uw = 0; + } + } + if (!qpt->irq_enabled) { + enable_irq(qpt->irq); + qpt->irq_enabled = true; + } + atomic_set(&qpt->in_suspend, 0); + + return 0; +} + +/* NOTE(review): runs unconditionally even when the pd notifier was never + * registered (no power-domains in DT) — confirm both calls are safe then. */ +static void qti_qpt_hw_release(struct qpt_priv *qpt) +{ + pm_runtime_disable(qpt->dev); + dev_pm_genpd_remove_notifier(qpt->dev); +} + +struct qpt_ops qpt_hw_ops = { + .init = qti_qpt_hw_init, + .get_power = qti_qpt_get_power, + .suspend = qti_qpt_suspend, + .resume = qti_qpt_resume, + .release = qti_qpt_hw_release, +}; + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Telemetry driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/powercap/qti_power_telemetry.h b/drivers/powercap/qti_power_telemetry.h new file mode 100644 index 000000000000..6ec6a3e9a082 --- /dev/null +++ b/drivers/powercap/qti_power_telemetry.h @@ -0,0 +1,194 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __QCOM_QPT_H__ +#define __QCOM_QPT_H__ + +#include +#include +#include + +struct qpt_priv; +struct qpt_device; + +#define IPC_LOGPAGES 10 +#define QPT_DBG(qpt, msg, args...)
do { \ + /* NOTE(review): dev_dbg dereferences qpt before the NULL \ + * check below — the guard implies qpt may be NULL; confirm. \ + */ \ + dev_dbg(qpt->dev, "%s:" msg, __func__, args); \ + if ((qpt) && (qpt)->ipc_log) { \ + ipc_log_string((qpt)->ipc_log, \ + "[%s] "msg"\n", \ + current->comm, args); \ + } \ + } while (0) + +#define QPT_REG_NAME_LENGTH 32 +#define QPT_POWER_CH_MAX 48 +#define QPT_TZ_CH_MAX 8 +#define QPT_MAX_DATA_MAX 10 + +/* Different qpt modes of operation */ +enum qpt_mode { + QPT_ACAT_MODE, + QPT_RCM_MODE, + QPT_MODE_MAX +}; + +/* Different qpt sdam IDs to use as an index into an array */ +enum qpt_sdam_id { + CONFIG_SDAM, + DATA_AVG_SDAM, + MAX_QPT_SDAM +}; + +/* Data sdam field IDs to use as an index into an array */ +enum data_sdam_field_ids { + DATA_SDAM_SEQ_START, + DATA_SDAM_SEQ_END, + DATA_SDAM_NUM_RECORDS, + DATA_SDAM_RTC0, + DATA_SDAM_RTC1, + DATA_SDAM_RTC2, + DATA_SDAM_RTC3, + DATA_SDAM_VPH_LSB, + DATA_SDAM_VPH_MSB, + DATA_SDAM_DIE_TEMP_SID1, + DATA_SDAM_DIE_TEMP_SID8 = DATA_SDAM_DIE_TEMP_SID1 + QPT_TZ_CH_MAX - 1, + DATA_SDAM_POWER_LSB_CH1, + DATA_SDAM_POWER_MSB_CH1, + DATA_SDAM_POWER_LSB_CH48 = DATA_SDAM_POWER_LSB_CH1 + 2 * (QPT_POWER_CH_MAX - 1), + DATA_SDAM_POWER_MSB_CH48, + MAX_SDAM_DATA +}; + +/* config sdam field IDs to use as an index into an array */ +enum config_sdam_field_ids { + CONFIG_SDAM_QPT_MODE, + CONFIG_SDAM_QPT_STATUS, + CONFIG_SDAM_MAX_DATA, + CONFIG_SDAM_MEAS_CFG, + CONFIG_SDAM_LAST_FULL_SDAM, + CONFIG_SDAM_CONFIG_1, + CONFIG_SDAM_PID_1, + CONFIG_SDAM_CONFIG_48 = CONFIG_SDAM_CONFIG_1 + 2 * (QPT_POWER_CH_MAX - 1), + MAX_CONFIG_SDAM_DATA +}; + +/** + * struct qpt_sdam - QPT sdam data structure + * @id: QPT sdam id type + * @nvmem: Pointer to nvmem device + * @lock: lock to protect multiple read concurrently + * @last_data: last full read data copy for current sdam + */ +struct qpt_sdam { + enum qpt_sdam_id id; + struct nvmem_device *nvmem; + struct mutex lock; + uint8_t last_data[MAX_CONFIG_SDAM_DATA]; +}; + +/** + * struct qpt_device - Each regulator channel device data + * @qpt_node: qpt device list head member to traverse all devices + *
@priv: qpt hardware instance that this channel is connected to + * @pz: array of powercap zone data types for different data retrieval + * @name: name of the regulator which is used to identify channel + * @enabled: qpt channel is enabled or not + * @sid: qpt channel SID + * @pid: qpt channel PID + * @gang_num: qpt channel gang_num + * @data_offset: qpt channel power data offset from DATA sdam base + * @last_data: qpt channel last 1S data + * @last_data_uw: qpt channel last 10S average data + * @lock: lock to protect multiple client read concurrently + */ +struct qpt_device { + struct list_head qpt_node; + struct qpt_priv *priv; + struct powercap_zone pz; + char name[QPT_REG_NAME_LENGTH]; + bool enabled; + uint8_t sid; + uint8_t pid; + uint8_t gang_num; + uint8_t data_offset; + uint16_t last_data; + u64 last_data_uw; + struct mutex lock; +}; + +/** + * struct qpt_priv - Structure for QPT hardware private data + * @dev: Pointer for QPT device + * @mode: enum to give current mode of operation + * @sdam: Pointer for array of QPT sdams + * @pct: pointer to powercap control type + * @irq: qpt sdam pbs irq number + * @num_sdams: Number of SDAMs used for QPT from DT + * @num_reg: Number of regulator based on config sdam + * @max_data: QPT hardware max_data configuration + * @reg_ppid_map: array of regulator/rail PPID from devicetree + * @dt_reg_cnt: Number of regulator count in devicetree + * @last_ch_offset: Last enabled data channel offset + * @initialized: QPT hardware initialization is done if it is true + * @irq_enabled: The qpt irq enable/disable status + * @in_suspend: The QPT driver suspend status + * @ops: QPT hardware supported ops + * @config_sdam_data: Config sdam data dump collected at init + * @ipc_log: Handle to ipc_logging + * @hw_read_ts: Timestamp collected just after qpt irq data update + * @rtc_ts: RTC Timestamp collected just after qpt irq data update + * @qpt_dev_head: List head for all qpt channel devices + * @hw_read_lock: lock to protect avg data
update and client request + * @genpd_nb: Genpd notifier for apps idle notification + */ +struct qpt_priv { + struct device *dev; + enum qpt_mode mode; + struct qpt_sdam *sdam; + struct powercap_control_type *pct; + int irq; + u32 num_sdams; + u32 num_reg; + u8 max_data; + u16 reg_ppid_map[QPT_POWER_CH_MAX]; + u8 dt_reg_cnt; + u8 last_ch_offset; + bool initialized; + bool irq_enabled; + atomic_t in_suspend; + struct qpt_ops *ops; + uint8_t *config_sdam_data; + void *ipc_log; + u64 hw_read_ts; + u64 rtc_ts; + struct list_head qpt_dev_head; + struct mutex hw_read_lock; + struct notifier_block genpd_nb; +}; + +/** + * struct qpt_ops - Structure for QPT hardware supported ops + * @init: QPT hardware init function + * @get_mode: Function to get current QPT operation mode + * @get_power: Function to get power for QPT channel in us for a given type + * @get_max_power: Function to get max power which QPT channel can deliver + * @release: Function to clear all QPT data on exit + * @suspend: Function to execute QPT during suspend callback if any + * @resume: Function to restore QPT durng resume callback if any + */ +struct qpt_ops { + int (*init)(struct qpt_priv *priv); + void (*get_power)(struct qpt_device *qpt_dev, u64 *power); + int (*get_max_power)(const struct qpt_device *qpt_dev, u64 *max_power); + void (*release)(struct qpt_priv *qpt); + int (*suspend)(struct qpt_priv *qpt); + int (*resume)(struct qpt_priv *qpt); +}; + +extern struct qpt_ops qpt_hw_ops; +extern void qpt_sysfs_notify(struct qpt_priv *qpt); + +#endif /* __QCOM_QPT_H__ */ diff --git a/drivers/powercap/qti_power_telemetry_interface.c b/drivers/powercap/qti_power_telemetry_interface.c new file mode 100644 index 000000000000..e4d8d0bb6b2f --- /dev/null +++ b/drivers/powercap/qti_power_telemetry_interface.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */ + +#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__ + +#include +#include +#include +#include +#include +#include "qti_power_telemetry.h" + +#define QPT_HW "qti-qpt-hw" + +static inline struct qpt_device *to_qpt_dev_pz(struct powercap_zone *pz) +{ + return container_of(pz, struct qpt_device, pz); +} + +static const char * const constraint_name[] = { + "dummy", +}; + +/* Poke the powercap control-type "enabled" attribute for userspace. */ +void qpt_sysfs_notify(struct qpt_priv *qpt) +{ + struct powercap_control_type *pct; + + if (!qpt || !qpt->pct) + return; + + pct = qpt->pct; + sysfs_notify(&pct->dev.kobj, NULL, "enabled"); +} + +static int qpt_suspend(struct device *dev) +{ + /* + * FIX: probe stores struct qpt_priv * as drvdata (dev_set_drvdata(qpt->dev, + * qpt)); the old code cast it to struct qpt_device * and dereferenced + * ->priv from the wrong type, reading garbage. + */ + struct qpt_priv *qpt = dev_get_drvdata(dev); + + if (qpt->ops->suspend) + return qpt->ops->suspend(qpt); + + return 0; +} + +static int qpt_resume(struct device *dev) +{ + /* FIX: same drvdata type confusion as qpt_suspend() above. */ + struct qpt_priv *qpt = dev_get_drvdata(dev); + + if (qpt->ops->resume) + return qpt->ops->resume(qpt); + + return 0; +} + +static int qpt_get_time_window_us(struct powercap_zone *pcz, int cid, u64 *window) +{ + return -EOPNOTSUPP; +} + +static int qpt_set_time_window_us(struct powercap_zone *pcz, int cid, u64 window) +{ + return -EOPNOTSUPP; +} + +static int qpt_get_max_power_range_uw(struct powercap_zone *pcz, u64 *max_power_uw) +{ + struct qpt_device *qpt_dev = to_qpt_dev_pz(pcz); + struct qpt_priv *qpt = qpt_dev->priv; + + if (qpt->ops->get_max_power) + qpt->ops->get_max_power(qpt_dev, max_power_uw); + + return 0; +} + +static int qpt_get_power_uw(struct powercap_zone *pcz, u64 *power_uw) +{ + struct qpt_device *qpt_dev = to_qpt_dev_pz(pcz); + struct qpt_priv *qpt = qpt_dev->priv; + + if (qpt->ops->get_power) + qpt->ops->get_power(qpt_dev, power_uw); + else + return -EOPNOTSUPP; + + return 0; +} + +static int qpt_release_zone(struct powercap_zone *pcz) +{ + struct qpt_device *qpt_dev = to_qpt_dev_pz(pcz); + struct qpt_priv *qpt = qpt_dev->priv; + + if (qpt->ops->release) +
qpt->ops->release(qpt); + + return 0; +} + +static int qpt_get_power_limit_uw(struct powercap_zone *pcz, + int cid, u64 *power_limit) +{ + return -EOPNOTSUPP; +} + +static int qpt_set_power_limit_uw(struct powercap_zone *pcz, + int cid, u64 power_limit) +{ + return -EOPNOTSUPP; +} + +static const char *get_constraint_name(struct powercap_zone *pcz, int cid) +{ + return constraint_name[cid]; +} + +static int qpt_get_max_power_uw(struct powercap_zone *pcz, int id, u64 *max_power) +{ + struct qpt_device *qpt_dev = to_qpt_dev_pz(pcz); + struct qpt_priv *qpt = qpt_dev->priv; + + if (qpt->ops->get_max_power) + return qpt->ops->get_max_power(qpt_dev, max_power); + else + return -EOPNOTSUPP; +} + +static struct powercap_zone_constraint_ops constraint_ops = { + .set_power_limit_uw = qpt_set_power_limit_uw, + .get_power_limit_uw = qpt_get_power_limit_uw, + .set_time_window_us = qpt_set_time_window_us, + .get_time_window_us = qpt_get_time_window_us, + .get_max_power_uw = qpt_get_max_power_uw, + .get_name = get_constraint_name, +}; + +static struct powercap_zone_ops zone_ops = { + .get_max_power_range_uw = qpt_get_max_power_range_uw, + .get_power_uw = qpt_get_power_uw, + .release = qpt_release_zone, +}; + +/* Register the "qpt" control type and one zone per enabled channel. + * NOTE(review): on zone-registration failure the control type (and any + * zones already registered) are not unregistered here — confirm the + * caller's error path covers that. + */ +static int powercap_register(struct qpt_priv *qpt) +{ + struct qpt_device *qpt_dev; + struct powercap_zone *pcz = NULL; + + qpt->pct = powercap_register_control_type(NULL, "qpt", NULL); + if (IS_ERR(qpt->pct)) { + dev_err(qpt->dev, "Failed to register control type\n"); + return PTR_ERR(qpt->pct); + } + + list_for_each_entry(qpt_dev, &qpt->qpt_dev_head, qpt_node) { + if (!qpt_dev->enabled) + continue; + + pcz = powercap_register_zone(&qpt_dev->pz, qpt->pct, + qpt_dev->name, NULL, &zone_ops, 1, + &constraint_ops); + if (IS_ERR(pcz)) + return PTR_ERR(pcz); + } + return 0; +} + +static int qpt_hw_device_probe(struct platform_device *pdev) +{ + int ret; + struct qpt_priv *qpt; + + qpt = devm_kzalloc(&pdev->dev, sizeof(*qpt), GFP_KERNEL); + if (!qpt) + return -ENOMEM; + + qpt->dev
= &pdev->dev; + qpt->ops = &qpt_hw_ops; + + /* Best-effort IPC log; probe continues without it. + * NOTE(review): the ipc_log context is never destroyed on probe + * failure or in remove — confirm intentional. */ + qpt->ipc_log = ipc_log_context_create(IPC_LOGPAGES, "Qpt", 0); + if (!qpt->ipc_log) + dev_err(qpt->dev, "%s: unable to create IPC Logging for %s\n", + __func__, "qti_qpt"); + + + if (!qpt->ops || !qpt->ops->init || + !qpt->ops->get_power || !qpt->ops->release) + return -EINVAL; + + ret = qpt->ops->init(qpt); + if (ret < 0) { + dev_err(&pdev->dev, "%s: init failed\n", __func__); + return ret; + } + + /* drvdata is the struct qpt_priv pointer on both device handles. */ + platform_set_drvdata(pdev, qpt); + dev_set_drvdata(qpt->dev, qpt); + + return powercap_register(qpt); +} + +static int qpt_hw_device_remove(struct platform_device *pdev) +{ + struct qpt_priv *qpt = platform_get_drvdata(pdev); + struct qpt_device *qpt_dev; + + list_for_each_entry(qpt_dev, &qpt->qpt_dev_head, qpt_node) { + if (qpt->pct) + powercap_unregister_zone(qpt->pct, + &qpt_dev->pz); + } + if (qpt->pct) + powercap_unregister_control_type(qpt->pct); + + if (qpt->ops->release) + qpt->ops->release(qpt); + + return 0; +} + +static const struct dev_pm_ops qpt_pm_ops = { + .suspend = qpt_suspend, + .resume = qpt_resume, +}; + +static const struct of_device_id qpt_hw_device_match[] = { + {.compatible = "qcom,power-telemetry"}, + {} +}; + +static struct platform_driver qpt_hw_device_driver = { + .probe = qpt_hw_device_probe, + .remove = qpt_hw_device_remove, + .driver = { + .name = QPT_HW, + .pm = &qpt_pm_ops, + .of_match_table = qpt_hw_device_match, + }, +}; + +module_platform_driver(qpt_hw_device_driver); + +MODULE_DESCRIPTION("Qualcomm Technologies, Inc.
Power Telemetry driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/ap72200-regulator.c b/drivers/regulator/ap72200-regulator.c index 36bf36c9567a..9242247c3724 100644 --- a/drivers/regulator/ap72200-regulator.c +++ b/drivers/regulator/ap72200-regulator.c @@ -39,18 +39,26 @@ static const struct regmap_config ap27700_regmap_config = { .max_register = 0x4, }; +#define AP72200_MAX_WRITE_RETRIES 4 + static int ap72200_vreg_enable(struct regulator_dev *rdev) { struct ap72200_vreg *vreg = rdev_get_drvdata(rdev); - int rc, val; + int rc, val, retries; gpiod_set_value_cansleep(vreg->ena_gpiod, 1); val = DIV_ROUND_UP(vreg->rdesc.fixed_uV - AP72200_MIN_UV, AP72200_STEP_UV); - /* Set the voltage */ - rc = regmap_write(vreg->regmap, AP72200_VSEL_REG_ADDR, - val); + retries = AP72200_MAX_WRITE_RETRIES; + do { + /* Set the voltage */ + rc = regmap_write(vreg->regmap, AP72200_VSEL_REG_ADDR, + val); + if (!rc) + break; + } while (retries--); + if (rc) { dev_err(vreg->dev, "Failed to set voltage rc: %d\n", rc); return rc; diff --git a/drivers/rpmsg/virtio_glink_cma.c b/drivers/rpmsg/virtio_glink_cma.c index 82132ac7b93f..6dc96525da54 100644 --- a/drivers/rpmsg/virtio_glink_cma.c +++ b/drivers/rpmsg/virtio_glink_cma.c @@ -141,7 +141,7 @@ static int virtio_glink_bridge_send_msg(struct virtio_glink_bridge *vgbridge, memset(msg, 0, sizeof(*msg)); msg->type = cpu_to_virtio32(vdev, msg_type); msg->label = cpu_to_virtio32(vdev, label); - sg_init_one(&sg, msg, sizeof(*msg)); + sg_init_one(&sg, msg, sizeof(struct virtio_glink_bridge_msg)); rc = virtqueue_add_inbuf(vgbridge->vq, &sg, 1, msg, GFP_KERNEL); if (rc) { @@ -167,7 +167,7 @@ static int virtio_glink_bridge_send_msg_ack(struct virtio_glink_bridge *vgbridge ack->type = cpu_to_virtio32(vdev, msg_type); ack->label = cpu_to_virtio32(vdev, label); ack->status = cpu_to_virtio32(vdev, status); - sg_init_one(&sg, ack, sizeof(*ack)); + sg_init_one(&sg, ack, sizeof(struct virtio_glink_bridge_msg)); rc = 
virtqueue_add_inbuf(vgbridge->vq, &sg, 1, ack, GFP_KERNEL); if (rc) { diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 67b3cd43ad8e..f63820f1971f 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -107,6 +107,22 @@ config QCOM_DCC_V2 driver provides interface to configure DCC block and read back captured data from DCC's internal SRAM. +config MSM_SPM + bool "Driver support for SPM and AVS wrapper hardware" + help + Enables the support for SPM and AVS wrapper hardware on MSMs. SPM + hardware is used to manage the processor power during sleep. The + driver allows configuring SPM to allow different low power modes for + both core and L2. + +config MSM_L2_SPM + bool "SPM support for L2 cache" + help + Enable SPM driver support for L2 cache. Some MSM chipsets allow + control of L2 cache low power mode with a Subsystem Power manager. + Enabling this driver allows configuring L2 SPM for low power modes + on supported chipsets. + config QCOM_GENI_SE tristate "QCOM GENI Serial Engine Driver" depends on ARCH_QCOM || COMPILE_TEST diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 0f25fd53f22b..f58f50c53cec 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -60,6 +60,7 @@ obj-$(CONFIG_MSM_CORE_HANG_DETECT) += core_hang_detect.o obj-$(CONFIG_USB_BAM) += usb_bam.o obj-$(CONFIG_QCOM_CPU_VENDOR_HOOKS) += qcom_cpu_vendor_hooks.o obj-$(CONFIG_QTI_CRYPTO_COMMON) += crypto-qti.o +obj-$(CONFIG_MSM_SPM) += msm-spm.o spm_devices.o crypto-qti-y += crypto-qti-common.o crypto-qti-$(CONFIG_QTI_CRYPTO_TZ) += crypto-qti-tz.o crypto-qti-$(CONFIG_QTI_HW_KEY_MANAGER) += crypto-qti-hwkm.o diff --git a/drivers/soc/qcom/hab/hab_mem_linux.c b/drivers/soc/qcom/hab/hab_mem_linux.c index a01057a513e7..74c01eeafd72 100644 --- a/drivers/soc/qcom/hab/hab_mem_linux.c +++ b/drivers/soc/qcom/hab/hab_mem_linux.c @@ -257,7 +257,8 @@ static struct dma_buf *habmem_get_dma_buf_from_uva(unsigned long address, int page_count) {
struct page **pages = NULL; - int i, ret = 0; + struct vm_area_struct *vma = NULL; + int i, ret, page_nr = 0; struct dma_buf *dmabuf = NULL; struct pages_list *pglist = NULL; DEFINE_DMA_BUF_EXPORT_INFO(exp_info); @@ -276,14 +277,40 @@ static struct dma_buf *habmem_get_dma_buf_from_uva(unsigned long address, mmap_read_lock(current->mm); - ret = get_user_pages(address, page_count, 0, pages, NULL); + /* + * Need below sanity checks: + * 1. input uva is covered by an existing VMA of the current process + * 2. the given uva range is fully covered in the same VMA + */ + /* range_in_vma() tolerates a NULL vma from the failed lookup. */ + vma = vma_lookup(current->mm, address); + if (!range_in_vma(vma, address, address + page_count * PAGE_SIZE)) { + mmap_read_unlock(current->mm); + pr_err("input uva [0x%lx, 0x%lx) not covered in one VMA. UVA or size(%d) is invalid\n", + address, address + page_count * PAGE_SIZE, page_count * PAGE_SIZE); + ret = -EINVAL; + goto err; + } + page_nr = get_user_pages(address, page_count, 0, pages, NULL); mmap_read_unlock(current->mm); - if (ret <= 0) { + if (page_nr <= 0) { ret = -EINVAL; pr_err("get %d user pages failed %d\n", - page_count, ret); + page_count, page_nr); + goto err; + } + + /* + * The actual number of the pinned pages is returned by get_user_pages. + * It may not match with the requested number.
+ */ + if (page_nr != page_count) { + ret = -EINVAL; + pr_err("input page cnt %d not match with pinned %d\n", page_count, page_nr); + /* Unpin the partially-pinned pages before bailing out. */ + for (i = 0; i < page_nr; i++) + put_page(pages[i]); + goto err; } @@ -306,6 +333,7 @@ static struct dma_buf *habmem_get_dma_buf_from_uva(unsigned long address, ret = PTR_ERR(dmabuf); goto err; } + + return dmabuf; err: diff --git a/drivers/soc/qcom/hgsl/hgsl.c b/drivers/soc/qcom/hgsl/hgsl.c index 257e45250050..5b608de1b407 100644 --- a/drivers/soc/qcom/hgsl/hgsl.c +++ b/drivers/soc/qcom/hgsl/hgsl.c @@ -2282,7 +2282,7 @@ static int hgsl_ioctl_mem_alloc( struct hgsl_priv *priv = filep->private_data; struct hgsl_ioctl_mem_alloc_params *params = data; struct qcom_hgsl *hgsl = priv->dev; - int ret = 0; + int ret = 0, mem_fd = -1; struct hgsl_mem_node *mem_node = NULL; struct hgsl_hab_channel_t *hab_channel = NULL; @@ -2298,6 +2298,13 @@ static int hgsl_ioctl_mem_alloc( goto out; } + /* Reserve the fd up front so failure cannot happen after the dma_buf + * reference is installed. */ + mem_fd = get_unused_fd_flags(O_CLOEXEC); + if (mem_fd < 0) { + LOGE("no available fd %d", mem_fd); + ret = -EMFILE; + goto out; + } + mem_node = hgsl_mem_node_zalloc(hgsl->default_iocoherency); + if (mem_node == NULL) { + ret = -ENOMEM; + goto out; + } @@ -2316,30 +2323,34 @@ static int hgsl_ioctl_mem_alloc( if (ret) goto out; - /* increase reference count before install fd. */ - get_dma_buf(mem_node->dma_buf); - params->fd = dma_buf_fd(mem_node->dma_buf, O_CLOEXEC); - - if (params->fd < 0) { - LOGE("dma_buf_fd failed, size 0x%x", mem_node->memdesc.size); - ret = -EINVAL; - dma_buf_put(mem_node->dma_buf); - goto out; - } if (copy_to_user(USRPTR(params->memdesc), &mem_node->memdesc, sizeof(mem_node->memdesc))) { ret = -EFAULT; goto out; } + + /* increase reference count before install fd.
*/ + get_dma_buf(mem_node->dma_buf); mutex_lock(&priv->lock); - list_add(&mem_node->node, &priv->mem_allocated); - hgsl_trace_gpu_mem_total(priv, mem_node->memdesc.size64); + ret = hgsl_mem_add_node(&priv->mem_allocated, mem_node); + if (unlikely(ret)) + dma_buf_put(mem_node->dma_buf); + else { + /* Publish the pre-reserved fd only once bookkeeping succeeded. */ + params->fd = mem_fd; + fd_install(params->fd, mem_node->dma_buf->file); + hgsl_trace_gpu_mem_total(priv, mem_node->memdesc.size64); + } mutex_unlock(&priv->lock); out: - if (ret && mem_node) { - hgsl_hyp_mem_unmap_smmu(hab_channel, mem_node); - hgsl_sharedmem_free(mem_node); + if (ret) { + if (mem_node) { + hgsl_hyp_mem_unmap_smmu(hab_channel, mem_node); + hgsl_sharedmem_free(mem_node); + } + + if (mem_fd >= 0) + put_unused_fd(mem_fd); } hgsl_hyp_channel_pool_put(hab_channel); return ret; @@ -2354,7 +2365,6 @@ static int hgsl_ioctl_mem_free( struct gsl_memdesc_t memdesc; int ret = 0; struct hgsl_mem_node *node_found = NULL; - struct hgsl_mem_node *tmp = NULL; struct hgsl_hab_channel_t *hab_channel = NULL; ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel); @@ -2371,16 +2381,11 @@ static int hgsl_ioctl_mem_free( } mutex_lock(&priv->lock); - list_for_each_entry(tmp, &priv->mem_allocated, node) { - if ((tmp->memdesc.gpuaddr == memdesc.gpuaddr) - && (tmp->memdesc.size == memdesc.size)) { - node_found = tmp; - list_del(&node_found->node); - break; - } - } + /* rb-tree lookup replaces the old O(n) list walk. */ + node_found = hgsl_mem_find_node_locked(&priv->mem_allocated, + memdesc.gpuaddr, memdesc.size64, true); + if (node_found) + rb_erase(&node_found->mem_rb_node, &priv->mem_allocated); mutex_unlock(&priv->lock); - if (node_found) { ret = hgsl_hyp_mem_unmap_smmu(hab_channel, node_found); if (!ret) { @@ -2390,14 +2395,14 @@ static int hgsl_ioctl_mem_free( } else { LOGE("hgsl_hyp_mem_unmap_smmu failed %d", ret); mutex_lock(&priv->lock); - list_add(&node_found->node, &priv->mem_allocated); + /* Re-insert so the node is not leaked on unmap failure. */ + ret = hgsl_mem_add_node(&priv->mem_allocated, node_found); mutex_unlock(&priv->lock); + if (unlikely(ret)) + LOGE("unlikely to
get here! %d", ret); } - } else { + } else LOGE("can't find the memory 0x%llx, 0x%x", memdesc.gpuaddr, memdesc.size); - goto out; - } out: hgsl_hyp_channel_pool_put(hab_channel); @@ -2413,6 +2418,7 @@ static int hgsl_ioctl_set_metainfo( int ret = 0; struct hgsl_mem_node *mem_node = NULL; struct hgsl_mem_node *tmp = NULL; + struct rb_node *rb = NULL; char metainfo[HGSL_MEM_META_MAX_SIZE] = {0}; if (params->metainfo_len > HGSL_MEM_META_MAX_SIZE) { @@ -2429,7 +2435,8 @@ static int hgsl_ioctl_set_metainfo( metainfo[HGSL_MEM_META_MAX_SIZE - 1] = '\0'; mutex_lock(&priv->lock); - list_for_each_entry(tmp, &priv->mem_allocated, node) { + for (rb = rb_first(&priv->mem_allocated); rb; rb = rb_next(rb)) { + tmp = rb_entry(rb, struct hgsl_mem_node, mem_rb_node); if (tmp->memdesc.priv64 == params->memdesc_priv) { mem_node = tmp; break; @@ -2482,19 +2489,21 @@ static int hgsl_ioctl_mem_map_smmu( mem_node->memtype = params->memtype; ret = hgsl_hyp_mem_map_smmu(hab_channel, params->size, params->offset, mem_node); + if (ret) + goto out; - if (ret == 0) { - if (copy_to_user(USRPTR(params->memdesc), &mem_node->memdesc, - sizeof(mem_node->memdesc))) { - ret = -EFAULT; - goto out; - } - mutex_lock(&priv->lock); - list_add(&mem_node->node, &priv->mem_mapped); - hgsl_trace_gpu_mem_total(priv, mem_node->memdesc.size64); - mutex_unlock(&priv->lock); + if (copy_to_user(USRPTR(params->memdesc), &mem_node->memdesc, + sizeof(mem_node->memdesc))) { + ret = -EFAULT; + goto out; } + mutex_lock(&priv->lock); + ret = hgsl_mem_add_node(&priv->mem_mapped, mem_node); + if (likely(!ret)) + hgsl_trace_gpu_mem_total(priv, mem_node->memdesc.size64); + mutex_unlock(&priv->lock); + out: if (ret) { hgsl_hyp_mem_unmap_smmu(hab_channel, mem_node); @@ -2512,7 +2521,6 @@ static int hgsl_ioctl_mem_unmap_smmu( struct hgsl_ioctl_mem_unmap_smmu_params *params = data; int ret = 0; struct hgsl_mem_node *node_found = NULL; - struct hgsl_mem_node *tmp = NULL; struct hgsl_hab_channel_t *hab_channel = NULL; ret = 
hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel); @@ -2522,31 +2530,29 @@ static int hgsl_ioctl_mem_unmap_smmu( } mutex_lock(&priv->lock); - list_for_each_entry(tmp, &priv->mem_mapped, node) { - if ((tmp->memdesc.gpuaddr == params->gpuaddr) - && (tmp->memdesc.size == params->size)) { - node_found = tmp; - list_del(&node_found->node); - break; - } - } + /* rb-tree lookup replaces the old O(n) list walk. */ + node_found = hgsl_mem_find_node_locked(&priv->mem_mapped, + params->gpuaddr, params->size, true); + if (node_found) + rb_erase(&node_found->mem_rb_node, &priv->mem_mapped); mutex_unlock(&priv->lock); if (node_found) { hgsl_put_sgt(node_found, false); ret = hgsl_hyp_mem_unmap_smmu(hab_channel, node_found); - if (ret) { - mutex_lock(&priv->lock); - list_add(&node_found->node, &priv->mem_mapped); - mutex_unlock(&priv->lock); - } else { + if (!ret) { hgsl_trace_gpu_mem_total(priv, -(node_found->memdesc.size64)); hgsl_free(node_found); + } else { + LOGE("hgsl_hyp_mem_unmap_smmu failed %d", ret); + mutex_lock(&priv->lock); + /* Re-insert so the node is not leaked on unmap failure. */ + ret = hgsl_mem_add_node(&priv->mem_mapped, node_found); + mutex_unlock(&priv->lock); + if (unlikely(ret)) + LOGE("unlikely to get here!
%d", ret); } - } else { + } else ret = -EINVAL; - } out: hgsl_hyp_channel_pool_put(hab_channel); @@ -2573,15 +2579,16 @@ static int hgsl_ioctl_mem_cache_operation( } mutex_lock(&priv->lock); - node_found = hgsl_mem_find_base_locked(&priv->mem_allocated, - gpuaddr, params->sizebytes); + node_found = hgsl_mem_find_node_locked(&priv->mem_allocated, + gpuaddr, params->sizebytes, false); if (node_found) internal = true; else { - node_found = hgsl_mem_find_base_locked(&priv->mem_mapped, - gpuaddr, params->sizebytes); + node_found = hgsl_mem_find_node_locked(&priv->mem_mapped, + gpuaddr, params->sizebytes, false); if (!node_found) { - LOGE("failed to find node %d", ret); + LOGE("failed to find gpuaddr: 0x%llx size: 0x%llx", + gpuaddr, params->sizebytes); ret = -EINVAL; mutex_unlock(&priv->lock); goto out; @@ -2607,7 +2614,6 @@ static int hgsl_ioctl_mem_get_fd( struct hgsl_ioctl_mem_get_fd_params *params = data; struct gsl_memdesc_t memdesc; struct hgsl_mem_node *node_found = NULL; - struct hgsl_mem_node *tmp = NULL; int ret = 0; if (copy_from_user(&memdesc, USRPTR(params->memdesc), @@ -2618,28 +2624,25 @@ static int hgsl_ioctl_mem_get_fd( } mutex_lock(&priv->lock); - list_for_each_entry(tmp, &priv->mem_allocated, node) { - if ((tmp->memdesc.gpuaddr == memdesc.gpuaddr) - && (tmp->memdesc.size == memdesc.size)) { - node_found = tmp; - break; - } - } - params->fd = -1; - if (node_found && node_found->dma_buf) { + node_found = hgsl_mem_find_node_locked(&priv->mem_allocated, + memdesc.gpuaddr, memdesc.size64, true); + if (node_found && node_found->dma_buf) get_dma_buf(node_found->dma_buf); + else + ret = -EINVAL; + mutex_unlock(&priv->lock); + + params->fd = -1; + if (!ret) { params->fd = dma_buf_fd(node_found->dma_buf, O_CLOEXEC); if (params->fd < 0) { LOGE("dma buf to fd failed"); ret = -EINVAL; dma_buf_put(node_found->dma_buf); } - } else { + } else LOGE("can't find the memory 0x%llx, 0x%x, node_found:%p", memdesc.gpuaddr, memdesc.size, node_found); - ret = -EINVAL; - } - 
mutex_unlock(&priv->lock); out: return ret; @@ -3251,8 +3254,8 @@ static int hgsl_open(struct inode *inodep, struct file *filep) goto out; } - INIT_LIST_HEAD(&priv->mem_mapped); - INIT_LIST_HEAD(&priv->mem_allocated); + priv->mem_mapped = RB_ROOT; + priv->mem_allocated = RB_ROOT; mutex_init(&priv->lock); priv->pid = pid_nr; @@ -3279,33 +3282,27 @@ out: static int hgsl_cleanup(struct hgsl_priv *priv) { struct hgsl_mem_node *node_found = NULL; - struct hgsl_mem_node *tmp = NULL; - int ret; - bool need_notify = (!list_empty(&priv->mem_mapped) || - !list_empty(&priv->mem_allocated)); + struct rb_node *next = NULL; + int ret = 0; struct hgsl_hab_channel_t *hab_channel = NULL; - if (need_notify) { - ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel); - if (ret) - LOGE("Failed to get channel %d", ret); + if (hgsl_mem_rb_empty(priv)) + goto out; - ret = hgsl_hyp_notify_cleanup(hab_channel, HGSL_CLEANUP_WAIT_SLICE_IN_MS); - if (ret == -ETIMEDOUT) { - hgsl_hyp_channel_pool_put(hab_channel); - return ret; - } + ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel); + if (ret) { + LOGE("Failed to get channel %d", ret); + goto out; } + ret = hgsl_hyp_notify_cleanup(hab_channel, HGSL_CLEANUP_WAIT_SLICE_IN_MS); + if (ret == -ETIMEDOUT) + goto out; + mutex_lock(&priv->lock); - if ((hab_channel == NULL) && - (!list_empty(&priv->mem_mapped) || !list_empty(&priv->mem_allocated))) { - ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel); - if (ret) - LOGE("Failed to get channel %d", ret); - } - - list_for_each_entry_safe(node_found, tmp, &priv->mem_mapped, node) { + next = rb_first(&priv->mem_mapped); + while (next) { + node_found = rb_entry(next, struct hgsl_mem_node, mem_rb_node); hgsl_put_sgt(node_found, false); ret = hgsl_hyp_mem_unmap_smmu(hab_channel, node_found); if (ret) @@ -3313,22 +3310,30 @@ static int hgsl_cleanup(struct hgsl_priv *priv) node_found->export_id, node_found->memdesc.gpuaddr, ret); else hgsl_trace_gpu_mem_total(priv, 
-(node_found->memdesc.size64)); - list_del(&node_found->node); + + next = rb_next(&node_found->mem_rb_node); + rb_erase(&node_found->mem_rb_node, &priv->mem_mapped); hgsl_free(node_found); } - list_for_each_entry_safe(node_found, tmp, &priv->mem_allocated, node) { + + next = rb_first(&priv->mem_allocated); + while (next) { + node_found = rb_entry(next, struct hgsl_mem_node, mem_rb_node); ret = hgsl_hyp_mem_unmap_smmu(hab_channel, node_found); if (ret) LOGE("Failed to clean mapped buffer %u, 0x%llx, ret %d", node_found->export_id, node_found->memdesc.gpuaddr, ret); - list_del(&node_found->node); hgsl_trace_gpu_mem_total(priv, -(node_found->memdesc.size64)); + + next = rb_next(&node_found->mem_rb_node); + rb_erase(&node_found->mem_rb_node, &priv->mem_allocated); hgsl_sharedmem_free(node_found); } mutex_unlock(&priv->lock); +out: hgsl_hyp_channel_pool_put(hab_channel); - return 0; + return ret; } static int _hgsl_release(struct hgsl_priv *priv) diff --git a/drivers/soc/qcom/hgsl/hgsl.h b/drivers/soc/qcom/hgsl/hgsl.h index c4499830e08c..c5b8d9ac1327 100644 --- a/drivers/soc/qcom/hgsl/hgsl.h +++ b/drivers/soc/qcom/hgsl/hgsl.h @@ -192,8 +192,8 @@ struct hgsl_priv { struct list_head node; struct hgsl_hyp_priv_t hyp_priv; struct mutex lock; - struct list_head mem_mapped; - struct list_head mem_allocated; + struct rb_root mem_mapped; + struct rb_root mem_allocated; int open_count; atomic64_t total_mem_size; @@ -230,6 +230,12 @@ static inline bool hgsl_ts_ge(uint64_t a, uint64_t b, bool is64) return hgsl_ts32_ge((uint32_t)a, (uint32_t)b); } +static inline bool hgsl_mem_rb_empty(struct hgsl_priv *priv) +{ + return (RB_EMPTY_ROOT(&priv->mem_mapped) && + RB_EMPTY_ROOT(&priv->mem_allocated)); +} + /** * struct hgsl_hsync_timeline - A sync timeline attached under each hgsl context * @kref: Refcount to keep the struct alive diff --git a/drivers/soc/qcom/hgsl/hgsl_debugfs.c b/drivers/soc/qcom/hgsl/hgsl_debugfs.c index bd153eb60f9f..2ff0f39978b8 100644 --- 
a/drivers/soc/qcom/hgsl/hgsl_debugfs.c +++ b/drivers/soc/qcom/hgsl/hgsl_debugfs.c @@ -14,12 +14,14 @@ static int hgsl_client_mem_show(struct seq_file *s, void *unused) { struct hgsl_priv *priv = s->private; struct hgsl_mem_node *tmp = NULL; + struct rb_node *rb = NULL; seq_printf(s, "%16s %16s %10s %10s\n", "gpuaddr", "size", "flags", "type"); mutex_lock(&priv->lock); - list_for_each_entry(tmp, &priv->mem_allocated, node) { + for (rb = rb_first(&priv->mem_allocated); rb; rb = rb_next(rb)) { + tmp = rb_entry(rb, struct hgsl_mem_node, mem_rb_node); seq_printf(s, "%p %16llx %10x %10d\n", tmp->memdesc.gpuaddr, tmp->memdesc.size, @@ -37,6 +39,7 @@ static int hgsl_client_memtype_show(struct seq_file *s, void *unused) { struct hgsl_priv *priv = s->private; struct hgsl_mem_node *tmp = NULL; + struct rb_node *rb = NULL; int i; int memtype; @@ -71,7 +74,8 @@ static int hgsl_client_memtype_show(struct seq_file *s, void *unused) gpu_mem_types[i].size = 0; mutex_lock(&priv->lock); - list_for_each_entry(tmp, &priv->mem_allocated, node) { + for (rb = rb_first(&priv->mem_allocated); rb; rb = rb_next(rb)) { + tmp = rb_entry(rb, struct hgsl_mem_node, mem_rb_node); memtype = GET_MEMTYPE(tmp->flags); if (memtype < ARRAY_SIZE(gpu_mem_types)) gpu_mem_types[memtype].size += tmp->memdesc.size; diff --git a/drivers/soc/qcom/hgsl/hgsl_memory.c b/drivers/soc/qcom/hgsl/hgsl_memory.c index 25e7eae25ee7..30490365ae90 100644 --- a/drivers/soc/qcom/hgsl/hgsl_memory.c +++ b/drivers/soc/qcom/hgsl/hgsl_memory.c @@ -612,24 +612,6 @@ void hgsl_sharedmem_free(struct hgsl_mem_node *mem_node) } -struct hgsl_mem_node *hgsl_mem_find_base_locked(struct list_head *head, - uint64_t gpuaddr, uint64_t size) -{ - struct hgsl_mem_node *node_found = NULL; - struct hgsl_mem_node *tmp = NULL; - uint64_t end = gpuaddr + size; - - list_for_each_entry(tmp, head, node) { - if ((tmp->memdesc.gpuaddr <= gpuaddr) - && ((tmp->memdesc.gpuaddr + tmp->memdesc.size) >= end)) { - node_found = tmp; - break; - } - } - - return 
node_found; -} - void *hgsl_mem_node_zalloc(bool iocoherency) { struct hgsl_mem_node *mem_node = NULL; @@ -644,4 +626,64 @@ out: return mem_node; } +int hgsl_mem_add_node(struct rb_root *rb_root, + struct hgsl_mem_node *mem_node) +{ + struct rb_node **cur; + struct rb_node *parent = NULL; + struct hgsl_mem_node *node = NULL; + int ret = 0; + + cur = &rb_root->rb_node; + while (*cur) { + parent = *cur; + node = rb_entry(parent, struct hgsl_mem_node, mem_rb_node); + if (mem_node->memdesc.gpuaddr > node->memdesc.gpuaddr) + cur = &parent->rb_right; + else if (mem_node->memdesc.gpuaddr < node->memdesc.gpuaddr) + cur = &parent->rb_left; + else { + LOGE("Duplicate gpuaddr: 0x%llx", + mem_node->memdesc.gpuaddr); + ret = -EEXIST; + goto out; + } + } + + rb_link_node(&mem_node->mem_rb_node, parent, cur); + rb_insert_color(&mem_node->mem_rb_node, rb_root); +out: + return ret; +} + +struct hgsl_mem_node *hgsl_mem_find_node_locked( + struct rb_root *rb_root, uint64_t gpuaddr, + uint64_t size, bool accurate) +{ + struct rb_node *cur = NULL; + struct hgsl_mem_node *node_found = NULL; + + cur = rb_root->rb_node; + while (cur) { + node_found = rb_entry(cur, struct hgsl_mem_node, mem_rb_node); + if (hgsl_mem_range_inspect( + node_found->memdesc.gpuaddr, gpuaddr, + node_found->memdesc.size64, size, + accurate)) { + return node_found; + } else if (node_found->memdesc.gpuaddr < gpuaddr) + cur = cur->rb_right; + else if (node_found->memdesc.gpuaddr > gpuaddr) + cur = cur->rb_left; + else { + LOGE("Invalid addr: 0x%llx size: [0x%llx 0x%llx]", + gpuaddr, size, node_found->memdesc.size64); + goto out; + } + } + +out: + return NULL; +} + MODULE_IMPORT_NS(DMA_BUF); diff --git a/drivers/soc/qcom/hgsl/hgsl_memory.h b/drivers/soc/qcom/hgsl/hgsl_memory.h index 98d2d3f26ab6..58061eb4d60b 100644 --- a/drivers/soc/qcom/hgsl/hgsl_memory.h +++ b/drivers/soc/qcom/hgsl/hgsl_memory.h @@ -10,6 +10,7 @@ #include #include #include +#include #include "hgsl_types.h" #include "hgsl_utils.h" @@ -49,7 +50,7 @@ 
enum gsl_user_mem_type_t { }; struct hgsl_mem_node { - struct list_head node; + struct rb_node mem_rb_node; struct gsl_memdesc_t memdesc; int32_t fd; uint32_t export_id; @@ -79,9 +80,21 @@ int hgsl_mem_cache_op(struct device *dev, struct hgsl_mem_node *mem_node, void hgsl_put_sgt(struct hgsl_mem_node *mem_node, bool internal); -struct hgsl_mem_node *hgsl_mem_find_base_locked(struct list_head *head, - uint64_t gpuaddr, uint64_t size); - void *hgsl_mem_node_zalloc(bool iocoherency); +int hgsl_mem_add_node(struct rb_root *rb_root, + struct hgsl_mem_node *mem_node); +struct hgsl_mem_node *hgsl_mem_find_node_locked( + struct rb_root *rb_root, uint64_t gpuaddr, + uint64_t size, bool accurate); + +static inline bool hgsl_mem_range_inspect(uint64_t da1, uint64_t da2, + uint64_t size1, uint64_t size2, bool accurate) +{ + if (accurate) + return ((da1 == da2) && (size1 == size2)); + else + return ((da1 <= da2) && (da1 + size1) >= (da2 + size2)); +} + #endif diff --git a/drivers/soc/qcom/msm-spm.c b/drivers/soc/qcom/msm-spm.c new file mode 100644 index 000000000000..f1acf2c99e82 --- /dev/null +++ b/drivers/soc/qcom/msm-spm.c @@ -0,0 +1,766 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2011-2017, 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "spm_driver.h" + +#define MSM_SPM_PMIC_STATE_IDLE 0 + +enum { + MSM_SPM_DEBUG_SHADOW = 1U << 0, + MSM_SPM_DEBUG_VCTL = 1U << 1, +}; + +static int msm_spm_debug_mask; +module_param_named( + debug_mask, msm_spm_debug_mask, int, 0664 +); + +struct saw2_data { + const char *ver_name; + uint32_t major; + uint32_t minor; + uint32_t *spm_reg_offset_ptr; +}; + +static uint32_t msm_spm_reg_offsets_saw2_v2_1[MSM_SPM_REG_NR] = { + [MSM_SPM_REG_SAW_SECURE] = 0x00, + [MSM_SPM_REG_SAW_ID] = 0x04, + [MSM_SPM_REG_SAW_CFG] = 0x08, + [MSM_SPM_REG_SAW_SPM_STS] = 0x0C, + [MSM_SPM_REG_SAW_AVS_STS] = 0x10, + [MSM_SPM_REG_SAW_PMIC_STS] = 0x14, + [MSM_SPM_REG_SAW_RST] = 0x18, + [MSM_SPM_REG_SAW_VCTL] = 0x1C, + [MSM_SPM_REG_SAW_AVS_CTL] = 0x20, + [MSM_SPM_REG_SAW_AVS_LIMIT] = 0x24, + [MSM_SPM_REG_SAW_AVS_DLY] = 0x28, + [MSM_SPM_REG_SAW_AVS_HYSTERESIS] = 0x2C, + [MSM_SPM_REG_SAW_SPM_CTL] = 0x30, + [MSM_SPM_REG_SAW_SPM_DLY] = 0x34, + [MSM_SPM_REG_SAW_PMIC_DATA_0] = 0x40, + [MSM_SPM_REG_SAW_PMIC_DATA_1] = 0x44, + [MSM_SPM_REG_SAW_PMIC_DATA_2] = 0x48, + [MSM_SPM_REG_SAW_PMIC_DATA_3] = 0x4C, + [MSM_SPM_REG_SAW_PMIC_DATA_4] = 0x50, + [MSM_SPM_REG_SAW_PMIC_DATA_5] = 0x54, + [MSM_SPM_REG_SAW_PMIC_DATA_6] = 0x58, + [MSM_SPM_REG_SAW_PMIC_DATA_7] = 0x5C, + [MSM_SPM_REG_SAW_SEQ_ENTRY] = 0x80, + [MSM_SPM_REG_SAW_VERSION] = 0xFD0, +}; + +static uint32_t msm_spm_reg_offsets_saw2_v3_0[MSM_SPM_REG_NR] = { + [MSM_SPM_REG_SAW_SECURE] = 0x00, + [MSM_SPM_REG_SAW_ID] = 0x04, + [MSM_SPM_REG_SAW_CFG] = 0x08, + [MSM_SPM_REG_SAW_SPM_STS] = 0x0C, + [MSM_SPM_REG_SAW_AVS_STS] = 0x10, + [MSM_SPM_REG_SAW_PMIC_STS] = 0x14, + [MSM_SPM_REG_SAW_RST] = 0x18, + [MSM_SPM_REG_SAW_VCTL] = 0x1C, + [MSM_SPM_REG_SAW_AVS_CTL] = 0x20, + [MSM_SPM_REG_SAW_AVS_LIMIT] = 0x24, + [MSM_SPM_REG_SAW_AVS_DLY] = 0x28, + [MSM_SPM_REG_SAW_AVS_HYSTERESIS] = 0x2C, + [MSM_SPM_REG_SAW_SPM_CTL] = 0x30, + [MSM_SPM_REG_SAW_SPM_DLY] = 0x34, + [MSM_SPM_REG_SAW_STS2] = 0x38, + 
[MSM_SPM_REG_SAW_PMIC_DATA_0] = 0x40, + [MSM_SPM_REG_SAW_PMIC_DATA_1] = 0x44, + [MSM_SPM_REG_SAW_PMIC_DATA_2] = 0x48, + [MSM_SPM_REG_SAW_PMIC_DATA_3] = 0x4C, + [MSM_SPM_REG_SAW_PMIC_DATA_4] = 0x50, + [MSM_SPM_REG_SAW_PMIC_DATA_5] = 0x54, + [MSM_SPM_REG_SAW_PMIC_DATA_6] = 0x58, + [MSM_SPM_REG_SAW_PMIC_DATA_7] = 0x5C, + [MSM_SPM_REG_SAW_SEQ_ENTRY] = 0x400, + [MSM_SPM_REG_SAW_VERSION] = 0xFD0, +}; + +static uint32_t msm_spm_reg_offsets_saw2_v4_1[MSM_SPM_REG_NR] = { + [MSM_SPM_REG_SAW_SECURE] = 0xC00, + [MSM_SPM_REG_SAW_ID] = 0xC04, + [MSM_SPM_REG_SAW_STS2] = 0xC10, + [MSM_SPM_REG_SAW_SPM_STS] = 0xC0C, + [MSM_SPM_REG_SAW_AVS_STS] = 0xC14, + [MSM_SPM_REG_SAW_PMIC_STS] = 0xC18, + [MSM_SPM_REG_SAW_RST] = 0xC1C, + [MSM_SPM_REG_SAW_VCTL] = 0x900, + [MSM_SPM_REG_SAW_AVS_CTL] = 0x904, + [MSM_SPM_REG_SAW_AVS_LIMIT] = 0x908, + [MSM_SPM_REG_SAW_AVS_DLY] = 0x90C, + [MSM_SPM_REG_SAW_SPM_CTL] = 0x0, + [MSM_SPM_REG_SAW_SPM_DLY] = 0x4, + [MSM_SPM_REG_SAW_CFG] = 0x0C, + [MSM_SPM_REG_SAW_PMIC_DATA_0] = 0x40, + [MSM_SPM_REG_SAW_PMIC_DATA_1] = 0x44, + [MSM_SPM_REG_SAW_PMIC_DATA_2] = 0x48, + [MSM_SPM_REG_SAW_PMIC_DATA_3] = 0x4C, + [MSM_SPM_REG_SAW_PMIC_DATA_4] = 0x50, + [MSM_SPM_REG_SAW_PMIC_DATA_5] = 0x54, + [MSM_SPM_REG_SAW_SEQ_ENTRY] = 0x400, + [MSM_SPM_REG_SAW_VERSION] = 0xFD0, +}; + +static struct saw2_data saw2_info[] = { + [0] = { + "SAW_v2.1", + 0x2, + 0x1, + msm_spm_reg_offsets_saw2_v2_1, + }, + [1] = { + "SAW_v2.3", + 0x3, + 0x0, + msm_spm_reg_offsets_saw2_v3_0, + }, + [2] = { + "SAW_v3.0", + 0x1, + 0x0, + msm_spm_reg_offsets_saw2_v3_0, + }, + [3] = { + "SAW_v4.0", + 0x4, + 0x1, + msm_spm_reg_offsets_saw2_v4_1, + }, +}; + +static uint32_t num_pmic_data; + +static void msm_spm_drv_flush_shadow(struct msm_spm_driver_data *dev, + unsigned int reg_index) +{ + if (!dev || reg_index >= MSM_SPM_REG_NR) + return; + + __raw_writel(dev->reg_shadow[reg_index], + dev->reg_base_addr + dev->reg_offsets[reg_index]); +} + +static void msm_spm_drv_load_shadow(struct msm_spm_driver_data *dev, + 
unsigned int reg_index) +{ + if (!dev || reg_index >= MSM_SPM_REG_NR) + return; + + dev->reg_shadow[reg_index] = + __raw_readl(dev->reg_base_addr + + dev->reg_offsets[reg_index]); +} + +static inline uint32_t msm_spm_drv_get_num_spm_entry( + struct msm_spm_driver_data *dev) +{ + if (!dev) + return -ENODEV; + + msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID); + return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 24) & 0xFF; +} + +static inline void msm_spm_drv_set_start_addr( + struct msm_spm_driver_data *dev, uint32_t ctl) +{ + dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] = ctl; +} + +static inline bool msm_spm_pmic_arb_present(struct msm_spm_driver_data *dev) +{ + msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID); + return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 2) & 0x1; +} + +static inline void msm_spm_drv_set_vctl2(struct msm_spm_driver_data *dev, + uint32_t vlevel, uint32_t vctl_port) +{ + unsigned int pmic_data = 0; + + pmic_data |= vlevel; + pmic_data |= (vctl_port & 0x7) << 16; + + dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0x700FF; + dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= pmic_data; + + dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_DATA_3] &= ~0x700FF; + dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_DATA_3] |= pmic_data; + + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL); + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_PMIC_DATA_3); +} + +static inline uint32_t msm_spm_drv_get_num_pmic_data( + struct msm_spm_driver_data *dev) +{ + msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID); + mb(); /* Ensure we flush */ + return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 4) & 0x7; +} + +static inline uint32_t msm_spm_drv_get_sts_pmic_state( + struct msm_spm_driver_data *dev) +{ + msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS); + return (dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] >> 16) & + 0x03; +} + +uint32_t msm_spm_drv_get_sts_curr_pmic_data( + struct msm_spm_driver_data *dev) +{ + msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS); + return 
dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] & 0x300FF; +} + +static inline void msm_spm_drv_get_saw2_ver(struct msm_spm_driver_data *dev, + uint32_t *major, uint32_t *minor) +{ + uint32_t val = 0; + + dev->reg_shadow[MSM_SPM_REG_SAW_VERSION] = + __raw_readl(dev->reg_base_addr + dev->ver_reg); + + val = dev->reg_shadow[MSM_SPM_REG_SAW_VERSION]; + + *major = (val >> 28) & 0xF; + *minor = (val >> 16) & 0xFFF; +} + +inline int msm_spm_drv_set_spm_enable( + struct msm_spm_driver_data *dev, bool enable) +{ + uint32_t value = enable ? 0x01 : 0x00; + + if (!dev) + return -EINVAL; + + if ((dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] & 0x01) ^ value) { + + dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] &= ~0x1; + dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] |= value; + + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL); + wmb(); /* Ensure we flush */ + } + return 0; +} + +int msm_spm_drv_get_avs_enable(struct msm_spm_driver_data *dev) +{ + if (!dev) + return -EINVAL; + + return dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & 0x01; +} + +int msm_spm_drv_set_avs_enable(struct msm_spm_driver_data *dev, + bool enable) +{ + uint32_t value = enable ? 
0x1 : 0x0; + + if (!dev) + return -EINVAL; + + if ((dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & 0x1) ^ value) { + dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~0x1; + dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= value; + + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL); + } + + return 0; +} + +int msm_spm_drv_set_avs_limit(struct msm_spm_driver_data *dev, + uint32_t min_lvl, uint32_t max_lvl) +{ + uint32_t value = (max_lvl & 0xff) << 16 | (min_lvl & 0xff); + + if (!dev) + return -EINVAL; + + dev->reg_shadow[MSM_SPM_REG_SAW_AVS_LIMIT] = value; + + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_LIMIT); + + return 0; +} + +static int msm_spm_drv_avs_irq_mask(enum msm_spm_avs_irq irq) +{ + switch (irq) { + case MSM_SPM_AVS_IRQ_MIN: + return BIT(1); + case MSM_SPM_AVS_IRQ_MAX: + return BIT(2); + default: + return -EINVAL; + } +} + +int msm_spm_drv_set_avs_irq_enable(struct msm_spm_driver_data *dev, + enum msm_spm_avs_irq irq, bool enable) +{ + int mask = msm_spm_drv_avs_irq_mask(irq); + uint32_t value; + + if (!dev) + return -EINVAL; + else if (mask < 0) + return mask; + + value = enable ? mask : 0; + + if ((dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & mask) ^ value) { + dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~mask; + dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= value; + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL); + } + + return 0; +} + +int msm_spm_drv_avs_clear_irq(struct msm_spm_driver_data *dev, + enum msm_spm_avs_irq irq) +{ + int mask = msm_spm_drv_avs_irq_mask(irq); + + if (!dev) + return -EINVAL; + else if (mask < 0) + return mask; + + if (dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & mask) { + /* + * The interrupt status is cleared by disabling and then + * re-enabling the interrupt. 
+ */ + dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~mask; + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL); + dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= mask; + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL); + } + + return 0; +} + +void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev) +{ + int i; + int num_spm_entry = msm_spm_drv_get_num_spm_entry(dev); + + if (!dev) { + __WARN(); + return; + } + + for (i = 0; i < num_spm_entry; i++) { + __raw_writel(dev->reg_seq_entry_shadow[i], + dev->reg_base_addr + + dev->reg_offsets[MSM_SPM_REG_SAW_SEQ_ENTRY] + + 4 * i); + } + mb(); /* Ensure we flush */ +} + +void dump_regs(struct msm_spm_driver_data *dev, int cpu) +{ + msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_STS); + mb(); /* Ensure we flush */ + pr_err("CPU%d: spm register MSM_SPM_REG_SAW_SPM_STS: 0x%x\n", cpu, + dev->reg_shadow[MSM_SPM_REG_SAW_SPM_STS]); + msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL); + mb(); /* Ensure we flush */ + pr_err("CPU%d: spm register MSM_SPM_REG_SAW_SPM_CTL: 0x%x\n", cpu, + dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL]); +} + +int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev, + uint8_t *cmd, uint32_t *offset) +{ + uint32_t cmd_w; + uint32_t offset_w = *offset / 4; + uint8_t last_cmd; + + if (!cmd) + return -EINVAL; + + while (1) { + int i; + + cmd_w = 0; + last_cmd = 0; + cmd_w = dev->reg_seq_entry_shadow[offset_w]; + + for (i = (*offset % 4); i < 4; i++) { + last_cmd = *(cmd++); + cmd_w |= last_cmd << (i * 8); + (*offset)++; + if (last_cmd == 0x0f) + break; + } + + dev->reg_seq_entry_shadow[offset_w++] = cmd_w; + if (last_cmd == 0x0f) + break; + } + + return 0; +} + +int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev, + uint32_t ctl) +{ + + /* SPM is configured to reset start address to zero after end of Program + */ + if (!dev) + return -EINVAL; + + msm_spm_drv_set_start_addr(dev, ctl); + + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL); + wmb(); /* Ensure we flush 
*/ + + if (msm_spm_debug_mask & MSM_SPM_DEBUG_SHADOW) { + int i; + + for (i = 0; i < MSM_SPM_REG_NR; i++) + pr_info("%s: reg %02x = 0x%08x\n", __func__, + dev->reg_offsets[i], dev->reg_shadow[i]); + } + msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_STS); + + return 0; +} + +uint32_t msm_spm_drv_get_vdd(struct msm_spm_driver_data *dev) +{ + msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS); + return dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] & 0xFF; +} + +#ifdef CONFIG_MSM_AVS_HW +static bool msm_spm_drv_is_avs_enabled(struct msm_spm_driver_data *dev) +{ + msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL); + return dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & BIT(0); +} + +static void msm_spm_drv_disable_avs(struct msm_spm_driver_data *dev) +{ + msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL); + dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~BIT(27); + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL); +} + +static void msm_spm_drv_enable_avs(struct msm_spm_driver_data *dev) +{ + dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= BIT(27); + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL); +} + +static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev, + unsigned int vlevel) +{ + vlevel &= 0x3f; + dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~0x7efc00; + dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= ((vlevel - 4) << 10); + dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= (vlevel << 17); + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL); +} + +#else +static bool msm_spm_drv_is_avs_enabled(struct msm_spm_driver_data *dev) +{ + return false; +} + +static void msm_spm_drv_disable_avs(struct msm_spm_driver_data *dev) { } + +static void msm_spm_drv_enable_avs(struct msm_spm_driver_data *dev) { } + +static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev, + unsigned int vlevel) +{ +} +#endif + +static inline int msm_spm_drv_validate_data(struct msm_spm_driver_data *dev, + unsigned int vlevel, int vctl_port) +{ + int timeout_us = 
dev->vctl_timeout_us; + uint32_t new_level; + + /* Confirm the voltage we set was what hardware sent and + * FSM is idle. + */ + do { + udelay(1); + new_level = msm_spm_drv_get_sts_curr_pmic_data(dev); + + /** + * VCTL_PORT has to be 0, for vlevel to be updated. + * If port is not 0, check for PMIC_STATE only. + */ + + if (((new_level & 0x30000) == MSM_SPM_PMIC_STATE_IDLE) && + (vctl_port || ((new_level & 0xFF) == vlevel))) + break; + } while (--timeout_us); + + if (!timeout_us) { + pr_err("Wrong level %#x\n", new_level); + return -EIO; + } + + if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) + pr_info("%s: done, remaining timeout %u us\n", + __func__, timeout_us); + + return 0; +} + +int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev, unsigned int vlevel) +{ + uint32_t vlevel_set = vlevel; + bool avs_enabled; + int ret = 0; + + if (!dev) + return -EINVAL; + + avs_enabled = msm_spm_drv_is_avs_enabled(dev); + + if (!msm_spm_pmic_arb_present(dev)) + return -ENODEV; + + if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) + pr_info("%s: requesting vlevel %#x\n", __func__, vlevel); + + if (avs_enabled) + msm_spm_drv_disable_avs(dev); + + if (dev->vctl_port_ub >= 0) { + /** + * VCTL can send 8bit voltage level at once. + * Send lower 8bit first, vlevel change happens + * when upper 8bit is sent. 
+ */ + vlevel = vlevel_set & 0xFF; + } + + /* Kick the state machine back to idle */ + dev->reg_shadow[MSM_SPM_REG_SAW_RST] = 1; + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_RST); + + msm_spm_drv_set_vctl2(dev, vlevel, dev->vctl_port); + + ret = msm_spm_drv_validate_data(dev, vlevel, dev->vctl_port); + if (ret) + goto set_vdd_bail; + + if (dev->vctl_port_ub >= 0) { + /* Send upper 8bit of voltage level */ + vlevel = (vlevel_set >> 8) & 0xFF; + + /* Kick the state machine back to idle */ + dev->reg_shadow[MSM_SPM_REG_SAW_RST] = 1; + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_RST); + + /* + * Steps for sending for vctl port other than '0' + * Write VCTL register with pmic data and address index + * Perform system barrier + * Wait for 1us + * Read PMIC_STS register to make sure operation is complete + */ + msm_spm_drv_set_vctl2(dev, vlevel, dev->vctl_port_ub); + + mb(); /* To make sure data is sent before checking status */ + + ret = msm_spm_drv_validate_data(dev, vlevel, dev->vctl_port_ub); + if (ret) + goto set_vdd_bail; + } + + /* Set AVS min/max */ + if (avs_enabled) { + msm_spm_drv_set_avs_vlevel(dev, vlevel_set); + msm_spm_drv_enable_avs(dev); + } + + return ret; + +set_vdd_bail: + if (avs_enabled) + msm_spm_drv_enable_avs(dev); + + pr_err("%s: failed %#x vlevel setting in timeout %uus\n", + __func__, vlevel_set, dev->vctl_timeout_us); + return -EIO; +} + +static int msm_spm_drv_get_pmic_port(struct msm_spm_driver_data *dev, + enum msm_spm_pmic_port port) +{ + int index = -1; + + switch (port) { + case MSM_SPM_PMIC_VCTL_PORT: + index = dev->vctl_port; + break; + case MSM_SPM_PMIC_PHASE_PORT: + index = dev->phase_port; + break; + case MSM_SPM_PMIC_PFM_PORT: + index = dev->pfm_port; + break; + default: + break; + } + + return index; +} + +int msm_spm_drv_set_pmic_data(struct msm_spm_driver_data *dev, + enum msm_spm_pmic_port port, unsigned int data) +{ + unsigned int pmic_data = 0; + unsigned int timeout_us = 0; + int index = 0; + + if 
(!msm_spm_pmic_arb_present(dev)) + return -ENODEV; + + index = msm_spm_drv_get_pmic_port(dev, port); + if (index < 0) + return -ENODEV; + + pmic_data |= data & 0xFF; + pmic_data |= (index & 0x7) << 16; + + dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0x700FF; + dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= pmic_data; + msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL); + mb(); /* Ensure we flush */ + + timeout_us = dev->vctl_timeout_us; + /** + * Confirm the pmic data set was what hardware sent by + * checking the PMIC FSM state. + * We cannot use the sts_pmic_data and check it against + * the value like we do fot set_vdd, since the PMIC_STS + * is only updated for SAW_VCTL sent with port index 0. + */ + do { + if (msm_spm_drv_get_sts_pmic_state(dev) == + MSM_SPM_PMIC_STATE_IDLE) + break; + udelay(1); + } while (--timeout_us); + + if (!timeout_us) { + pr_err("%s: failed, remaining timeout %u us, data %d\n", + __func__, timeout_us, data); + return -EIO; + } + + return 0; +} + +void msm_spm_drv_reinit(struct msm_spm_driver_data *dev, bool seq_write) +{ + int i; + + if (seq_write) + msm_spm_drv_flush_seq_entry(dev); + + for (i = 0; i < MSM_SPM_REG_SAW_PMIC_DATA_0 + num_pmic_data; i++) + msm_spm_drv_load_shadow(dev, i); + + for (i = MSM_SPM_REG_NR_INITIALIZE + 1; i < MSM_SPM_REG_NR; i++) + msm_spm_drv_load_shadow(dev, i); +} + +int msm_spm_drv_reg_init(struct msm_spm_driver_data *dev, + struct msm_spm_platform_data *data) +{ + int i; + bool found = false; + + dev->ver_reg = data->ver_reg; + dev->reg_base_addr = data->reg_base_addr; + msm_spm_drv_get_saw2_ver(dev, &dev->major, &dev->minor); + for (i = 0; i < ARRAY_SIZE(saw2_info); i++) + if (dev->major == saw2_info[i].major && + dev->minor == saw2_info[i].minor) { + pr_debug("%s: Version found\n", + saw2_info[i].ver_name); + dev->reg_offsets = saw2_info[i].spm_reg_offset_ptr; + found = true; + break; + } + + if (!found) { + pr_err("%s: No SAW version found\n", __func__); + WARN_ON(!found); + } + return 0; +} + +void 
msm_spm_drv_upd_reg_shadow(struct msm_spm_driver_data *dev, int id, + int val) +{ + dev->reg_shadow[id] = val; + msm_spm_drv_flush_shadow(dev, id); + /* Complete the above writes before other accesses */ + mb(); +} + +int msm_spm_drv_init(struct msm_spm_driver_data *dev, + struct msm_spm_platform_data *data) +{ + int num_spm_entry; + + if (!dev || !data) + return -ENODEV; + + dev->vctl_port = data->vctl_port; + dev->vctl_port_ub = data->vctl_port_ub; + dev->phase_port = data->phase_port; + dev->pfm_port = data->pfm_port; + dev->reg_base_addr = data->reg_base_addr; + memcpy(dev->reg_shadow, data->reg_init_values, + sizeof(data->reg_init_values)); + + dev->vctl_timeout_us = data->vctl_timeout_us; + + + if (!num_pmic_data) + num_pmic_data = msm_spm_drv_get_num_pmic_data(dev); + + num_spm_entry = msm_spm_drv_get_num_spm_entry(dev); + + dev->reg_seq_entry_shadow = + kcalloc(num_spm_entry, sizeof(*dev->reg_seq_entry_shadow), + GFP_KERNEL); + + if (!dev->reg_seq_entry_shadow) + return -ENOMEM; + + return 0; +} diff --git a/drivers/soc/qcom/spm_devices.c b/drivers/soc/qcom/spm_devices.c new file mode 100644 index 000000000000..3fb6bef38b13 --- /dev/null +++ b/drivers/soc/qcom/spm_devices.c @@ -0,0 +1,1003 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2011-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "spm_driver.h" + +#define VDD_DEFAULT 0xDEADF00D +#define SLP_CMD_BIT 17 +#define PC_MODE_BIT 16 +#define RET_MODE_BIT 15 +#define EVENT_SYNC_BIT 24 +#define ISAR_BIT 3 +#define SPM_EN_BIT 0 + +struct msm_spm_power_modes { + uint32_t mode; + uint32_t ctl; +}; + +struct msm_spm_device { + struct list_head list; + bool initialized; + const char *name; + struct msm_spm_driver_data reg_data; + struct msm_spm_power_modes *modes; + uint32_t num_modes; + uint32_t cpu_vdd; + struct cpumask mask; + void __iomem *q2s_reg; + bool qchannel_ignore; + bool allow_rpm_hs; + bool use_spm_clk_gating; + bool use_qchannel_for_wfi; + void __iomem *flush_base_addr; + void __iomem *slpreq_base_addr; +}; + +struct msm_spm_vdd_info { + struct msm_spm_device *vctl_dev; + uint32_t vlevel; + int err; +}; + +static LIST_HEAD(spm_list); +static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device); +static DEFINE_PER_CPU(struct msm_spm_device *, cpu_vctl_device); + +static void msm_spm_smp_set_vdd(void *data) +{ + struct msm_spm_vdd_info *info = (struct msm_spm_vdd_info *)data; + struct msm_spm_device *dev = info->vctl_dev; + + dev->cpu_vdd = info->vlevel; + info->err = msm_spm_drv_set_vdd(&dev->reg_data, info->vlevel); +} + +/** + * msm_spm_probe_done(): Verify and return the status of the cpu(s) and l2 + * probe. + * Return: 0 if all spm devices have been probed, else return -EPROBE_DEFER. + * if probe failed, then return the err number for that failure. 
+ */ +int msm_spm_probe_done(void) +{ + struct msm_spm_device *dev; + int cpu; + int ret = 0; + + for_each_possible_cpu(cpu) { + dev = per_cpu(cpu_vctl_device, cpu); + if (!dev) + return -EPROBE_DEFER; + + ret = IS_ERR(dev); + if (ret) + return ret; + } + + return 0; +} +EXPORT_SYMBOL_GPL(msm_spm_probe_done); + +void msm_spm_dump_regs(unsigned int cpu) +{ + dump_regs(&per_cpu(msm_cpu_spm_device, cpu).reg_data, cpu); +} + +/** + * msm_spm_set_vdd(): Set core voltage + * @cpu: core id + * @vlevel: Encoded PMIC data. + * + * Return: 0 on success or -(ERRNO) on failure. + */ +int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel) +{ + struct msm_spm_vdd_info info; + struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu); + int ret; + + if (!dev) + return -EPROBE_DEFER; + + ret = IS_ERR(dev); + if (ret) + return ret; + + info.vctl_dev = dev; + info.vlevel = vlevel; + + ret = smp_call_function_any(&dev->mask, msm_spm_smp_set_vdd, &info, + true); + if (ret) + return ret; + + return info.err; +} +EXPORT_SYMBOL_GPL(msm_spm_set_vdd); + +/** + * msm_spm_get_vdd(): Get core voltage + * @cpu: core id + * @return: Returns encoded PMIC data. + */ +int msm_spm_get_vdd(unsigned int cpu) +{ + struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu); + + if (!dev) + return -EPROBE_DEFER; + + return msm_spm_drv_get_vdd(&dev->reg_data) ? 
: -EINVAL; +} +EXPORT_SYMBOL_GPL(msm_spm_get_vdd); + +static void msm_spm_config_q2s(struct msm_spm_device *dev, unsigned int mode) +{ + uint32_t spm_legacy_mode = 0; + uint32_t qchannel_ignore = 0; + uint32_t val = 0; + + if (!dev->q2s_reg) + return; + + switch (mode) { + case MSM_SPM_MODE_DISABLED: + case MSM_SPM_MODE_CLOCK_GATING: + qchannel_ignore = !dev->use_qchannel_for_wfi; + spm_legacy_mode = 0; + break; + case MSM_SPM_MODE_RETENTION: + qchannel_ignore = 0; + spm_legacy_mode = 0; + break; + case MSM_SPM_MODE_GDHS: + case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE: + case MSM_SPM_MODE_POWER_COLLAPSE: + qchannel_ignore = dev->qchannel_ignore; + spm_legacy_mode = 1; + break; + default: + break; + } + + val = spm_legacy_mode << 2 | qchannel_ignore << 1; + __raw_writel(val, dev->q2s_reg); + mb(); /* Ensure flush */ +} + +static void msm_spm_config_hw_flush(struct msm_spm_device *dev, + unsigned int mode) +{ + uint32_t val = 0; + + if (!dev->flush_base_addr) + return; + + switch (mode) { + case MSM_SPM_MODE_FASTPC: + case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE: + case MSM_SPM_MODE_POWER_COLLAPSE: + val = BIT(0); + break; + default: + break; + } + + __raw_writel(val, dev->flush_base_addr); +} + +static void msm_spm_config_slpreq(struct msm_spm_device *dev, + unsigned int mode) +{ + uint32_t val = 0; + + if (!dev->slpreq_base_addr) + return; + + switch (mode) { + case MSM_SPM_MODE_FASTPC: + case MSM_SPM_MODE_GDHS: + case MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE: + case MSM_SPM_MODE_POWER_COLLAPSE: + val = BIT(4); + break; + default: + break; + } + + val = (__raw_readl(dev->slpreq_base_addr) & ~BIT(4)) | val; + __raw_writel(val, dev->slpreq_base_addr); +} + +static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev, + unsigned int mode, bool notify_rpm, bool set_spm_enable) +{ + uint32_t i; + int ret = -EINVAL; + uint32_t ctl = 0; + + if (!dev) { + pr_err("dev is NULL\n"); + return -ENODEV; + } + + if (!dev->initialized) + return -ENXIO; + + if 
(!dev->num_modes) + return 0; + + if (mode == MSM_SPM_MODE_DISABLED && set_spm_enable) { + ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false); + } else { + if (set_spm_enable) + ret = msm_spm_drv_set_spm_enable(&dev->reg_data, true); + for (i = 0; i < dev->num_modes; i++) { + if (dev->modes[i].mode != mode) + continue; + + ctl = dev->modes[i].ctl; + if (!dev->allow_rpm_hs && notify_rpm) + ctl &= ~BIT(SLP_CMD_BIT); + + break; + } + ret = msm_spm_drv_set_low_power_mode(&dev->reg_data, ctl); + } + + msm_spm_config_q2s(dev, mode); + msm_spm_config_hw_flush(dev, mode); + msm_spm_config_slpreq(dev, mode); + + return ret; +} + +static int msm_spm_dev_init(struct msm_spm_device *dev, + struct msm_spm_platform_data *data) +{ + int i, ret = -ENOMEM; + uint32_t offset = 0; + + dev->cpu_vdd = VDD_DEFAULT; + dev->num_modes = data->num_modes; + dev->modes = kmalloc_array( + dev->num_modes, sizeof(struct msm_spm_power_modes), + GFP_KERNEL); + + if (!dev->modes) + goto spm_failed_malloc; + + ret = msm_spm_drv_init(&dev->reg_data, data); + + if (ret) + goto spm_failed_init; + + for (i = 0; i < dev->num_modes; i++) { + + /* Default offset is 0 and gets updated as we write more + * sequences into SPM + */ + dev->modes[i].ctl = data->modes[i].ctl | ((offset & 0x1FF) + << 4); + ret = msm_spm_drv_write_seq_data(&dev->reg_data, + data->modes[i].cmd, &offset); + if (ret < 0) + goto spm_failed_init; + + dev->modes[i].mode = data->modes[i].mode; + } + + msm_spm_drv_reinit(&dev->reg_data, dev->num_modes ? 
true : false); + + dev->initialized = true; + + return 0; + +spm_failed_init: + kfree(dev->modes); +spm_failed_malloc: + return ret; +} + +/** + * msm_spm_turn_on_cpu_rail(): Power on cpu rail before turning on core + * @node: The SPM node that controls the voltage for the CPU + * @val: The value to be set on the rail + * @cpu: The cpu for this with rail is being powered on + */ +int msm_spm_turn_on_cpu_rail(struct device_node *vctl_node, + unsigned int val, int cpu, int vctl_offset) +{ + uint32_t timeout = 2000; /* delay for voltage to settle on the core */ + struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu); + void __iomem *base; + + base = of_iomap(vctl_node, 1); + if (base) { + /* + * Program Q2S to disable SPM legacy mode and ignore Q2S + * channel requests. + * bit[1] = qchannel_ignore = 1 + * bit[2] = spm_legacy_mode = 0 + */ + writel_relaxed(0x2, base); + mb(); /* Ensure flush */ + iounmap(base); + } + + base = of_iomap(vctl_node, 0); + if (!base) + return -ENOMEM; + + if (dev && (dev->cpu_vdd != VDD_DEFAULT)) + return 0; + + /* Set the CPU supply regulator voltage */ + val = (val & 0xFF); + writel_relaxed(val, base + vctl_offset); + mb(); /* Ensure flush */ + udelay(timeout); + + /* Enable the CPU supply regulator*/ + val = 0x30080; + writel_relaxed(val, base + vctl_offset); + mb(); /* Ensure flush */ + udelay(timeout); + + iounmap(base); + + return 0; +} +EXPORT_SYMBOL_GPL(msm_spm_turn_on_cpu_rail); + +void msm_spm_reinit(void) +{ + unsigned int cpu; + + for_each_possible_cpu(cpu) + msm_spm_drv_reinit( + &per_cpu(msm_cpu_spm_device.reg_data, cpu), true); +} +EXPORT_SYMBOL_GPL(msm_spm_reinit); + +/* + * msm_spm_is_mode_avail() - Specifies if a mode is available for the cpu + * It should only be used to decide a mode before lpm driver is probed. 
+ * @mode: SPM LPM mode to be selected + */ +bool msm_spm_is_mode_avail(unsigned int mode) +{ + struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device); + int i; + + for (i = 0; i < dev->num_modes; i++) { + if (dev->modes[i].mode == mode) + return true; + } + + return false; +} + +/** + * msm_spm_is_avs_enabled() - Functions returns 1 if AVS is enabled and + * 0 if it is not. + * @cpu: specifies cpu's avs should be read + * + * Returns errno in case of failure or AVS enable state otherwise + */ +int msm_spm_is_avs_enabled(unsigned int cpu) +{ + struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu); + + if (!dev) + return -ENXIO; + + return msm_spm_drv_get_avs_enable(&dev->reg_data); +} +EXPORT_SYMBOL_GPL(msm_spm_is_avs_enabled); + +/** + * msm_spm_avs_enable() - Enables AVS on the SAW that controls this cpu's + * voltage. + * @cpu: specifies which cpu's avs should be enabled + * + * Returns errno in case of failure or 0 if successful + */ +int msm_spm_avs_enable(unsigned int cpu) +{ + struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu); + + if (!dev) + return -ENXIO; + + return msm_spm_drv_set_avs_enable(&dev->reg_data, true); +} +EXPORT_SYMBOL_GPL(msm_spm_avs_enable); + +/** + * msm_spm_avs_disable() - Disables AVS on the SAW that controls this cpu's + * voltage. + * @cpu: specifies which cpu's avs should be enabled + * + * Returns errno in case of failure or 0 if successful + */ +int msm_spm_avs_disable(unsigned int cpu) +{ + struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu); + + if (!dev) + return -ENXIO; + + return msm_spm_drv_set_avs_enable(&dev->reg_data, false); +} +EXPORT_SYMBOL_GPL(msm_spm_avs_disable); + +/** + * msm_spm_avs_set_limit() - Set maximum and minimum AVS limits on the + * SAW that controls this cpu's voltage. 
+ * @cpu: specify which cpu's avs should be configured + * @min_lvl: specifies the minimum PMIC output voltage control register + * value that may be sent to the PMIC + * @max_lvl: specifies the maximum PMIC output voltage control register + * value that may be sent to the PMIC + * Returns errno in case of failure or 0 if successful + */ +int msm_spm_avs_set_limit(unsigned int cpu, + uint32_t min_lvl, uint32_t max_lvl) +{ + struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu); + + if (!dev) + return -ENXIO; + + return msm_spm_drv_set_avs_limit(&dev->reg_data, min_lvl, max_lvl); +} +EXPORT_SYMBOL_GPL(msm_spm_avs_set_limit); + +/** + * msm_spm_avs_enable_irq() - Enable an AVS interrupt + * @cpu: specifies which CPU's AVS should be configured + * @irq: specifies which interrupt to enable + * + * Returns errno in case of failure or 0 if successful. + */ +int msm_spm_avs_enable_irq(unsigned int cpu, enum msm_spm_avs_irq irq) +{ + struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu); + + if (!dev) + return -ENXIO; + + return msm_spm_drv_set_avs_irq_enable(&dev->reg_data, irq, true); +} +EXPORT_SYMBOL_GPL(msm_spm_avs_enable_irq); + +/** + * msm_spm_avs_disable_irq() - Disable an AVS interrupt + * @cpu: specifies which CPU's AVS should be configured + * @irq: specifies which interrupt to disable + * + * Returns errno in case of failure or 0 if successful. + */ +int msm_spm_avs_disable_irq(unsigned int cpu, enum msm_spm_avs_irq irq) +{ + struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu); + + if (!dev) + return -ENXIO; + + return msm_spm_drv_set_avs_irq_enable(&dev->reg_data, irq, false); +} +EXPORT_SYMBOL_GPL(msm_spm_avs_disable_irq); + +/** + * msm_spm_avs_clear_irq() - Clear a latched AVS interrupt + * @cpu: specifies which CPU's AVS should be configured + * @irq: specifies which interrupt to clear + * + * Returns errno in case of failure or 0 if successful. 
+ */ +int msm_spm_avs_clear_irq(unsigned int cpu, enum msm_spm_avs_irq irq) +{ + struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu); + + if (!dev) + return -ENXIO; + + return msm_spm_drv_avs_clear_irq(&dev->reg_data, irq); +} +EXPORT_SYMBOL_GPL(msm_spm_avs_clear_irq); + +/** + * msm_spm_set_low_power_mode() - Configure SPM start address for low power mode + * @mode: SPM LPM mode to enter + * @notify_rpm: Notify RPM in this mode + */ +int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm) +{ + struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device); + + return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, true); +} +EXPORT_SYMBOL_GPL(msm_spm_set_low_power_mode); + +void msm_spm_set_rpm_hs(bool allow_rpm_hs) +{ + struct msm_spm_device *dev = this_cpu_ptr(&msm_cpu_spm_device); + + dev->allow_rpm_hs = allow_rpm_hs; +} +EXPORT_SYMBOL_GPL(msm_spm_set_rpm_hs); + +int msm_spm_config_low_power_mode_addr(struct msm_spm_device *dev, + unsigned int mode, bool notify_rpm) +{ + return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, false); +} + +/** + * msm_spm_init(): Board initalization function + * @data: platform specific SPM register configuration data + * @nr_devs: Number of SPM devices being initialized + */ +int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs) +{ + unsigned int cpu; + int ret = 0; + + if ((nr_devs < num_possible_cpus()) || !data) + return -EINVAL; + + for_each_possible_cpu(cpu) { + struct msm_spm_device *dev = &per_cpu(msm_cpu_spm_device, cpu); + + ret = msm_spm_dev_init(dev, &data[cpu]); + if (ret < 0) { + pr_warn("%s():failed CPU:%u ret:%d\n", __func__, + cpu, ret); + break; + } + } + + return ret; +} + +struct msm_spm_device *msm_spm_get_device_by_name(const char *name) +{ + struct list_head *list; + + list_for_each(list, &spm_list) { + struct msm_spm_device *dev + = list_entry(list, typeof(*dev), list); + if (dev->name && !strcmp(dev->name, name)) + return dev; + } + return 
ERR_PTR(-ENODEV); +} + +int msm_spm_config_low_power_mode(struct msm_spm_device *dev, + unsigned int mode, bool notify_rpm) +{ + return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm, true); +} +#ifdef CONFIG_MSM_L2_SPM + +/** + * msm_spm_apcs_set_phase(): Set number of SMPS phases. + * @cpu: cpu which is requesting the change in number of phases. + * @phase_cnt: Number of phases to be set active + */ +int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt) +{ + struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu); + + if (!dev) + return -ENXIO; + + return msm_spm_drv_set_pmic_data(&dev->reg_data, + MSM_SPM_PMIC_PHASE_PORT, phase_cnt); +} +EXPORT_SYMBOL_GPL(msm_spm_apcs_set_phase); + +/** msm_spm_enable_fts_lpm() : Enable FTS to switch to low power + * when the cores are in low power modes + * @cpu: cpu that is entering low power mode. + * @mode: The mode configuration for FTS + */ +int msm_spm_enable_fts_lpm(int cpu, uint32_t mode) +{ + struct msm_spm_device *dev = per_cpu(cpu_vctl_device, cpu); + + if (!dev) + return -ENXIO; + + return msm_spm_drv_set_pmic_data(&dev->reg_data, + MSM_SPM_PMIC_PFM_PORT, mode); +} +EXPORT_SYMBOL_GPL(msm_spm_enable_fts_lpm); + +#endif + +static int get_cpu_id(struct device_node *node) +{ + struct device_node *cpu_node; + u32 cpu; + char *key = "qcom,cpu"; + + cpu_node = of_parse_phandle(node, key, 0); + if (cpu_node) { + for_each_possible_cpu(cpu) { + if (of_get_cpu_node(cpu, NULL) == cpu_node) + return cpu; + } + } else + return num_possible_cpus(); + + return -EINVAL; +} + +static struct msm_spm_device *msm_spm_get_device(struct platform_device *pdev) +{ + struct msm_spm_device *dev = NULL; + const char *val = NULL; + char *key = "qcom,name"; + int cpu = get_cpu_id(pdev->dev.of_node); + + if ((cpu >= 0) && cpu < num_possible_cpus()) + dev = &per_cpu(msm_cpu_spm_device, cpu); + else if (cpu == num_possible_cpus()) + dev = devm_kzalloc(&pdev->dev, sizeof(struct msm_spm_device), + GFP_KERNEL); + + if (!dev) + return 
NULL; + + if (of_property_read_string(pdev->dev.of_node, key, &val)) { + pr_err("%s(): Cannot find a required node key:%s\n", + __func__, key); + return NULL; + } + dev->name = val; + list_add(&dev->list, &spm_list); + + return dev; +} + +static void get_cpumask(struct device_node *node, struct cpumask *mask) +{ + unsigned int c; + int idx = 0; + struct device_node *cpu_node; + char *key = "qcom,cpu-vctl-list"; + + cpu_node = of_parse_phandle(node, key, idx++); + while (cpu_node) { + for_each_possible_cpu(c) { + if (of_get_cpu_node(c, NULL) == cpu_node) + cpumask_set_cpu(c, mask); + } + cpu_node = of_parse_phandle(node, key, idx++); + } +} + +static int msm_spm_dev_probe(struct platform_device *pdev) +{ + int ret = 0; + int cpu = 0; + int i = 0; + struct device_node *node = pdev->dev.of_node; + struct device_node *n = NULL; + struct msm_spm_platform_data spm_data; + char *key = NULL; + uint32_t val = 0; + struct msm_spm_seq_entry modes[MSM_SPM_MODE_NR]; + int len = 0; + struct msm_spm_device *dev = NULL; + struct resource *res = NULL; + uint32_t mode_count = 0; + + struct spm_of { + char *key; + uint32_t id; + }; + + struct spm_of spm_of_data[] = { + {"qcom,saw2-cfg", MSM_SPM_REG_SAW_CFG}, + {"qcom,saw2-avs-ctl", MSM_SPM_REG_SAW_AVS_CTL}, + {"qcom,saw2-avs-hysteresis", MSM_SPM_REG_SAW_AVS_HYSTERESIS}, + {"qcom,saw2-avs-limit", MSM_SPM_REG_SAW_AVS_LIMIT}, + {"qcom,saw2-avs-dly", MSM_SPM_REG_SAW_AVS_DLY}, + {"qcom,saw2-spm-dly", MSM_SPM_REG_SAW_SPM_DLY}, + {"qcom,saw2-spm-ctl", MSM_SPM_REG_SAW_SPM_CTL}, + {"qcom,saw2-pmic-data0", MSM_SPM_REG_SAW_PMIC_DATA_0}, + {"qcom,saw2-pmic-data1", MSM_SPM_REG_SAW_PMIC_DATA_1}, + {"qcom,saw2-pmic-data2", MSM_SPM_REG_SAW_PMIC_DATA_2}, + {"qcom,saw2-pmic-data3", MSM_SPM_REG_SAW_PMIC_DATA_3}, + {"qcom,saw2-pmic-data4", MSM_SPM_REG_SAW_PMIC_DATA_4}, + {"qcom,saw2-pmic-data5", MSM_SPM_REG_SAW_PMIC_DATA_5}, + {"qcom,saw2-pmic-data6", MSM_SPM_REG_SAW_PMIC_DATA_6}, + {"qcom,saw2-pmic-data7", MSM_SPM_REG_SAW_PMIC_DATA_7}, + }; + + struct 
mode_of { + char *key; + uint32_t id; + }; + + struct mode_of mode_of_data[] = { + {"qcom,saw2-spm-cmd-wfi", MSM_SPM_MODE_CLOCK_GATING}, + {"qcom,saw2-spm-cmd-ret", MSM_SPM_MODE_RETENTION}, + {"qcom,saw2-spm-cmd-gdhs", MSM_SPM_MODE_GDHS}, + {"qcom,saw2-spm-cmd-spc", + MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE}, + {"qcom,saw2-spm-cmd-pc", MSM_SPM_MODE_POWER_COLLAPSE}, + {"qcom,saw2-spm-cmd-fpc", MSM_SPM_MODE_FASTPC}, + }; + + dev = msm_spm_get_device(pdev); + if (!dev) { + /* + * For partial goods support some CPUs might not be available + * in which case, shouldn't throw an error + */ + return 0; + } + get_cpumask(node, &dev->mask); + + memset(&spm_data, 0, sizeof(struct msm_spm_platform_data)); + memset(&modes, 0, + (MSM_SPM_MODE_NR - 2) * sizeof(struct msm_spm_seq_entry)); + + key = "qcom,saw2-ver-reg"; + ret = of_property_read_u32(node, key, &val); + if (ret) + goto fail; + spm_data.ver_reg = val; + + key = "qcom,vctl-timeout-us"; + ret = of_property_read_u32(node, key, &val); + if (!ret) + spm_data.vctl_timeout_us = val; + + /* SAW start address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -EFAULT; + goto fail; + } + + spm_data.reg_base_addr = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!spm_data.reg_base_addr) { + ret = -ENOMEM; + goto fail; + } + + spm_data.vctl_port = -1; + spm_data.vctl_port_ub = -1; + spm_data.phase_port = -1; + spm_data.pfm_port = -1; + + key = "qcom,vctl-port"; + of_property_read_u32(node, key, &spm_data.vctl_port); + + key = "qcom,vctl-port-ub"; + of_property_read_u32(node, key, &spm_data.vctl_port_ub); + + key = "qcom,phase-port"; + of_property_read_u32(node, key, &spm_data.phase_port); + + key = "qcom,pfm-port"; + of_property_read_u32(node, key, &spm_data.pfm_port); + + /* Q2S (QChannel-2-SPM) register */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "q2s"); + if (res) { + dev->q2s_reg = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!dev->q2s_reg) 
{ + pr_err("%s(): Unable to iomap Q2S register\n", + __func__); + ret = -EADDRNOTAVAIL; + goto fail; + } + } + + key = "qcom,use-qchannel-for-pc"; + dev->qchannel_ignore = !of_property_read_bool(node, key); + + key = "qcom,use-spm-clock-gating"; + dev->use_spm_clk_gating = of_property_read_bool(node, key); + + key = "qcom,use-qchannel-for-wfi"; + dev->use_qchannel_for_wfi = of_property_read_bool(node, key); + + /* HW flush address */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hw-flush"); + if (res) { + dev->flush_base_addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dev->flush_base_addr)) { + ret = PTR_ERR(dev->flush_base_addr); + pr_err("%s(): Unable to iomap hw flush register %d\n", + __func__, ret); + goto fail; + } + } + + /* Sleep req address */ + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "slpreq"); + if (res) { + dev->slpreq_base_addr = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!dev->slpreq_base_addr) { + ret = -ENOMEM; + pr_err("%s(): Unable to iomap slpreq register\n", + __func__); + ret = -EADDRNOTAVAIL; + goto fail; + } + } + + /* + * At system boot, cpus and or clusters can remain in reset. CCI SPM + * will not be triggered unless SPM_LEGACY_MODE bit is set for the + * cluster in reset. Initialize q2s registers and set the + * SPM_LEGACY_MODE bit. 
+ */ + msm_spm_config_q2s(dev, MSM_SPM_MODE_POWER_COLLAPSE); + msm_spm_drv_reg_init(&dev->reg_data, &spm_data); + + for (i = 0; i < ARRAY_SIZE(spm_of_data); i++) { + ret = of_property_read_u32(node, spm_of_data[i].key, &val); + if (ret) + continue; + msm_spm_drv_upd_reg_shadow(&dev->reg_data, spm_of_data[i].id, + val); + } + + for_each_child_of_node(node, n) { + const char *name; + bool bit_set; + int sync; + + if (!n->name) + continue; + + ret = of_property_read_string(n, "qcom,label", &name); + if (ret) + continue; + + for (i = 0; i < ARRAY_SIZE(mode_of_data); i++) + if (!strcmp(name, mode_of_data[i].key)) + break; + + if (i == ARRAY_SIZE(mode_of_data)) { + pr_err("Mode name invalid %s\n", name); + break; + } + + modes[mode_count].mode = mode_of_data[i].id; + modes[mode_count].cmd = + (uint8_t *)of_get_property(n, "qcom,sequence", &len); + if (!modes[mode_count].cmd) { + pr_err("cmd is empty\n"); + continue; + } + + bit_set = of_property_read_bool(n, "qcom,pc_mode"); + modes[mode_count].ctl |= bit_set ? BIT(PC_MODE_BIT) : 0; + + bit_set = of_property_read_bool(n, "qcom,ret_mode"); + modes[mode_count].ctl |= bit_set ? BIT(RET_MODE_BIT) : 0; + + bit_set = of_property_read_bool(n, "qcom,slp_cmd_mode"); + modes[mode_count].ctl |= bit_set ? BIT(SLP_CMD_BIT) : 0; + + bit_set = of_property_read_bool(n, "qcom,isar"); + modes[mode_count].ctl |= bit_set ? BIT(ISAR_BIT) : 0; + + bit_set = of_property_read_bool(n, "qcom,spm_en"); + modes[mode_count].ctl |= bit_set ? 
BIT(SPM_EN_BIT) : 0; + + ret = of_property_read_u32(n, "qcom,event_sync", &sync); + if (!ret) + modes[mode_count].ctl |= sync << EVENT_SYNC_BIT; + + mode_count++; + } + + spm_data.modes = modes; + spm_data.num_modes = mode_count; + + key = "qcom,supports-rpm-hs"; + dev->allow_rpm_hs = of_property_read_bool(pdev->dev.of_node, key); + + ret = msm_spm_dev_init(dev, &spm_data); + if (ret) + pr_err("SPM modes programming is not available from HLOS\n"); + + platform_set_drvdata(pdev, dev); + + for_each_cpu(cpu, &dev->mask) + per_cpu(cpu_vctl_device, cpu) = dev; + + if (!spm_data.num_modes) + return 0; + + cpu = get_cpu_id(pdev->dev.of_node); + + /* For CPUs that are online, the SPM has to be programmed for + * clockgating mode to ensure that it can use SPM for entering these + * low power modes. + */ + cpus_read_lock(); + if ((cpu >= 0) && (cpu < num_possible_cpus()) && (cpu_online(cpu))) + msm_spm_config_low_power_mode(dev, MSM_SPM_MODE_CLOCK_GATING, + false); + cpus_read_unlock(); + return ret; + +fail: + cpu = get_cpu_id(pdev->dev.of_node); + if (dev && (cpu >= num_possible_cpus() || (cpu < 0))) { + for_each_cpu(cpu, &dev->mask) + per_cpu(cpu_vctl_device, cpu) = ERR_PTR(ret); + } + + pr_err("%s: CPU%d SPM device probe failed: %d\n", __func__, cpu, ret); + + return ret; +} + +static int msm_spm_dev_remove(struct platform_device *pdev) +{ + struct msm_spm_device *dev = platform_get_drvdata(pdev); + + list_del(&dev->list); + return 0; +} + +static const struct of_device_id msm_spm_match_table[] = { + {.compatible = "qcom,spm-v2"}, + {}, +}; + +static struct platform_driver msm_spm_device_driver = { + .probe = msm_spm_dev_probe, + .remove = msm_spm_dev_remove, + .driver = { + .name = "spm-v2", + .of_match_table = msm_spm_match_table, + }, +}; + +/** + * msm_spm_device_init(): Device tree initialization function + */ +int __init msm_spm_device_init(void) +{ + static bool registered; + + if (registered) + return 0; + registered = true; + return 
platform_driver_register(&msm_spm_device_driver); +} +arch_initcall(msm_spm_device_init); diff --git a/drivers/soc/qcom/spm_driver.h b/drivers/soc/qcom/spm_driver.h new file mode 100644 index 000000000000..1373960fdeb2 --- /dev/null +++ b/drivers/soc/qcom/spm_driver.h @@ -0,0 +1,132 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* + * Copyright (c) 2011-2017, 2020-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __ARCH_ARM_MACH_MSM_SPM_DEVICES_H +#define __ARCH_ARM_MACH_MSM_SPM_DEVICES_H + +#include + +enum { + MSM_SPM_REG_SAW_CFG, + MSM_SPM_REG_SAW_AVS_CTL, + MSM_SPM_REG_SAW_AVS_HYSTERESIS, + MSM_SPM_REG_SAW_SPM_CTL, + MSM_SPM_REG_SAW_PMIC_DLY, + MSM_SPM_REG_SAW_AVS_LIMIT, + MSM_SPM_REG_SAW_AVS_DLY, + MSM_SPM_REG_SAW_SPM_DLY, + MSM_SPM_REG_SAW_PMIC_DATA_0, + MSM_SPM_REG_SAW_PMIC_DATA_1, + MSM_SPM_REG_SAW_PMIC_DATA_2, + MSM_SPM_REG_SAW_PMIC_DATA_3, + MSM_SPM_REG_SAW_PMIC_DATA_4, + MSM_SPM_REG_SAW_PMIC_DATA_5, + MSM_SPM_REG_SAW_PMIC_DATA_6, + MSM_SPM_REG_SAW_PMIC_DATA_7, + MSM_SPM_REG_SAW_RST, + + MSM_SPM_REG_NR_INITIALIZE = MSM_SPM_REG_SAW_RST, + + MSM_SPM_REG_SAW_ID, + MSM_SPM_REG_SAW_SECURE, + MSM_SPM_REG_SAW_STS0, + MSM_SPM_REG_SAW_STS1, + MSM_SPM_REG_SAW_STS2, + MSM_SPM_REG_SAW_VCTL, + MSM_SPM_REG_SAW_SEQ_ENTRY, + MSM_SPM_REG_SAW_SPM_STS, + MSM_SPM_REG_SAW_AVS_STS, + MSM_SPM_REG_SAW_PMIC_STS, + MSM_SPM_REG_SAW_VERSION, + + MSM_SPM_REG_NR, +}; + +struct msm_spm_seq_entry { + uint32_t mode; + uint8_t *cmd; + uint32_t ctl; +}; + +struct msm_spm_platform_data { + void __iomem *reg_base_addr; + uint32_t reg_init_values[MSM_SPM_REG_NR_INITIALIZE]; + + uint32_t ver_reg; + uint32_t vctl_port; + int vctl_port_ub; + uint32_t phase_port; + uint32_t pfm_port; + + uint8_t awake_vlevel; + uint32_t vctl_timeout_us; + uint32_t avs_timeout_us; + + uint32_t num_modes; + struct msm_spm_seq_entry *modes; +}; + +enum msm_spm_pmic_port { + MSM_SPM_PMIC_VCTL_PORT, + 
MSM_SPM_PMIC_PHASE_PORT, + MSM_SPM_PMIC_PFM_PORT, +}; + +struct msm_spm_driver_data { + uint32_t major; + uint32_t minor; + uint32_t ver_reg; + uint32_t vctl_port; + int vctl_port_ub; + uint32_t phase_port; + uint32_t pfm_port; + void __iomem *reg_base_addr; + uint32_t vctl_timeout_us; + uint32_t avs_timeout_us; + uint32_t reg_shadow[MSM_SPM_REG_NR]; + uint32_t *reg_seq_entry_shadow; + uint32_t *reg_offsets; +}; + +int msm_spm_drv_init(struct msm_spm_driver_data *dev, + struct msm_spm_platform_data *data); +int msm_spm_drv_reg_init(struct msm_spm_driver_data *dev, + struct msm_spm_platform_data *data); +void msm_spm_drv_reinit(struct msm_spm_driver_data *dev, bool seq); +int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev, + uint32_t ctl); +int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev, + unsigned int vlevel); +void dump_regs(struct msm_spm_driver_data *dev, int cpu); +uint32_t msm_spm_drv_get_sts_curr_pmic_data( + struct msm_spm_driver_data *dev); +int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev, + uint8_t *cmd, uint32_t *offset); +void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev); +int msm_spm_drv_set_spm_enable(struct msm_spm_driver_data *dev, + bool enable); +int msm_spm_drv_set_pmic_data(struct msm_spm_driver_data *dev, + enum msm_spm_pmic_port port, unsigned int data); + +int msm_spm_drv_set_avs_limit(struct msm_spm_driver_data *dev, + uint32_t min_lvl, uint32_t max_lvl); + +int msm_spm_drv_set_avs_enable(struct msm_spm_driver_data *dev, + bool enable); +int msm_spm_drv_get_avs_enable(struct msm_spm_driver_data *dev); + +int msm_spm_drv_set_avs_irq_enable(struct msm_spm_driver_data *dev, + enum msm_spm_avs_irq irq, bool enable); +int msm_spm_drv_avs_clear_irq(struct msm_spm_driver_data *dev, + enum msm_spm_avs_irq irq); + +void msm_spm_reinit(void); +int msm_spm_init(struct msm_spm_platform_data *data, int nr_devs); +void msm_spm_drv_upd_reg_shadow(struct msm_spm_driver_data *dev, int id, + int val); 
+uint32_t msm_spm_drv_get_vdd(struct msm_spm_driver_data *dev); +#endif diff --git a/drivers/spi/q2spi-gsi.c b/drivers/spi/q2spi-gsi.c index 0af382331462..364404699d5a 100644 --- a/drivers/spi/q2spi-gsi.c +++ b/drivers/spi/q2spi-gsi.c @@ -19,22 +19,20 @@ static void q2spi_rx_xfer_completion_event(struct msm_gpi_dma_async_tx_cb_param u32 status = 0; if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY) { - Q2SPI_DEBUG(q2spi, "%s for Doorbell\n", __func__); + Q2SPI_DBG_2(q2spi, "%s for Doorbell\n", __func__); xfer = q2spi->db_xfer; } else { xfer = q2spi_pkt->xfer; - Q2SPI_DEBUG(q2spi, "%s for Rx Event\n", __func__); + Q2SPI_DBG_2(q2spi, "%s for Rx Event\n", __func__); } if (!xfer || !xfer->rx_buf) { pr_err("%s rx buf NULL!!!\n", __func__); return; } - Q2SPI_DEBUG(q2spi, "%s cb_param:%p cb_param->len:%d cb_param->status:%d\n", - __func__, cb_param, cb_param->length, cb_param->status); - Q2SPI_DEBUG(q2spi, "%s xfer:%p rx_buf:%p rx_dma:%p rx_len:%d m_cmd_param:%d\n", - __func__, xfer, xfer->rx_buf, (void *)xfer->rx_dma, xfer->rx_len, - q2spi_pkt->m_cmd_param); + Q2SPI_DBG_2(q2spi, "%s cb_param:%p len:%d status:%d xfer:%p rx buf:%p dma:%p len:%d\n", + __func__, cb_param, cb_param->length, cb_param->status, xfer, xfer->rx_buf, + (void *)xfer->rx_dma, xfer->rx_len); /* check status is 0 or EOT for success */ status = cb_param->status; @@ -43,13 +41,13 @@ static void q2spi_rx_xfer_completion_event(struct msm_gpi_dma_async_tx_cb_param q2spi_dump_ipc(q2spi, "rx_xfer_completion_event RX", (char *)xfer->rx_buf, cb_param->length); if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY) { - Q2SPI_DEBUG(q2spi, "%s call db_rx_cb\n", __func__); + Q2SPI_DBG_1(q2spi, "%s call db_rx_cb\n", __func__); complete_all(&q2spi->db_rx_cb); } else { - Q2SPI_DEBUG(q2spi, "%s call rx_cb\n", __func__); + Q2SPI_DBG_1(q2spi, "%s call rx_cb\n", __func__); complete_all(&q2spi->rx_cb); } - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p state=%d vtype:%d\n", + Q2SPI_DBG_2(q2spi, "%s q2spi_pkt:%p state=%d vtype:%d\n", __func__, 
q2spi_pkt, q2spi_pkt->state, q2spi_pkt->vtype); } else { Q2SPI_DEBUG(q2spi, "%s Err length miss-match %d %d\n", @@ -77,10 +75,10 @@ static void q2spi_tx_xfer_completion_event(struct msm_gpi_dma_async_tx_cb_param xfer = q2spi_pkt->xfer; - Q2SPI_DEBUG(q2spi, "%s xfer->tx_len:%d cb_param_length:%d\n", __func__, + Q2SPI_DBG_1(q2spi, "%s xfer->tx_len:%d cb_param_length:%d\n", __func__, xfer->tx_len, cb_param->length); if (cb_param->length == xfer->tx_len) { - Q2SPI_DEBUG(q2spi, "%s complete_tx_cb\n", __func__); + Q2SPI_DBG_1(q2spi, "%s complete_tx_cb\n", __func__); complete_all(&q2spi->tx_cb); } else { Q2SPI_DEBUG(q2spi, "%s Err length miss-match\n", __func__); @@ -94,15 +92,14 @@ static void q2spi_parse_q2spi_status(struct msm_gpi_dma_async_tx_cb_param *cb_pa u32 status = 0; status = cb_param->q2spi_status; - Q2SPI_DEBUG(q2spi, "%s status:%d complete_tx_cb\n", __func__, status); + Q2SPI_DBG_1(q2spi, "%s status:%d complete_tx_cb\n", __func__, status); complete_all(&q2spi->tx_cb); - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p state=%d vtype:%d\n", + Q2SPI_DBG_2(q2spi, "%s q2spi_pkt:%p state=%d vtype:%d\n", __func__, q2spi_pkt, q2spi_pkt->state, q2spi_pkt->vtype); } static void q2spi_parse_cr_header(struct q2spi_geni *q2spi, struct msm_gpi_cb const *cb) { - Q2SPI_DEBUG(q2spi, "%s line:%d\n", __func__, __LINE__); q2spi_doorbell(q2spi, &cb->q2spi_cr_header_event); } @@ -171,13 +168,16 @@ static void q2spi_gsi_tx_callback(void *cb) complete_all(&q2spi->tx_cb); return; } else if (cb_param->completion_code == MSM_GPI_TCE_EOT) { - Q2SPI_DEBUG(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__); + Q2SPI_DBG_2(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__); if (cb_param->tce_type == XFER_COMPLETE_EV_TYPE) { - Q2SPI_DEBUG(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__); + Q2SPI_DBG_1(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__); q2spi_tx_xfer_completion_event(cb_param); } else if (cb_param->tce_type == QUP_TCE_TYPE_Q2SPI_STATUS) { - Q2SPI_DEBUG(q2spi, "%s QUP_TCE_TYPE_Q2SPI_STATUS\n", __func__); 
+ Q2SPI_DBG_1(q2spi, "%s QUP_TCE_TYPE_Q2SPI_STATUS\n", __func__); q2spi_parse_q2spi_status(cb_param); + } else { + Q2SPI_ERROR(q2spi, "%s cb_param->tce_type:%d\n", + __func__, cb_param->tce_type); } } } @@ -214,17 +214,16 @@ static void q2spi_gsi_rx_callback(void *cb) __func__, cb_param->status); return; } else if (cb_param->completion_code == MSM_GPI_TCE_EOT) { - Q2SPI_DEBUG(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__); + Q2SPI_DBG_2(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__); if (cb_param->tce_type == XFER_COMPLETE_EV_TYPE) { /* CR header */ - Q2SPI_DEBUG(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__); + Q2SPI_DBG_1(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__); q2spi_rx_xfer_completion_event(cb_param); } } else { Q2SPI_DEBUG(q2spi, "%s: Err cb_param->completion_code = %d\n", __func__, cb_param->completion_code); } - Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); } static void q2spi_geni_deallocate_chan(struct q2spi_gsi *gsi) @@ -266,7 +265,7 @@ int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi) return -ENOMEM; } q2spi->gsi = gsi; - Q2SPI_DEBUG(q2spi, "%s gsi:%p\n", __func__, gsi); + Q2SPI_DBG_2(q2spi, "%s gsi:%p\n", __func__, gsi); if (gsi->chan_setup) { Q2SPI_DEBUG(q2spi, "%s Err GSI channel already configured\n", __func__); return ret; @@ -279,7 +278,7 @@ int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi) q2spi_kfree(q2spi, q2spi->gsi, __LINE__); return -EIO; } - Q2SPI_DEBUG(q2spi, "%s gsi_tx_c:%p\n", __func__, gsi->tx_c); + Q2SPI_DBG_2(q2spi, "%s gsi_tx_c:%p\n", __func__, gsi->tx_c); gsi->rx_c = dma_request_slave_channel(q2spi->dev, "rx"); if (IS_ERR_OR_NULL(gsi->rx_c)) { Q2SPI_ERROR(q2spi, "%s Err Failed to get rx DMA ch %ld\n", @@ -289,7 +288,7 @@ int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi) q2spi_kfree(q2spi, q2spi->gsi, __LINE__); return -EIO; } - Q2SPI_DEBUG(q2spi, "%s gsi_rx_c:%p\n", __func__, gsi->rx_c); + Q2SPI_DBG_2(q2spi, "%s gsi_rx_c:%p\n", __func__, gsi->rx_c); gsi->tx_ev.init.callback = q2spi_gsi_ch_ev_cb; 
gsi->tx_ev.init.cb_param = q2spi; gsi->tx_ev.cmd = MSM_GPI_INIT; @@ -309,7 +308,7 @@ int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi) Q2SPI_ERROR(q2spi, "%s rx dma slave config ret :%d\n", __func__, ret); goto dmaengine_slave_config_fail; } - Q2SPI_DEBUG(q2spi, "%s q2spi:%p gsi:%p q2spi_gsi:%p\n", __func__, q2spi, gsi, q2spi->gsi); + Q2SPI_DBG_1(q2spi, "%s q2spi:%p gsi:%p q2spi_gsi:%p\n", __func__, q2spi, gsi, q2spi->gsi); q2spi->gsi->chan_setup = true; return ret; @@ -325,7 +324,7 @@ static int get_q2spi_clk_cfg(u32 speed_hz, struct q2spi_geni *q2spi, int *clk_id struct geni_se *se = &q2spi->se; int ret = 0; - Q2SPI_DEBUG(q2spi, "%s Start PID=%d\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s Start PID=%d\n", __func__, current->pid); ret = geni_se_clk_freq_match(&q2spi->se, (speed_hz * q2spi->oversampling), clk_idx, &sclk_freq, false); @@ -345,7 +344,7 @@ static int get_q2spi_clk_cfg(u32 speed_hz, struct q2spi_geni *q2spi, int *clk_id res_freq = (sclk_freq / (*clk_div)); - Q2SPI_DEBUG(q2spi, "%s req %u resultant %lu sclk %lu, idx %d, div %d\n", + Q2SPI_DBG_1(q2spi, "%s req %u resultant %lu sclk %lu, idx %d, div %d\n", __func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div); ret = clk_set_rate(se->clk, sclk_freq); @@ -353,7 +352,7 @@ static int get_q2spi_clk_cfg(u32 speed_hz, struct q2spi_geni *q2spi, int *clk_id Q2SPI_ERROR(q2spi, "%s Err clk_set_rate failed %d\n", __func__, ret); return ret; } - Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s End PID=%d\n", __func__, current->pid); return 0; } @@ -375,7 +374,6 @@ static struct msm_gpi_tre *setup_cfg0_tre(struct q2spi_geni *q2spi) int ssn = S_GP_CNT7_SSN; int cn_delay = M_GP_CNT6_CN_DELAY; - Q2SPI_DEBUG(q2spi, "%s Start PID=%d\n", __func__, current->pid); ret = get_q2spi_clk_cfg(q2spi->cur_speed_hz, q2spi, &idx, &div); if (ret) { Q2SPI_DEBUG(q2spi, "%s Err setting clks:%d\n", __func__, ret); @@ -386,16 +384,14 @@ static struct msm_gpi_tre *setup_cfg0_tre(struct 
q2spi_geni *q2spi) pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN); cs_mode = CS_LESS_MODE; intr_pol = INTR_HIGH_POLARITY; - Q2SPI_DEBUG(q2spi, "%s cs_mode 0x%x word %d pack %d idx %d div %d\n", - __func__, cs_mode, word_len, pack, idx, div); /* config0 */ c0_tre->dword[0] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD0(tsn, pack, tdn, cs_mode, intr_pol, word_len); c0_tre->dword[1] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD1(tan, cs_clk_delay, ssn); c0_tre->dword[2] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD2(cn_delay, idx, div); c0_tre->dword[3] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 0, 1); - Q2SPI_DEBUG(q2spi, "%s c0_tre->dword[0]:0x%x dword[1]:0x%x dword[2]:0x%x dword[3]:0x%x\n", - __func__, c0_tre->dword[0], c0_tre->dword[1], + Q2SPI_DBG_2(q2spi, "%s cs:x%x word:%d pack:%d idx:%d div:%d dword:0x%x 0x%x 0x%x 0x%x\n", + __func__, cs_mode, word_len, pack, idx, div, c0_tre->dword[0], c0_tre->dword[1], c0_tre->dword[2], c0_tre->dword[3]); q2spi->setup_config0 = true; return c0_tre; @@ -435,19 +431,10 @@ msm_gpi_tre *setup_go_tre(int cmd, int cs, int rx_len, int flags, struct q2spi_g link_rx = 1; } go_tre->dword[3] = MSM_GPI_Q2SPI_GO_TRE_DWORD3(link_rx, 0, eot, eob, chain); - Q2SPI_DEBUG(q2spi, "%s rx len %d flags 0x%x cs %d cmd %d eot %d eob %d chain %d\n", - __func__, rx_len, flags, cs, cmd, eot, eob, chain); - if (cmd == Q2SPI_RX_ONLY) - Q2SPI_DEBUG(q2spi, "%s Q2SPI_RX_ONLY\n", __func__); - else if (cmd == Q2SPI_TX_ONLY) - Q2SPI_DEBUG(q2spi, "%s Q2SPI_TX_ONLY\n", __func__); - else if (cmd == Q2SPI_TX_RX) - Q2SPI_DEBUG(q2spi, "%s Q2SPI_TX_RX_ONLY\n", __func__); - - Q2SPI_DEBUG(q2spi, "%s go_tre dword[0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x\n", - __func__, go_tre->dword[0], go_tre->dword[1], go_tre->dword[2], - go_tre->dword[3]); + Q2SPI_DBG_2(q2spi, "%s len:%d flags:0x%x cs:%d cmd:%d chain %d dword:0x%x 0x%x 0x%x 0x%x\n", + __func__, rx_len, flags, cs, cmd, chain, go_tre->dword[0], + go_tre->dword[1], go_tre->dword[2], go_tre->dword[3]); return go_tre; } @@ -463,7 +450,7 @@ msm_gpi_tre 
*setup_dma_tre(struct msm_gpi_tre *tre, dma_addr_t buf, u32 len, tre->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(buf); tre->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(len); tre->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, is_tx, 0, 0); - Q2SPI_DEBUG(q2spi, "%s dma_tre->dword[0]:0x%x dword[1]:0x%x dword[2]:0x%x dword[3]:0x%x\n", + Q2SPI_DBG_2(q2spi, "%s dma_tre->dword[0]:0x%x dword[1]:0x%x dword[2]:0x%x dword[3]:0x%x\n", __func__, tre->dword[0], tre->dword[1], tre->dword[2], tre->dword[3]); return tre; @@ -480,7 +467,7 @@ int check_gsi_transfer_completion_db_rx(struct q2spi_geni *q2spi) Q2SPI_DEBUG(q2spi, "%s Rx[%d] timeout%lu\n", __func__, i, timeout); ret = -ETIMEDOUT; } else { - Q2SPI_DEBUG(q2spi, "%s rx completed\n", __func__); + Q2SPI_DBG_1(q2spi, "%s rx completed\n", __func__); } if (q2spi->gsi->qup_gsi_err) { @@ -499,7 +486,7 @@ int check_gsi_transfer_completion(struct q2spi_geni *q2spi) unsigned long timeleft = 0, xfer_timeout = 0; xfer_timeout = XFER_TIMEOUT_OFFSET; - Q2SPI_DEBUG(q2spi, "%s tx_eot:%d rx_eot:%d\n", __func__, + Q2SPI_DBG_1(q2spi, "%s tx_eot:%d rx_eot:%d\n", __func__, q2spi->gsi->num_tx_eot, q2spi->gsi->num_rx_eot); for (i = 0 ; i < q2spi->gsi->num_tx_eot; i++) { timeleft = @@ -509,7 +496,7 @@ int check_gsi_transfer_completion(struct q2spi_geni *q2spi) ret = -ETIMEDOUT; goto err_gsi_geni_transfer; } else if (!q2spi->gsi->qup_gsi_err) { - Q2SPI_DEBUG(q2spi, "%s tx completed\n", __func__); + Q2SPI_DBG_1(q2spi, "%s tx completed\n", __func__); } } @@ -521,7 +508,7 @@ int check_gsi_transfer_completion(struct q2spi_geni *q2spi) ret = -ETIMEDOUT; goto err_gsi_geni_transfer; } else if (!q2spi->gsi->qup_gsi_err) { - Q2SPI_DEBUG(q2spi, "%s rx completed\n", __func__); + Q2SPI_DBG_1(q2spi, "%s rx completed\n", __func__); } } err_gsi_geni_transfer: @@ -564,10 +551,8 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt) xfer = q2spi_pkt->xfer; cmd = xfer->cmd; - Q2SPI_DEBUG(q2spi, "%s PID=%d xfer:%p vtype=%d\n", __func__, - current->pid, xfer, 
q2spi_pkt->vtype); - - Q2SPI_DEBUG(q2spi, "%s cmd:%d q2spi_pkt:%p\n", __func__, cmd, q2spi_pkt); + Q2SPI_DBG_2(q2spi, "%s PID=%d xfer:%p vtype=%d cmd:%d q2spi_pkt:%p\n", __func__, + current->pid, xfer, q2spi_pkt->vtype, cmd, q2spi_pkt); q2spi->gsi->num_tx_eot = 0; q2spi->gsi->num_rx_eot = 0; q2spi->gsi->qup_gsi_err = false; @@ -605,7 +590,7 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt) tx_nent += 2; rx_nent++; } - Q2SPI_DEBUG(q2spi, "%s tx_nent:%d rx_nent:%d\n", __func__, tx_nent, rx_nent); + Q2SPI_DBG_2(q2spi, "%s tx_nent:%d rx_nent:%d\n", __func__, tx_nent, rx_nent); sg_init_table(xfer_tx_sg, tx_nent); if (rx_nent) sg_init_table(xfer_rx_sg, rx_nent); @@ -632,7 +617,7 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt) q2spi->gsi->tx_desc->callback_param = &q2spi->gsi->tx_cb_param; q2spi->gsi->tx_cb_param.userdata = q2spi_pkt; q2spi->gsi->tx_cookie = dmaengine_submit(q2spi->gsi->tx_desc); - Q2SPI_DEBUG(q2spi, "%s Tx cb_param:%p\n", __func__, q2spi->gsi->tx_desc->callback_param); + Q2SPI_DBG_2(q2spi, "%s Tx cb_param:%p\n", __func__, q2spi->gsi->tx_desc->callback_param); if (dma_submit_error(q2spi->gsi->tx_cookie)) { Q2SPI_DEBUG(q2spi, "%s Err dmaengine_submit failed (%d)\n", __func__, q2spi->gsi->tx_cookie); @@ -660,7 +645,7 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt) q2spi->gsi->rx_cb_param.userdata = q2spi_pkt; q2spi->gsi->num_rx_eot++; q2spi->gsi->rx_cookie = dmaengine_submit(q2spi->gsi->rx_desc); - Q2SPI_DEBUG(q2spi, "%s Rx cb_param:%p\n", __func__, + Q2SPI_DBG_2(q2spi, "%s Rx cb_param:%p\n", __func__, q2spi->gsi->rx_desc->callback_param); if (dma_submit_error(q2spi->gsi->rx_cookie)) { Q2SPI_DEBUG(q2spi, "%s Err dmaengine_submit failed (%d)\n", @@ -688,7 +673,7 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt) q2spi->gsi->db_rx_cb_param.userdata = q2spi_pkt; q2spi->gsi->num_rx_eot++; q2spi->gsi->rx_cookie = dmaengine_submit(q2spi->gsi->db_rx_desc); - Q2SPI_DEBUG(q2spi, "%s DB cb_param:%p\n", __func__, + 
Q2SPI_DBG_1(q2spi, "%s DB cb_param:%p\n", __func__, q2spi->gsi->db_rx_desc->callback_param); if (dma_submit_error(q2spi->gsi->rx_cookie)) { Q2SPI_DEBUG(q2spi, "%s Err dmaengine_submit failed (%d)\n", @@ -698,7 +683,7 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt) } } if (cmd & Q2SPI_RX_ONLY) { - Q2SPI_DEBUG(q2spi, "%s rx_c dma_async_issue_pending\n", __func__); + Q2SPI_DBG_1(q2spi, "%s rx_c dma_async_issue_pending\n", __func__); q2spi_dump_ipc(q2spi, "GSI DMA-RX", (char *)xfer->rx_buf, tx_rx_len); if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY) reinit_completion(&q2spi->db_rx_cb); @@ -711,10 +696,10 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt) q2spi_dump_ipc(q2spi, "GSI DMA TX", (char *)xfer->tx_buf, Q2SPI_HEADER_LEN + tx_rx_len); - Q2SPI_DEBUG(q2spi, "%s tx_c dma_async_issue_pending\n", __func__); + Q2SPI_DBG_1(q2spi, "%s tx_c dma_async_issue_pending\n", __func__); reinit_completion(&q2spi->tx_cb); dma_async_issue_pending(q2spi->gsi->tx_c); - Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s End PID=%d\n", __func__, current->pid); return 0; } @@ -724,11 +709,11 @@ void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void * struct q2spi_geni *q2spi = ptr; int num_crs, i = 0; - Q2SPI_DEBUG(q2spi, "%s event:%s\n", __func__, TO_GPI_CB_EVENT_STR(cb->cb_event)); + Q2SPI_DBG_1(q2spi, "%s event:%s\n", __func__, TO_GPI_CB_EVENT_STR(cb->cb_event)); switch (cb->cb_event) { case MSM_GPI_QUP_NOTIFY: case MSM_GPI_QUP_MAX_EVENT: - Q2SPI_DEBUG(q2spi, "%s cb_ev %s status %llu ts %llu count %llu\n", + Q2SPI_DBG_1(q2spi, "%s cb_ev %s status %llu ts %llu count %llu\n", __func__, TO_GPI_CB_EVENT_STR(cb->cb_event), cb->status, cb->timestamp, cb->count); break; @@ -738,10 +723,10 @@ void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void * case MSM_GPI_QUP_PENDING_EVENT: case MSM_GPI_QUP_EOT_DESC_MISMATCH: case MSM_GPI_QUP_SW_ERROR: - Q2SPI_DEBUG(q2spi, "%s cb_ev %s status %llu ts 
%llu count %llu\n", + Q2SPI_DBG_1(q2spi, "%s cb_ev %s status %llu ts %llu count %llu\n", __func__, TO_GPI_CB_EVENT_STR(cb->cb_event), cb->status, cb->timestamp, cb->count); - Q2SPI_DEBUG(q2spi, "%s err_routine:%u err_type:%u err.code%u\n", + Q2SPI_DBG_2(q2spi, "%s err_routine:%u err_type:%u err.code%u\n", __func__, cb->error_log.routine, cb->error_log.type, cb->error_log.error_code); q2spi->gsi->qup_gsi_err = true; @@ -777,7 +762,7 @@ void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void * atomic_set(&q2spi->sma_wr_pending, 1); } } - Q2SPI_DEBUG(q2spi, "%s GSI doorbell event, db_pending:%d\n", + Q2SPI_DBG_2(q2spi, "%s GSI doorbell event, db_pending:%d\n", __func__, atomic_read(&q2spi->doorbell_pending)); q2spi_parse_cr_header(q2spi, cb); break; diff --git a/drivers/spi/q2spi-msm-geni.c b/drivers/spi/q2spi-msm-geni.c index 22ed83dd5bb0..79bc26c3dce5 100644 --- a/drivers/spi/q2spi-msm-geni.c +++ b/drivers/spi/q2spi-msm-geni.c @@ -61,7 +61,7 @@ void *q2spi_kzalloc(struct q2spi_geni *q2spi, int size, int line) if (ptr) { atomic_inc(&q2spi->alloc_count); - Q2SPI_DEBUG(q2spi, "Allocated 0x%p at %d, count:%d\n", + Q2SPI_DBG_2(q2spi, "Allocated 0x%p at %d, count:%d\n", ptr, line, atomic_read(&q2spi->alloc_count)); } return ptr; @@ -81,7 +81,7 @@ void q2spi_kfree(struct q2spi_geni *q2spi, void *ptr, int line) atomic_dec(&q2spi->alloc_count); kfree(ptr); } - Q2SPI_DEBUG(q2spi, "Freeing 0x%p from %d, count:%d\n", + Q2SPI_DBG_2(q2spi, "Freeing 0x%p from %d, count:%d\n", ptr, line, atomic_read(&q2spi->alloc_count)); } @@ -99,17 +99,15 @@ void __q2spi_dump_ipc(struct q2spi_geni *q2spi, char *prefix, } /** - * q2spi_dump_ipc - Log dump function for debugging + * q2spi_dump_ipc_always - Log dump function * @q2spi: Pointer to main q2spi_geni structure * @prefix: Prefix to use in log * @str: String to dump in log * @size: Size of data bytes per line * - * free bulk dma mapped buffers allocated by q2spi_pre_alloc_buffers api - * * Return: none */ -void 
q2spi_dump_ipc(struct q2spi_geni *q2spi, char *prefix, char *str, int size) +void q2spi_dump_ipc_always(struct q2spi_geni *q2spi, char *prefix, char *str, int size) { int offset = 0, total_bytes = size; @@ -130,6 +128,64 @@ void q2spi_dump_ipc(struct q2spi_geni *q2spi, char *prefix, char *str, int size) __q2spi_dump_ipc(q2spi, prefix, (char *)str + offset, total_bytes, offset, size); } +/** + * q2spi_dump_ipc - Log dump function with log level + * @q2spi: Pointer to main q2spi_geni structure + * @prefix: Prefix to use in log + * @str: String to dump in log + * @size: Size of data bytes per line + * + * Return: none + */ +void q2spi_dump_ipc(struct q2spi_geni *q2spi, char *prefix, char *str, int size) +{ + if (q2spi->q2spi_log_lvl < 1) + return; + + q2spi_dump_ipc_always(q2spi, prefix, str, size); +} + +/* + * log_level_show() - Prints the value stored in log_level sysfs entry + * + * @dev: pointer to device + * @attr: device attributes + * @buf: buffer to store the log_level value + * + * Return: prints q2spi log level value + */ +static ssize_t log_level_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct q2spi_geni *q2spi = get_q2spi(dev); + + return scnprintf(buf, sizeof(int), "%d\n", q2spi->q2spi_log_lvl); +} + +/* + * log_level_store() - store the q2spi log_level sysfs value + * + * @dev: pointer to device + * @attr: device attributes + * @buf: buffer which contains the log_level in string format + * @size: returns the value of size + * + * Return: Size copied in the buffer + */ +static ssize_t log_level_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct q2spi_geni *q2spi = get_q2spi(dev); + + if (kstrtoint(buf, 0, &q2spi->q2spi_log_lvl)) { + dev_err(dev, "%s Invalid input\n", __func__); + return -EINVAL; + } + + return size; +} + +static DEVICE_ATTR_RW(log_level); + /* * max_dump_size_show() - Prints the value stored in max_dump_size sysfs entry * @@ -203,9 +259,9 @@ const char 
*q2spi_pkt_state(struct q2spi_packet *q2spi_pkt) void q2spi_tx_queue_status(struct q2spi_geni *q2spi) { if (list_empty(&q2spi->tx_queue_list)) - Q2SPI_DEBUG(q2spi, "%s tx_queue empty\n", __func__); + Q2SPI_DBG_2(q2spi, "%s tx_queue empty\n", __func__); else - Q2SPI_DEBUG(q2spi, "%s tx_queue not empty!\n", __func__); + Q2SPI_DBG_2(q2spi, "%s tx_queue not empty!\n", __func__); } /** @@ -217,7 +273,7 @@ void q2spi_tx_queue_status(struct q2spi_geni *q2spi) void q2spi_free_q2spi_pkt(struct q2spi_packet *q2spi_pkt, int line) { if (q2spi_pkt->xfer) { - Q2SPI_DEBUG(q2spi_pkt->q2spi, "%s q2spi_pkt=%p q2spi_pkt->xfer=%p\n", + Q2SPI_DBG_2(q2spi_pkt->q2spi, "%s q2spi_pkt=%p q2spi_pkt->xfer=%p\n", __func__, q2spi_pkt, q2spi_pkt->xfer); q2spi_kfree(q2spi_pkt->q2spi, q2spi_pkt->xfer, line); q2spi_kfree(q2spi_pkt->q2spi, q2spi_pkt, line); @@ -246,7 +302,7 @@ struct q2spi_packet *q2spi_alloc_q2spi_pkt(struct q2spi_geni *q2spi, int line) q2spi_pkt = NULL; return NULL; } - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt=%p PID=%d\n", __func__, q2spi_pkt, current->pid); + Q2SPI_DBG_2(q2spi, "%s q2spi_pkt=%p PID=%d\n", __func__, q2spi_pkt, current->pid); init_completion(&q2spi_pkt->bulk_wait); init_completion(&q2spi_pkt->wait_for_db); q2spi_pkt->q2spi = q2spi; @@ -404,19 +460,19 @@ static int q2spi_free_resp_buf(struct q2spi_geni *q2spi) static void q2spi_free_dma_buf(struct q2spi_geni *q2spi) { if (q2spi_free_bulk_buf(q2spi)) - Q2SPI_DEBUG(q2spi, "%s Err free bulk buf fail\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Err free bulk buf fail\n", __func__); if (q2spi_free_cr_buf(q2spi)) - Q2SPI_DEBUG(q2spi, "%s Err free cr buf fail\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Err free cr buf fail\n", __func__); if (q2spi_free_var5_buf(q2spi)) - Q2SPI_DEBUG(q2spi, "%s Err free var5 buf fail\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Err free var5 buf fail\n", __func__); if (q2spi_free_var1_buf(q2spi)) - Q2SPI_DEBUG(q2spi, "%s Err free var1 buf fail\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Err free var1 buf fail\n", 
__func__); if (q2spi_free_resp_buf(q2spi)) - Q2SPI_DEBUG(q2spi, "%s Err free resp buf fail\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Err free resp buf fail\n", __func__); } /** @@ -469,13 +525,13 @@ static int q2spi_pre_alloc_buffers(struct q2spi_geni *q2spi) goto exit_dealloc; } - Q2SPI_DEBUG(q2spi, "%s var1_buf[%d] virt:%p phy:%p\n", __func__, i, + Q2SPI_DBG_2(q2spi, "%s var1_buf[%d] virt:%p phy:%p\n", __func__, i, (void *)q2spi->var1_buf[i], (void *)q2spi->var1_dma_buf[i]); - Q2SPI_DEBUG(q2spi, "%s var5_buf[%d] virt:%p phy:%p\n", __func__, i, + Q2SPI_DBG_2(q2spi, "%s var5_buf[%d] virt:%p phy:%p\n", __func__, i, (void *)q2spi->var5_buf[i], (void *)q2spi->var5_dma_buf[i]); - Q2SPI_DEBUG(q2spi, "%s cr_buf[%d] virt:%p phy:%p\n", __func__, i, + Q2SPI_DBG_2(q2spi, "%s cr_buf[%d] virt:%p phy:%p\n", __func__, i, (void *)q2spi->cr_buf[i], (void *)q2spi->cr_dma_buf[i]); - Q2SPI_DEBUG(q2spi, "%s bulk_buf[%d] virt:%p phy:%p\n", __func__, i, + Q2SPI_DBG_2(q2spi, "%s bulk_buf[%d] virt:%p phy:%p\n", __func__, i, (void *)q2spi->bulk_buf[i], (void *)q2spi->bulk_dma_buf[i]); } @@ -488,7 +544,7 @@ static int q2spi_pre_alloc_buffers(struct q2spi_geni *q2spi) goto exit_dealloc; } - Q2SPI_DEBUG(q2spi, "%s resp_buf[%d] virt:%p phy:%p\n", __func__, i, + Q2SPI_DBG_2(q2spi, "%s resp_buf[%d] virt:%p phy:%p\n", __func__, i, (void *)q2spi->resp_buf[i], (void *)q2spi->resp_dma_buf[i]); } return 0; @@ -517,13 +573,13 @@ q2spi_unmap_dma_buf_used(struct q2spi_geni *q2spi, dma_addr_t tx_dma, dma_addr_t return; } - Q2SPI_DEBUG(q2spi, "%s PID:%d for tx_dma:%p rx_dma:%p\n", __func__, + Q2SPI_DBG_2(q2spi, "%s PID:%d for tx_dma:%p rx_dma:%p\n", __func__, current->pid, (void *)tx_dma, (void *)rx_dma); for (i = 0; i < Q2SPI_MAX_BUF; i++) { if (tx_dma == q2spi->var1_dma_buf[i]) { if (q2spi->var1_buf_used[i]) { - Q2SPI_DEBUG(q2spi, "%s UNMAP var1_buf[%d] virt:%p phy:%p\n", + Q2SPI_DBG_2(q2spi, "%s UNMAP var1_buf[%d] virt:%p phy:%p\n", __func__, i, (void *)q2spi->var1_buf[i], (void 
*)q2spi->var1_dma_buf[i]); q2spi->var1_buf_used[i] = NULL; @@ -531,7 +587,7 @@ q2spi_unmap_dma_buf_used(struct q2spi_geni *q2spi, dma_addr_t tx_dma, dma_addr_t } } else if (tx_dma == q2spi->var5_dma_buf[i]) { if (q2spi->var5_buf_used[i]) { - Q2SPI_DEBUG(q2spi, "%s UNMAP var5_buf[%d] virt:%p phy:%p\n", + Q2SPI_DBG_2(q2spi, "%s UNMAP var5_buf[%d] virt:%p phy:%p\n", __func__, i, (void *)q2spi->var5_buf[i], (void *)q2spi->var5_dma_buf[i]); q2spi->var5_buf_used[i] = NULL; @@ -540,7 +596,7 @@ q2spi_unmap_dma_buf_used(struct q2spi_geni *q2spi, dma_addr_t tx_dma, dma_addr_t } if (rx_dma == q2spi->cr_dma_buf[i]) { if (q2spi->cr_buf_used[i]) { - Q2SPI_DEBUG(q2spi, "%s UNMAP cr_buf[%d] virt:%p phy:%p\n", + Q2SPI_DBG_2(q2spi, "%s UNMAP cr_buf[%d] virt:%p phy:%p\n", __func__, i, (void *)q2spi->cr_buf[i], (void *)q2spi->cr_dma_buf[i]); q2spi->cr_buf_used[i] = NULL; @@ -548,7 +604,7 @@ q2spi_unmap_dma_buf_used(struct q2spi_geni *q2spi, dma_addr_t tx_dma, dma_addr_t } } else if (rx_dma == q2spi->bulk_dma_buf[i]) { if (q2spi->bulk_buf_used[i]) { - Q2SPI_DEBUG(q2spi, "%s UNMAP bulk_buf[%d] virt:%p phy:%p\n", + Q2SPI_DBG_2(q2spi, "%s UNMAP bulk_buf[%d] virt:%p phy:%p\n", __func__, i, (void *)q2spi->bulk_buf[i], (void *)q2spi->bulk_dma_buf[i]); q2spi->bulk_buf_used[i] = NULL; @@ -557,9 +613,9 @@ q2spi_unmap_dma_buf_used(struct q2spi_geni *q2spi, dma_addr_t tx_dma, dma_addr_t } } if (!unmapped) - Q2SPI_DEBUG(q2spi, "%s PID:%d Err unmap fail for tx_dma:%p rx_dma:%p\n", + Q2SPI_DBG_2(q2spi, "%s PID:%d Err unmap fail for tx_dma:%p rx_dma:%p\n", __func__, current->pid, (void *)tx_dma, (void *)rx_dma); - Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s End PID=%d\n", __func__, current->pid); } /** @@ -573,16 +629,15 @@ q2spi_unmap_dma_buf_used(struct q2spi_geni *q2spi, dma_addr_t tx_dma, dma_addr_t void q2spi_unmap_var_bufs(struct q2spi_geni *q2spi, struct q2spi_packet *q2spi_pkt) { if (q2spi_pkt->vtype == VARIANT_1_LRA || q2spi_pkt->vtype == 
VARIANT_1_HRF) { - Q2SPI_DEBUG(q2spi, "%s Unmapping Var1 buffers..\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Unmapping Var1 buffers..\n", __func__); q2spi_unmap_dma_buf_used(q2spi, q2spi_pkt->var1_tx_dma, q2spi_pkt->var1_rx_dma); } else if (q2spi_pkt->vtype == VARIANT_5) { - Q2SPI_DEBUG(q2spi, "%s Unmapping Var5 buffers..\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Unmapping Var5 buffers..\n", __func__); q2spi_unmap_dma_buf_used(q2spi, q2spi_pkt->var5_tx_dma, q2spi_pkt->var5_rx_dma); } else if (q2spi_pkt->vtype == VARIANT_5_HRF) { - Q2SPI_DEBUG(q2spi, "%s Unmapping Var1 and Var5 buffers..\n", - __func__); + Q2SPI_DBG_1(q2spi, "%s Unmapping Var1 and Var5 buffers..\n", __func__); q2spi_unmap_dma_buf_used(q2spi, q2spi_pkt->var1_tx_dma, (dma_addr_t)NULL); q2spi_unmap_dma_buf_used(q2spi, q2spi_pkt->var5_tx_dma, @@ -609,13 +664,12 @@ static int q2spi_get_doorbell_rx_buf(struct q2spi_geni *q2spi) /* Pick rx buffers from pre allocated pool */ for (i = 0; i < Q2SPI_MAX_BUF; i++) { if (!q2spi->cr_buf_used[i]) { - Q2SPI_DEBUG(q2spi, "%s q2spi_db_xfer:%p\n", __func__, q2spi->db_xfer); xfer->rx_buf = q2spi->cr_buf[i]; xfer->rx_dma = q2spi->cr_dma_buf[i]; q2spi->cr_buf_used[i] = q2spi->cr_buf[i]; q2spi->rx_buf = xfer->rx_buf; - Q2SPI_DEBUG(q2spi, "ALLOC %s db rx_buf:%p rx_dma:%p\n", - __func__, xfer->rx_buf, (void *)xfer->rx_dma); + Q2SPI_DBG_2(q2spi, "ALLOC %s db xfer:%p rx_buf:%p rx_dma:%p\n", + __func__, q2spi->db_xfer, xfer->rx_buf, (void *)xfer->rx_dma); memset(xfer->rx_buf, 0xdb, RX_DMA_CR_BUF_SIZE); return 0; } @@ -645,13 +699,13 @@ static void q2spi_unmap_rx_buf(struct q2spi_packet *q2spi_pkt) return; } - Q2SPI_DEBUG(q2spi, "%s PID:%d rx_buf %p %p\n", __func__, + Q2SPI_DBG_1(q2spi, "%s PID:%d rx_buf %p %p\n", __func__, current->pid, (void *)xfer->rx_buf, (void *)xfer->rx_dma); for (i = 0; i < Q2SPI_MAX_RESP_BUF; i++) { if (xfer->rx_dma == q2spi->resp_dma_buf[i]) { if (q2spi->resp_buf_used[i]) { - Q2SPI_DEBUG(q2spi, "%s UNMAP rx_buf[%d] virt:%p phy:%p\n", + Q2SPI_DBG_1(q2spi, 
"%s UNMAP rx_buf[%d] virt:%p phy:%p\n", __func__, i, (void *)q2spi->resp_buf[i], (void *)q2spi->resp_dma_buf[i]); q2spi->resp_buf_used[i] = NULL; @@ -660,9 +714,9 @@ static void q2spi_unmap_rx_buf(struct q2spi_packet *q2spi_pkt) } } if (!unmapped) - Q2SPI_DEBUG(q2spi, "%s PID:%d Err unmap fail for rx_dma:%p\n", + Q2SPI_DBG_1(q2spi, "%s PID:%d Err unmap fail for rx_dma:%p\n", __func__, current->pid, (void *)xfer->rx_dma); - Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s End PID=%d\n", __func__, current->pid); } /** @@ -682,7 +736,6 @@ static int q2spi_get_rx_buf(struct q2spi_packet *q2spi_pkt, int len) struct q2spi_dma_transfer *xfer = q2spi_pkt->xfer; int i; - Q2SPI_DEBUG(q2spi, "%s len:%d\n", __func__, len); if (!len) { Q2SPI_DEBUG(q2spi, "%s Err Zero length for alloc\n", __func__); return -EINVAL; @@ -694,9 +747,9 @@ static int q2spi_get_rx_buf(struct q2spi_packet *q2spi_pkt, int len) xfer->rx_buf = q2spi->resp_buf[i]; xfer->rx_dma = q2spi->resp_dma_buf[i]; memset(xfer->rx_buf, 0xba, Q2SPI_RESP_BUF_SIZE); - Q2SPI_DEBUG(q2spi, "%s ALLOC rx buf %p dma_buf:%p\n", + Q2SPI_DBG_1(q2spi, "%s ALLOC rx buf %p dma_buf:%p len:%d\n", __func__, (void *)q2spi->resp_buf[i], - (void *)q2spi->resp_dma_buf[i]); + (void *)q2spi->resp_dma_buf[i], len); return 0; } } @@ -736,7 +789,7 @@ static int q2spi_hrf_entry_format_sleep(struct q2spi_geni *q2spi, struct q2spi_r q2spi_hrf_req->cmd = LOCAL_REG_WRITE; memcpy(q2spi_hrf_req->data_buff, &hrf_entry, sizeof(struct q2spi_mc_hrf_entry)); - Q2SPI_DEBUG(q2spi, "%s End q2spi_hrf_req:%p\n", __func__, q2spi_hrf_req); + Q2SPI_DBG_2(q2spi, "%s End q2spi_hrf_req:%p\n", __func__, q2spi_hrf_req); return 0; } /** @@ -784,7 +837,7 @@ static int q2spi_hrf_entry_format(struct q2spi_geni *q2spi, struct q2spi_request return -EINVAL; } hrf_entry.flow_id = flow_id; - Q2SPI_DEBUG(q2spi, "%s flow_id:%d len:%d", __func__, hrf_entry.flow_id, q2spi_req.data_len); + Q2SPI_DBG_1(q2spi, "%s flow_id:%d len:%d", __func__, 
hrf_entry.flow_id, q2spi_req.data_len); if (q2spi_req.data_len % 4) { hrf_entry.dwlen_part1 = (q2spi_req.data_len / 4) & 0xF; hrf_entry.dwlen_part2 = ((q2spi_req.data_len / 4) >> 4) & 0xFF; @@ -794,7 +847,7 @@ static int q2spi_hrf_entry_format(struct q2spi_geni *q2spi, struct q2spi_request hrf_entry.dwlen_part2 = ((q2spi_req.data_len / 4 - 1) >> 4) & 0xFF; hrf_entry.dwlen_part3 = ((q2spi_req.data_len / 4 - 1) >> 12) & 0xFF; } - Q2SPI_DEBUG(q2spi, "%s hrf_entry dwlen part1:%d part2:%d part3:%d\n", + Q2SPI_DBG_2(q2spi, "%s hrf_entry dwlen part1:%d part2:%d part3:%d\n", __func__, hrf_entry.dwlen_part1, hrf_entry.dwlen_part2, hrf_entry.dwlen_part3); hrf_entry.arg2 = q2spi_req.end_point; hrf_entry.arg3 = q2spi_req.proto_ind; @@ -819,7 +872,7 @@ void q2spi_wait_for_doorbell_setup_ready(struct q2spi_geni *q2spi) long timeout = 0; if (!q2spi->doorbell_setup) { - Q2SPI_DEBUG(q2spi, "%s: Waiting for Doorbell buffers to be setup\n", __func__); + Q2SPI_DBG_1(q2spi, "%s: Waiting for Doorbell buffers to be setup\n", __func__); reinit_completion(&q2spi->db_setup_wait); timeout = wait_for_completion_interruptible_timeout(&q2spi->db_setup_wait, msecs_to_jiffies(50)); @@ -863,16 +916,16 @@ int q2spi_map_doorbell_rx_buf(struct q2spi_geni *q2spi) struct q2spi_packet *q2spi_pkt; int ret = 0; - Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid); + Q2SPI_DBG_1(q2spi, "%s Enter PID=%d\n", __func__, current->pid); if (q2spi_sys_restart) return -ERESTARTSYS; if (q2spi->port_release || atomic_read(&q2spi->is_suspend)) { - Q2SPI_DEBUG(q2spi, "%s Port being closed or suspend return\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Port being closed or suspend return\n", __func__); return 0; } if (q2spi->db_xfer->rx_dma) { - Q2SPI_DEBUG(q2spi, "%s Doorbell buffer already mapped\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Doorbell buffer already mapped\n", __func__); return 0; } @@ -896,19 +949,19 @@ int q2spi_map_doorbell_rx_buf(struct q2spi_geni *q2spi) q2spi->db_xfer->rx_len = 
RX_DMA_CR_BUF_SIZE; q2spi->db_xfer->q2spi_pkt = q2spi_pkt; q2spi_pkt->q2spi = q2spi; - Q2SPI_DEBUG(q2spi, "%s PID=%d wait for gsi_lock\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s PID=%d wait for gsi_lock\n", __func__, current->pid); mutex_lock(&q2spi->gsi_lock); - Q2SPI_DEBUG(q2spi, "%s PID=%d acquired gsi_lock\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s PID=%d acquired gsi_lock\n", __func__, current->pid); ret = q2spi_setup_gsi_xfer(q2spi_pkt); if (ret) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_setup_gsi_xfer failed: %d\n", __func__, ret); mutex_unlock(&q2spi->gsi_lock); return ret; } - Q2SPI_DEBUG(q2spi, "%s PID=%d release gsi_lock\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s PID=%d release gsi_lock\n", __func__, current->pid); mutex_unlock(&q2spi->gsi_lock); q2spi->doorbell_setup = true; - Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s End PID=%d\n", __func__, current->pid); complete_all(&q2spi->db_setup_wait); return ret; } @@ -947,7 +1000,7 @@ void *q2spi_alloc_host_variant(struct q2spi_geni *q2spi, int len) void q2spi_doorbell(struct q2spi_geni *q2spi, const struct qup_q2spi_cr_header_event *q2spi_cr_hdr_event) { - Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s Enter PID=%d\n", __func__, current->pid); if (q2spi_sys_restart) return; @@ -955,7 +1008,6 @@ void q2spi_doorbell(struct q2spi_geni *q2spi, memcpy(&q2spi->q2spi_cr_hdr_event, q2spi_cr_hdr_event, sizeof(struct qup_q2spi_cr_header_event)); queue_work(q2spi->doorbell_wq, &q2spi->q2spi_doorbell_work); - Q2SPI_DEBUG(q2spi, "%s End work queued PID=%d\n", __func__, current->pid); } /** @@ -986,22 +1038,20 @@ struct q2spi_cr_packet *q2spi_prepare_cr_pkt(struct q2spi_geni *q2spi) } spin_lock_irqsave(&q2spi->cr_queue_lock, flags); q2spi_cr_pkt->num_valid_crs = q2spi_cr_hdr_event->byte0_len; - Q2SPI_DEBUG(q2spi, "%s q2spi_cr_pkt:%p hdr_0:0x%x no_of_crs=%d\n", __func__, + Q2SPI_DBG_2(q2spi, "%s q2spi_cr_pkt:%p 
hdr_0:0x%x no_of_crs=%d\n", __func__, q2spi_cr_pkt, q2spi_cr_hdr_event->cr_hdr[0], q2spi_cr_pkt->num_valid_crs); if (q2spi_cr_hdr_event->byte0_err) - Q2SPI_DEBUG(q2spi, "%s Error: q2spi_cr_hdr_event->byte0_err=%d\n", + Q2SPI_DEBUG(q2spi, "%s Err q2spi_cr_hdr_event->byte0_err=%d\n", __func__, q2spi_cr_hdr_event->byte0_err); for (i = 0; i < q2spi_cr_pkt->num_valid_crs; i++) { - Q2SPI_DEBUG(q2spi, "%s hdr_[%d]:0x%x\n", - __func__, i, q2spi_cr_hdr_event->cr_hdr[i]); q2spi_cr_pkt->cr_hdr[i].cmd = (q2spi_cr_hdr_event->cr_hdr[i]) & 0xF; q2spi_cr_pkt->cr_hdr[i].flow = (q2spi_cr_hdr_event->cr_hdr[i] >> 4) & 0x1; q2spi_cr_pkt->cr_hdr[i].type = (q2spi_cr_hdr_event->cr_hdr[i] >> 5) & 0x3; q2spi_cr_pkt->cr_hdr[i].parity = (q2spi_cr_hdr_event->cr_hdr[i] >> 7) & 0x1; - Q2SPI_DEBUG(q2spi, "%s CR HDR[%d] cmd/opcode:%d C_flow:%d type:%d parity:%d\n", - __func__, i, q2spi_cr_pkt->cr_hdr[i].cmd, + Q2SPI_DBG_1(q2spi, "%s CR HDR[%d]:0x%x cmd/opcode:%d C_flow:%d type:%d parity:%d\n", + __func__, i, q2spi_cr_hdr_event->cr_hdr[i], q2spi_cr_pkt->cr_hdr[i].cmd, q2spi_cr_pkt->cr_hdr[i].flow, q2spi_cr_pkt->cr_hdr[i].type, q2spi_cr_pkt->cr_hdr[i].parity); if ((q2spi_cr_hdr_event->cr_hdr[i] & 0xF) == CR_EXTENSION) { @@ -1010,7 +1060,7 @@ struct q2spi_cr_packet *q2spi_prepare_cr_pkt(struct q2spi_geni *q2spi) (q2spi_cr_hdr_event->cr_hdr[i] >> 4) & 0x3; q2spi_cr_pkt->ext_cr_hdr.parity = (q2spi_cr_hdr_event->cr_hdr[i] >> 7) & 0x1; - Q2SPI_DEBUG(q2spi, "%s CR EXT HDR[%d] cmd/opcode:%d dw_len:%d parity:%d\n", + Q2SPI_DBG_2(q2spi, "%s CR EXT HDR[%d] cmd/opcode:%d dw_len:%d parity:%d\n", __func__, i, q2spi_cr_pkt->ext_cr_hdr.cmd, q2spi_cr_pkt->ext_cr_hdr.dw_len, q2spi_cr_pkt->ext_cr_hdr.parity); @@ -1028,7 +1078,7 @@ struct q2spi_cr_packet *q2spi_prepare_cr_pkt(struct q2spi_geni *q2spi) q2spi_cr_pkt->bulk_pkt[i].flow_id = ptr[0] >> 4; ptr += CR_BULK_DATA_SIZE; q2spi_cr_pkt->cr_hdr_type[i] = CR_HDR_BULK; - Q2SPI_DEBUG(q2spi, "%s i:%d cr_hdr_type:0x%x flow_id:%d\n", + Q2SPI_DBG_2(q2spi, "%s i:%d 
cr_hdr_type:0x%x flow_id:%d\n", __func__, i, q2spi_cr_pkt->cr_hdr_type[i], q2spi_cr_pkt->bulk_pkt[i].flow_id); } else if ((q2spi_cr_pkt->cr_hdr[i].cmd == ADDR_LESS_WR_ACCESS) || @@ -1039,10 +1089,10 @@ struct q2spi_cr_packet *q2spi_prepare_cr_pkt(struct q2spi_geni *q2spi) (char *)ptr, q2spi->db_xfer->rx_len); ptr += CR_DMA_DATA_SIZE; q2spi_cr_pkt->cr_hdr_type[i] = CR_HDR_VAR3; - Q2SPI_DEBUG(q2spi, "%s i:%d cr_hdr_type:0x%x\n", + Q2SPI_DBG_2(q2spi, "%s i:%d cr_hdr_type:0x%x\n", __func__, i, q2spi_cr_pkt->cr_hdr_type[i]); if (q2spi_cr_pkt->var3_pkt[i].arg1 == Q2SPI_CR_TRANSACTION_ERROR) { - Q2SPI_DEBUG(q2spi, "%s arg1:0x%x arg2:0x%x arg3:0x%x\n", + Q2SPI_DBG_1(q2spi, "%s arg1:0x%x arg2:0x%x arg3:0x%x\n", __func__, q2spi_cr_pkt->var3_pkt[i].arg1, q2spi_cr_pkt->var3_pkt[i].arg2, q2spi_cr_pkt->var3_pkt[i].arg3); @@ -1050,10 +1100,9 @@ struct q2spi_cr_packet *q2spi_prepare_cr_pkt(struct q2spi_geni *q2spi) spin_unlock_irqrestore(&q2spi->cr_queue_lock, flags); return 0; } - Q2SPI_DEBUG(q2spi, "%s var3_pkt:%p var3_flow_id:%d\n", + Q2SPI_DBG_2(q2spi, "%s var3_pkt:%p flow_id:%d len_part1:%d part2:%d\n", __func__, &q2spi_cr_pkt->var3_pkt[i], - q2spi_cr_pkt->var3_pkt[i].flow_id); - Q2SPI_DEBUG(q2spi, "%s len_part1:%d len_part2:%d\n", __func__, + q2spi_cr_pkt->var3_pkt[i].flow_id, q2spi_cr_pkt->var3_pkt[i].dw_len_part1, q2spi_cr_pkt->var3_pkt[i].dw_len_part2); } else if (q2spi_cr_pkt->cr_hdr[i].cmd == CR_EXTENSION) { @@ -1062,7 +1111,7 @@ struct q2spi_cr_packet *q2spi_prepare_cr_pkt(struct q2spi_geni *q2spi) q2spi_cr_pkt->extension_pkt.dw_len = q2spi_cr_pkt->ext_cr_hdr.dw_len; q2spi_cr_pkt->extension_pkt.parity = q2spi_cr_pkt->ext_cr_hdr.parity; ptr += q2spi_cr_pkt->extension_pkt.dw_len * 4 + CR_EXTENSION_DATA_BYTES; - Q2SPI_DEBUG(q2spi, "%s Extension CR cmd:%d dwlen:%d parity:%d\n", __func__, + Q2SPI_DBG_2(q2spi, "%s Extension CR cmd:%d dwlen:%d parity:%d\n", __func__, q2spi_cr_pkt->extension_pkt.cmd, q2spi_cr_pkt->extension_pkt.dw_len, q2spi_cr_pkt->extension_pkt.parity); @@ 
-1141,7 +1190,7 @@ static int q2spi_open(struct inode *inode, struct file *filp) q2spi->q2spi_cr_txn_err = false; q2spi->q2spi_sleep_cmd_enable = false; q2spi->q2spi_cr_hdr_err = false; - Q2SPI_DEBUG(q2spi, "%s End PID:%d, allocs:%d\n", + Q2SPI_DBG_2(q2spi, "%s End PID:%d, allocs:%d\n", __func__, current->pid, atomic_read(&q2spi->alloc_count)); err: mutex_unlock(&q2spi->port_lock); @@ -1178,7 +1227,7 @@ static inline void *q2spi_get_variant_buf(struct q2spi_geni *q2spi, if (i < Q2SPI_MAX_BUF) { q2spi->var1_buf_used[i] = q2spi->var1_buf[i]; q2spi_pkt->var1_tx_dma = q2spi->var1_dma_buf[i]; - Q2SPI_DEBUG(q2spi, "%s ALLOC var1 i:%d vir1_buf:%p phy_dma_buf:%p\n", + Q2SPI_DBG_2(q2spi, "%s ALLOC var1 i:%d vir1_buf:%p phy_dma_buf:%p\n", __func__, i, (void *)q2spi->var1_buf[i], (void *)q2spi->var1_dma_buf[i]); return (void *)q2spi->var1_buf[i]; @@ -1191,7 +1240,7 @@ static inline void *q2spi_get_variant_buf(struct q2spi_geni *q2spi, if (i < Q2SPI_MAX_BUF) { q2spi->var5_buf_used[i] = q2spi->var5_buf[i]; q2spi_pkt->var5_tx_dma = q2spi->var5_dma_buf[i]; - Q2SPI_DEBUG(q2spi, "%s ALLOC var5 i:%d vir5_buf:%p phy_dma_buf:%p\n", + Q2SPI_DBG_2(q2spi, "%s ALLOC var5 i:%d vir5_buf:%p phy_dma_buf:%p\n", __func__, i, (void *)q2spi->var5_buf[i], (void *)q2spi->var5_dma_buf[i]); return (void *)q2spi->var5_buf[i]; @@ -1220,7 +1269,7 @@ int q2spi_alloc_xfer_tid(struct q2spi_geni *q2spi) spin_unlock_irqrestore(&q2spi->txn_lock, flags); return -EINVAL; } - Q2SPI_DEBUG(q2spi, "%s tid:%d ret:%d\n", __func__, tid, tid); + Q2SPI_DBG_2(q2spi, "%s tid:%d ret:%d\n", __func__, tid, tid); spin_unlock_irqrestore(&q2spi->txn_lock, flags); return tid; } @@ -1235,7 +1284,7 @@ void q2spi_free_xfer_tid(struct q2spi_geni *q2spi, int tid) unsigned long flags; spin_lock_irqsave(&q2spi->txn_lock, flags); - Q2SPI_DEBUG(q2spi, "%s tid:%d\n", __func__, tid); + Q2SPI_DBG_2(q2spi, "%s tid:%d\n", __func__, tid); if (tid < Q2SPI_START_TID_ID || tid > Q2SPI_END_TID_ID) { Q2SPI_ERROR(q2spi, "%s Err Invalid tid:%d\n", 
__func__, tid); spin_unlock_irqrestore(&q2spi->txn_lock, flags); @@ -1250,7 +1299,7 @@ q2spi_get_dw_offset(struct q2spi_geni *q2spi, enum cmd_type c_type, unsigned int unsigned int offset = 0, remainder = 0, quotient = 0; offset = reg_offset / Q2SPI_OFFSET_MASK; - Q2SPI_DEBUG(q2spi, "%s type:%d offset:%d remainder:%d quotient:%d\n", + Q2SPI_DBG_2(q2spi, "%s type:%d offset:%d remainder:%d quotient:%d\n", __func__, c_type, offset, remainder, quotient); return offset; } @@ -1274,7 +1323,7 @@ int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_pt Q2SPI_DEBUG(q2spi, "%s Err Invalid q2spi_hc_var1\n", __func__); return -ENOMEM; } - Q2SPI_DEBUG(q2spi, "%s var_1:%p var_1_phy:%p cmd:%d\n", + Q2SPI_DBG_2(q2spi, "%s var_1:%p var_1_phy:%p cmd:%d\n", __func__, q2spi_hc_var1, (void *)q2spi_pkt->var1_tx_dma, q2spi_req.cmd); if (q2spi_req.cmd == LOCAL_REG_READ || q2spi_req.cmd == HRF_READ) { q2spi_hc_var1->cmd = HC_DATA_READ; @@ -1300,7 +1349,7 @@ int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_pt q2spi_hc_var1->dw_len = (q2spi_req.data_len / 4) - 1; q2spi_hc_var1->access_type = LOCAL_REG_ACCESS; q2spi_hc_var1->address_mode = CLIENT_ADDRESS; - Q2SPI_DEBUG(q2spi, "%s data_len:%d dw_len:%d req_flow_id:%d\n", + Q2SPI_DBG_2(q2spi, "%s data_len:%d dw_len:%d req_flow_id:%d\n", __func__, q2spi_req.data_len, q2spi_hc_var1->dw_len, q2spi_req.flow_id); if (!q2spi_req.flow_id && !q2spi->hrf_flow) { ret = q2spi_alloc_xfer_tid(q2spi); @@ -1320,7 +1369,7 @@ int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_pt q2spi_pkt->valid = true; q2spi_pkt->sync = q2spi_req.sync; - Q2SPI_DEBUG(q2spi, "%s *q2spi_pkt_ptr:%p End ret flow_id:%d\n", + Q2SPI_DBG_1(q2spi, "%s *q2spi_pkt_ptr:%p End ret flow_id:%d\n", __func__, *q2spi_pkt_ptr, q2spi_hc_var1->flow_id); return q2spi_hc_var1->flow_id; } @@ -1340,11 +1389,11 @@ int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_p Q2SPI_ERROR(q2spi, "%s Err 
Invalid q2spi_pkt\n", __func__); return -EINVAL; } - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p pkt_var_1:%p pkt_dma:%p pkt_var_5:%p\n", + Q2SPI_DBG_2(q2spi, "%s q2spi_pkt:%p pkt_var_1:%p pkt_dma:%p pkt_var_5:%p\n", __func__, q2spi_pkt, q2spi_pkt->var1_pkt, (void *)q2spi_pkt->var5_tx_dma, q2spi_pkt->var5_pkt); - Q2SPI_DEBUG(q2spi, "%s req_cmd:%d req_addr:%d req_len:%d req_data_buf:%p\n", + Q2SPI_DBG_2(q2spi, "%s req_cmd:%d req_addr:%d req_len:%d req_data_buf:%p\n", __func__, q2spi_req.cmd, q2spi_req.addr, q2spi_req.data_len, q2spi_req.data_buff); @@ -1355,14 +1404,11 @@ int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_p return -ENOMEM; } memset(q2spi_hc_var5->data_buf, 0xba, 4096); - Q2SPI_DEBUG(q2spi, "%s var_5:%p cmd:%d\n", __func__, q2spi_hc_var5, q2spi_req.cmd); - Q2SPI_DEBUG(q2spi, "%s pkt_var_1:%p pkt_dma:%p pkt_var_5:%p\n", - __func__, q2spi_pkt->var1_pkt, - (void *)q2spi_pkt->var5_tx_dma, q2spi_pkt->var5_pkt); + Q2SPI_DBG_2(q2spi, "%s var_5:%p cmd:%d\n", __func__, q2spi_hc_var5, q2spi_req.cmd); if (q2spi_req.data_len > Q2SPI_MAX_DATA_LEN) { Q2SPI_ERROR(q2spi, "%s Err (q2spi_req.data_len > Q2SPI_MAX_DATA_LEN) %d return\n", __func__, q2spi_req.data_len); - Q2SPI_DEBUG(q2spi, "%s Unmapping Var5 buffer\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Unmapping Var5 buffer\n", __func__); q2spi_unmap_dma_buf_used(q2spi, q2spi_pkt->var5_tx_dma, q2spi_pkt->var5_rx_dma); return -ENOMEM; } @@ -1372,7 +1418,7 @@ int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_p q2spi_pkt->m_cmd_param = Q2SPI_TX_RX; ret = q2spi_get_rx_buf(q2spi_pkt, q2spi_req.data_len); if (ret) { - Q2SPI_DEBUG(q2spi, "%s Unmapping Var5 buffer\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Unmapping Var5 buffer\n", __func__); q2spi_unmap_dma_buf_used(q2spi, q2spi_pkt->var5_tx_dma, q2spi_pkt->var5_rx_dma); return ret; @@ -1404,7 +1450,7 @@ int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_p q2spi_hc_var5->dw_len_part1 = 
(q2spi_req.data_len / 4) - 1; q2spi_hc_var5->dw_len_part2 = ((q2spi_req.data_len / 4) - 1) >> 2; } - Q2SPI_DEBUG(q2spi, "%s dw_len_part1:%d dw_len_part2:%d\n", + Q2SPI_DBG_2(q2spi, "%s dw_len_part1:%d dw_len_part2:%d\n", __func__, q2spi_hc_var5->dw_len_part1, q2spi_hc_var5->dw_len_part2); q2spi_hc_var5->access_type = SYSTEM_MEMORY_ACCESS; q2spi_hc_var5->address_mode = NO_CLIENT_ADDRESS; @@ -1427,7 +1473,7 @@ int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_p q2spi_pkt->valid = true; q2spi_pkt->sync = q2spi_req.sync; q2spi_pkt->flow_id = q2spi_hc_var5->flow_id; - Q2SPI_DEBUG(q2spi, "%s flow id:%d q2spi_pkt:%p pkt_var1:%p pkt_tx_dma:%p var5_pkt:%p\n", + Q2SPI_DBG_1(q2spi, "%s flow id:%d q2spi_pkt:%p pkt_var1:%p pkt_tx_dma:%p var5_pkt:%p\n", __func__, q2spi_hc_var5->flow_id, q2spi_pkt, q2spi_pkt->var1_pkt, (void *)q2spi_pkt->var5_tx_dma, q2spi_pkt->var5_pkt); q2spi_dump_ipc(q2spi, "sma format var5(2) data_buf", @@ -1445,7 +1491,7 @@ static int q2spi_abort_command(struct q2spi_geni *q2spi, struct q2spi_request q2 Q2SPI_DEBUG(q2spi, "%s Err Invalid q2spi\n", __func__); return -EINVAL; } - Q2SPI_DEBUG(q2spi, "%s cmd:%d addr:%d flow_id:%d data_len:%d\n", + Q2SPI_DBG_1(q2spi, "%s cmd:%d addr:%d flow_id:%d data_len:%d\n", __func__, q2spi_req.cmd, q2spi_req.addr, q2spi_req.flow_id, q2spi_req.data_len); q2spi_pkt = q2spi_alloc_q2spi_pkt(q2spi, __LINE__); @@ -1480,7 +1526,7 @@ static int q2spi_soft_reset(struct q2spi_geni *q2spi, struct q2spi_request q2spi Q2SPI_ERROR(q2spi, "%s Err Invalid q2spi\n", __func__); return -EINVAL; } - Q2SPI_DEBUG(q2spi, "%s cmd:%d addr:%d flow_id:%d data_len:%d\n", + Q2SPI_DBG_1(q2spi, "%s cmd:%d addr:%d flow_id:%d data_len:%d\n", __func__, q2spi_req.cmd, q2spi_req.addr, q2spi_req.flow_id, q2spi_req.data_len); q2spi_pkt = q2spi_alloc_q2spi_pkt(q2spi, __LINE__); @@ -1511,7 +1557,7 @@ void q2spi_notify_data_avail_for_client(struct q2spi_geni *q2spi) atomic_inc(&q2spi->rx_avail); wake_up_interruptible(&q2spi->readq); 
wake_up(&q2spi->read_wq); - Q2SPI_DEBUG(q2spi, "%s wake userspace rx_avail:%d\n", __func__, + Q2SPI_DBG_1(q2spi, "%s wake userspace rx_avail:%d\n", __func__, atomic_read(&q2spi->rx_avail)); } @@ -1527,11 +1573,11 @@ int q2spi_hrf_sleep(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, Q2SPI_DEBUG(q2spi, "%s Err q2spi_hrf_entry_format failed ret:%d\n", __func__, ret); return ret; } - Q2SPI_DEBUG(q2spi, "%s hrf_req cmd:%d flow_id:%d data_buff:%p\n", + Q2SPI_DBG_1(q2spi, "%s hrf_req cmd:%d flow_id:%d data_buff:%p\n", __func__, q2spi_hrf_req->cmd, q2spi_hrf_req->flow_id, q2spi_hrf_req->data_buff); ret = q2spi_frame_lra(q2spi, q2spi_hrf_req, &q2spi_pkt, VARIANT_1_LRA); - Q2SPI_DEBUG(q2spi, "%s q2spi_hrf_req:%p q2spi_pkt:%p\n", + Q2SPI_DBG_2(q2spi, "%s q2spi_hrf_req:%p q2spi_pkt:%p\n", __func__, q2spi_hrf_req, q2spi_pkt); if (ret < 0) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n", __func__, ret); @@ -1541,7 +1587,7 @@ int q2spi_hrf_sleep(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, q2spi_kfree(q2spi, q2spi_hrf_req, __LINE__); *q2spi_pkt_ptr = q2spi_pkt; - Q2SPI_DEBUG(q2spi, "%s End %d\n", __func__, __LINE__); + Q2SPI_DBG_1(q2spi, "%s End %d\n", __func__, __LINE__); return ret; } @@ -1559,13 +1605,13 @@ int q2spi_hrf_flow(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, return ret; } - Q2SPI_DEBUG(q2spi, "%s cmd:%d flow_id:%d data_buff:%p\n", + Q2SPI_DBG_1(q2spi, "%s cmd:%d flow_id:%d data_buff:%p\n", __func__, q2spi_req.cmd, q2spi_req.flow_id, q2spi_req.data_buff); - Q2SPI_DEBUG(q2spi, "%s addr:0x%x proto:0x%x data_len:0x%x\n", + Q2SPI_DBG_2(q2spi, "%s addr:0x%x proto:0x%x data_len:0x%x\n", __func__, q2spi_req.addr, q2spi_req.proto_ind, q2spi_req.data_len); ret = q2spi_frame_lra(q2spi, q2spi_hrf_req, &q2spi_pkt, VARIANT_1_HRF); - Q2SPI_DEBUG(q2spi, "%s q2spi_hrf_req:%p q2spi_pkt:%p\n", + Q2SPI_DBG_2(q2spi, "%s q2spi_hrf_req:%p q2spi_pkt:%p\n", __func__, q2spi_hrf_req, q2spi_pkt); if (ret < 0) { Q2SPI_DEBUG(q2spi, "%s Err 
q2spi_frame_lra failed ret:%d\n", __func__, ret); @@ -1586,28 +1632,28 @@ int q2spi_hrf_flow(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req, *q2spi_pkt_ptr = q2spi_pkt; q2spi->hrf_flow = false; - Q2SPI_DEBUG(q2spi, "%s End q2spi_pkt:%p\n", __func__, q2spi_pkt); + Q2SPI_DBG_2(q2spi, "%s End q2spi_pkt:%p\n", __func__, q2spi_pkt); return ret; } void q2spi_print_req_cmd(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req) { if (q2spi_req.cmd == LOCAL_REG_READ) - Q2SPI_DEBUG(q2spi, "%s cmd:LOCAL_REG_READ\n", __func__); + Q2SPI_DBG_2(q2spi, "%s cmd:LOCAL_REG_READ\n", __func__); else if (q2spi_req.cmd == LOCAL_REG_WRITE) - Q2SPI_DEBUG(q2spi, "%s cmd:LOCAL_REG_WRITE\n", __func__); + Q2SPI_DBG_2(q2spi, "%s cmd:LOCAL_REG_WRITE\n", __func__); else if (q2spi_req.cmd == HRF_READ) - Q2SPI_DEBUG(q2spi, "%s cmd:HRF_READ\n", __func__); + Q2SPI_DBG_2(q2spi, "%s cmd:HRF_READ\n", __func__); else if (q2spi_req.cmd == HRF_WRITE) - Q2SPI_DEBUG(q2spi, "%s cmd:HRF_WRITE\n", __func__); + Q2SPI_DBG_2(q2spi, "%s cmd:HRF_WRITE\n", __func__); else if (q2spi_req.cmd == DATA_READ) - Q2SPI_DEBUG(q2spi, "%s cmd:DATA_READ\n", __func__); + Q2SPI_DBG_2(q2spi, "%s cmd:DATA_READ\n", __func__); else if (q2spi_req.cmd == DATA_WRITE) - Q2SPI_DEBUG(q2spi, "%s cmd:DATA_WRITE\n", __func__); + Q2SPI_DBG_2(q2spi, "%s cmd:DATA_WRITE\n", __func__); else if (q2spi_req.cmd == SOFT_RESET) - Q2SPI_DEBUG(q2spi, "%s cmd:SOFT_RESET\n", __func__); + Q2SPI_DBG_2(q2spi, "%s cmd:SOFT_RESET\n", __func__); else if (q2spi_req.cmd == Q2SPI_HRF_SLEEP_CMD) - Q2SPI_DEBUG(q2spi, "%s cmd:Sleep CMD to Client\n", __func__); + Q2SPI_DBG_2(q2spi, "%s cmd:Sleep CMD to Client\n", __func__); else Q2SPI_DEBUG(q2spi, "%s Invalid cmd:%d\n", __func__, q2spi_req.cmd); } @@ -1636,7 +1682,7 @@ bool q2spi_del_pkt_from_tx_queue(struct q2spi_geni *q2spi, struct q2spi_packet * mutex_lock(&q2spi->queue_lock); list_for_each_entry_safe(q2spi_pkt, q2spi_pkt_tmp, &q2spi->tx_queue_list, list) { if (cur_q2spi_pkt == q2spi_pkt) { - 
Q2SPI_DEBUG(q2spi, "%s Found q2spi_pkt:%p state:%s\n", __func__, + Q2SPI_DBG_1(q2spi, "%s Found q2spi_pkt:%p state:%s\n", __func__, q2spi_pkt, q2spi_pkt_state(q2spi_pkt)); if (q2spi_pkt->state == IN_DELETION) { list_del(&q2spi_pkt->list); @@ -1645,7 +1691,7 @@ bool q2spi_del_pkt_from_tx_queue(struct q2spi_geni *q2spi, struct q2spi_packet * break; } } - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p state:%s\n", + Q2SPI_DBG_1(q2spi, "%s q2spi_pkt:%p state:%s\n", __func__, q2spi_pkt, q2spi_pkt_state(q2spi_pkt)); } mutex_unlock(&q2spi->queue_lock); @@ -1701,7 +1747,7 @@ int q2spi_add_req_to_tx_queue(struct q2spi_geni *q2spi, struct q2spi_request *q2 q2spi_kfree(q2spi, q2spi_pkt, __LINE__); return ret; } - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p state=%s ret:%d\n", + Q2SPI_DBG_1(q2spi, "%s q2spi_pkt:%p state=%s ret:%d\n", __func__, q2spi_pkt, q2spi_pkt_state(q2spi_pkt), ret); list_add_tail(&q2spi_pkt->list, &q2spi->tx_queue_list); } else if (q2spi_req.cmd == HRF_READ || q2spi_req.cmd == HRF_WRITE) { @@ -1739,10 +1785,10 @@ int q2spi_add_req_to_tx_queue(struct q2spi_geni *q2spi, struct q2spi_request *q2 if (q2spi_pkt) { *q2spi_pkt_ptr = q2spi_pkt; - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p req_cmd:%d ret:%d\n", + Q2SPI_DBG_1(q2spi, "%s q2spi_pkt:%p req_cmd:%d ret:%d\n", __func__, q2spi_pkt, q2spi_req.cmd, ret); } else { - Q2SPI_DEBUG(q2spi, "%s req_cmd:%d ret:%d\n", __func__, q2spi_req.cmd, ret); + Q2SPI_DBG_1(q2spi, "%s req_cmd:%d ret:%d\n", __func__, q2spi_req.cmd, ret); } return ret; } @@ -1812,13 +1858,13 @@ static int q2spi_wakeup_hw_from_sleep(struct q2spi_geni *q2spi) xfer_timeout = msecs_to_jiffies(EXT_CR_TIMEOUT_MSECS); reinit_completion(&q2spi->wait_for_ext_cr); /* Send gpio wakeup signal on q2spi lines to hw */ - Q2SPI_DEBUG(q2spi, "%s Send wakeup_hw to wakeup client\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Send wakeup_hw to wakeup client\n", __func__); ret = q2spi_wakeup_slave_through_gpio(q2spi); if (ret) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_wakeup_slave_through_gpio\n", 
__func__); return ret; } - Q2SPI_DEBUG(q2spi, "%s Waiting for Extended CR\n", __func__); + Q2SPI_DBG_2(q2spi, "%s Waiting for Extended CR\n", __func__); timeout = wait_for_completion_interruptible_timeout(&q2spi->wait_for_ext_cr, xfer_timeout); if (timeout <= 0) { Q2SPI_DEBUG(q2spi, "%s Err timeout %ld for Extended CR\n", __func__, timeout); @@ -1827,7 +1873,7 @@ static int q2spi_wakeup_hw_from_sleep(struct q2spi_geni *q2spi) return -ERESTARTSYS; } } else { - Q2SPI_DEBUG(q2spi, "%s Received Extended CR\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Received Extended CR\n", __func__); } return ret; @@ -1864,7 +1910,7 @@ static int __q2spi_transfer(struct q2spi_geni *q2spi, struct q2spi_request q2spi return -ENOENT; } - Q2SPI_DEBUG(q2spi, "%s is slave_in_sleep:%d\n", + Q2SPI_DBG_1(q2spi, "%s is slave_in_sleep:%d\n", __func__, atomic_read(&q2spi->slave_in_sleep)); ret = __q2spi_send_messages(q2spi, (void *)q2spi_pkt); @@ -1893,7 +1939,7 @@ static int __q2spi_transfer(struct q2spi_geni *q2spi, struct q2spi_request q2spi } if (q2spi_pkt->is_client_sleep_pkt) { - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p client sleep_cmd ret:%d", + Q2SPI_DBG_1(q2spi, "%s q2spi_pkt:%p client sleep_cmd ret:%d", __func__, q2spi_pkt, ret); return ret; } @@ -1901,7 +1947,7 @@ static int __q2spi_transfer(struct q2spi_geni *q2spi, struct q2spi_request q2spi if (q2spi_req.cmd == HRF_WRITE) { /* HRF_WRITE */ xfer_timeout = msecs_to_jiffies(XFER_TIMEOUT_OFFSET); - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p waiting for bulk_wait\n", __func__, q2spi_pkt); + Q2SPI_DBG_1(q2spi, "%s q2spi_pkt:%p waiting for bulk_wait\n", __func__, q2spi_pkt); timeout = wait_for_completion_interruptible_timeout (&q2spi_pkt->bulk_wait, xfer_timeout); if (timeout <= 0) { @@ -1913,22 +1959,13 @@ static int __q2spi_transfer(struct q2spi_geni *q2spi, struct q2spi_request q2spi } return -ETIMEDOUT; } - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p bulk_wait completed wait DB clear\n", - __func__, q2spi_pkt); - timeout = 
wait_event_interruptible(q2spi->read_wq, - !atomic_read(&q2spi->doorbell_pending)); - if (timeout) { - Q2SPI_DEBUG(q2spi, "%s: %p Err db pending interrupted\n", - __func__, q2spi_pkt); - return 0; - } } else if (q2spi_req.cmd == LOCAL_REG_READ) { if (copy_to_user(q2spi_req.data_buff, q2spi_pkt->xfer->rx_buf, q2spi_req.data_len)) { Q2SPI_DEBUG(q2spi, "%s Err copy_to_user fail\n", __func__); return -EFAULT; } - Q2SPI_DEBUG(q2spi, "%s ret data_len:%d\n", __func__, q2spi_req.data_len); + Q2SPI_DBG_1(q2spi, "%s ret data_len:%d\n", __func__, q2spi_req.data_len); return q2spi_req.data_len; } return len; @@ -1960,7 +1997,7 @@ static int q2spi_transfer_with_retries(struct q2spi_geni *q2spi, struct q2spi_re mod_timer(&q2spi->slave_sleep_timer, jiffies + msecs_to_jiffies(Q2SPI_SLAVE_SLEEP_TIME_MSECS)); ret = __q2spi_transfer(q2spi, q2spi_req, cur_q2spi_pkt, len); - Q2SPI_DEBUG(q2spi, "%s flow_id:%d ret:%d\n", __func__, flow_id, ret); + Q2SPI_DBG_1(q2spi, "%s flow_id:%d ret:%d\n", __func__, flow_id, ret); q2spi_free_xfer_tid(q2spi, flow_id); if (ret > 0 || i == Q2SPI_MAX_TX_RETRIES) { if (ret == len) @@ -1981,7 +2018,7 @@ static int q2spi_transfer_with_retries(struct q2spi_geni *q2spi, struct q2spi_re return ret; } else if (ret == -ETIMEDOUT) { /* Upon transfer failure's retry here */ - Q2SPI_DEBUG(q2spi, "%s ret:%d retry_count:%d q2spi_pkt:%p db_pending:%d\n", + Q2SPI_DBG_1(q2spi, "%s ret:%d retry_count:%d q2spi_pkt:%p db_pending:%d\n", __func__, ret, i + 1, cur_q2spi_pkt, atomic_read(&q2spi->doorbell_pending)); if (q2spi->gsi->qup_gsi_global_err) { @@ -2042,7 +2079,7 @@ static int q2spi_transfer_with_retries(struct q2spi_geni *q2spi, struct q2spi_re ret = -ENOMEM; goto pm_put_exit; } - Q2SPI_DEBUG(q2spi, "%s cur_q2spi_pkt=%p\n", __func__, cur_q2spi_pkt); + Q2SPI_DBG_1(q2spi, "%s cur_q2spi_pkt=%p\n", __func__, cur_q2spi_pkt); } else { /* Upon SW error break here */ break; @@ -2054,10 +2091,10 @@ transfer_exit: q2spi_free_q2spi_pkt(cur_q2spi_pkt, __LINE__); pm_put_exit: 
pm_runtime_mark_last_busy(q2spi->dev); - Q2SPI_DEBUG(q2spi, "%s PM put_autosuspend count:%d line:%d\n", __func__, + Q2SPI_DBG_2(q2spi, "%s PM put_autosuspend count:%d line:%d\n", __func__, atomic_read(&q2spi->dev->power.usage_count), __LINE__); pm_runtime_put_autosuspend(q2spi->dev); - Q2SPI_DEBUG(q2spi, "%s PM after put_autosuspend count:%d\n", __func__, + Q2SPI_DBG_2(q2spi, "%s PM after put_autosuspend count:%d\n", __func__, atomic_read(&q2spi->dev->power.usage_count)); return ret; } @@ -2074,7 +2111,7 @@ void q2spi_transfer_abort(struct q2spi_geni *q2spi) struct q2spi_request abort_request; int ret = 0; - Q2SPI_DEBUG(q2spi, "%s ABORT\n", __func__); + Q2SPI_DBG_2(q2spi, "%s ABORT\n", __func__); abort_request.cmd = ABORT; abort_request.sync = 1; mutex_lock(&q2spi->queue_lock); @@ -2107,7 +2144,7 @@ void q2spi_transfer_soft_reset(struct q2spi_geni *q2spi) struct q2spi_request soft_reset_request; int ret = 0; - Q2SPI_DEBUG(q2spi, "%s SOFT_RESET\n", __func__); + Q2SPI_DBG_2(q2spi, "%s SOFT_RESET\n", __func__); soft_reset_request.cmd = SOFT_RESET; soft_reset_request.sync = 1; mutex_lock(&q2spi->queue_lock); @@ -2171,10 +2208,11 @@ static int q2spi_transfer_check(struct q2spi_geni *q2spi, struct q2spi_request * Q2SPI_DEBUG(q2spi, "%s Err copy_from_user failed\n", __func__); return -EFAULT; } - Q2SPI_DEBUG(q2spi, "%s cmd:%d data_len:%d addr:%d proto:%d ep:%d\n", + + Q2SPI_DBG_2(q2spi, "%s cmd:%d data_len:%d addr:%d proto:%d ep:%d\n", __func__, q2spi_req->cmd, q2spi_req->data_len, q2spi_req->addr, q2spi_req->proto_ind, q2spi_req->end_point); - Q2SPI_DEBUG(q2spi, "%s priority:%d flow_id:%d sync:%d\n", + Q2SPI_DBG_2(q2spi, "%s priority:%d flow_id:%d sync:%d\n", __func__, q2spi_req->priority, q2spi_req->flow_id, q2spi_req->sync); if (!q2spi_cmd_type_valid(q2spi, q2spi_req)) @@ -2186,7 +2224,11 @@ static int q2spi_transfer_check(struct q2spi_geni *q2spi, struct q2spi_request * } if (q2spi_req->reserved[0] & Q2SPI_SLEEP_CMD_BIT) { - Q2SPI_DEBUG(q2spi, "%s allow_sleep\n", 
__func__); + Q2SPI_DBG_1(q2spi, "%s allow_sleep\n", __func__); + if (q2spi->q2spi_log_lvl) + q2spi->q2spi_log_lvl = q2spi->q2spi_log_lvl; + else + q2spi->q2spi_log_lvl = LOG_DBG_LEVEL1; q2spi->q2spi_sleep_cmd_enable = true; } else { q2spi->q2spi_sleep_cmd_enable = false; @@ -2218,7 +2260,7 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t return -EINVAL; } q2spi = filp->private_data; - Q2SPI_DEBUG(q2spi, "In %s Enter PID=%d\n", __func__, current->pid); + Q2SPI_DBG_1(q2spi, "In %s Enter PID=%d\n", __func__, current->pid); mutex_lock(&q2spi->port_lock); ret = q2spi_transfer_check(q2spi, &q2spi_req, buf, len); @@ -2249,16 +2291,17 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t goto err; } user_buf = q2spi_req.data_buff; - q2spi_dump_ipc(q2spi, "q2spi_transfer", (char *)data_buf, q2spi_req.data_len); + q2spi_dump_ipc_always(q2spi, "q2spi_transfer", (char *)data_buf, + q2spi_req.data_len); q2spi_req.data_buff = data_buf; } if (atomic_read(&q2spi->doorbell_pending)) { - Q2SPI_DEBUG(q2spi, "%s CR Doorbell Pending\n", __func__); + Q2SPI_DBG_1(q2spi, "%s CR Doorbell Pending\n", __func__); usleep_range(1000, 2000); } - Q2SPI_DEBUG(q2spi, "%s PM get_sync count:%d\n", __func__, + Q2SPI_DBG_2(q2spi, "%s PM get_sync count:%d\n", __func__, atomic_read(&q2spi->dev->power.usage_count)); ret = pm_runtime_get_sync(q2spi->dev); if (ret < 0) { @@ -2270,7 +2313,7 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t q2spi->is_start_seq_fail = false; reinit_completion(&q2spi->wait_comp_start_fail); - Q2SPI_DEBUG(q2spi, "%s PM after get_sync count:%d\n", __func__, + Q2SPI_DBG_2(q2spi, "%s PM after get_sync count:%d\n", __func__, atomic_read(&q2spi->dev->power.usage_count)); q2spi_wait_for_doorbell_setup_ready(q2spi); mutex_lock(&q2spi->queue_lock); @@ -2289,9 +2332,8 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t goto err; } - Q2SPI_DEBUG(q2spi, "%s 
flow_id:%d\n", __func__, flow_id); ret = q2spi_transfer_with_retries(q2spi, q2spi_req, cur_q2spi_pkt, len, flow_id, user_buf); - Q2SPI_DEBUG(q2spi, "%s transfer_with_retries ret:%d\n", __func__, ret); + Q2SPI_DBG_1(q2spi, "%s transfer_with_retries ret:%d\n", __func__, ret); err: mutex_unlock(&q2spi->port_lock); @@ -2317,7 +2359,7 @@ static ssize_t q2spi_response(struct file *filp, char __user *buf, size_t count, q2spi = filp->private_data; - Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid); + Q2SPI_DBG_1(q2spi, "%s Enter PID=%d\n", __func__, current->pid); if (q2spi->hw_state_is_bad) { Q2SPI_DEBUG(q2spi, "%s Err Retries failed, check HW state\n", __func__); ret = -EPIPE; @@ -2330,16 +2372,16 @@ static ssize_t q2spi_response(struct file *filp, char __user *buf, size_t count, goto err; } - Q2SPI_DEBUG(q2spi, "%s PM get_sync count:%d\n", __func__, + Q2SPI_DBG_2(q2spi, "%s PM get_sync count:%d\n", __func__, atomic_read(&q2spi->dev->power.usage_count)); ret = pm_runtime_get_sync(q2spi->dev); if (ret < 0) { - Q2SPI_DEBUG(q2spi, "%s Err for PM get\n", __func__); + Q2SPI_ERROR(q2spi, "%s Err for PM get\n", __func__); pm_runtime_put_noidle(q2spi->dev); pm_runtime_set_suspended(q2spi->dev); goto err; } - Q2SPI_DEBUG(q2spi, "%s PM after get_sync count:%d\n", __func__, + Q2SPI_DBG_2(q2spi, "%s PM after get_sync count:%d\n", __func__, atomic_read(&q2spi->dev->power.usage_count)); q2spi_tx_queue_status(q2spi); if (copy_from_user(&cr_request, buf, sizeof(struct q2spi_client_request)) != 0) { @@ -2348,7 +2390,7 @@ static ssize_t q2spi_response(struct file *filp, char __user *buf, size_t count, goto err; } - Q2SPI_DEBUG(q2spi, "%s waiting on wait_event_interruptible rx_avail:%d\n", + Q2SPI_DBG_1(q2spi, "%s waiting on wait_event_interruptible rx_avail:%d\n", __func__, atomic_read(&q2spi->rx_avail)); /* Wait for Rx data available with timeout */ timeout = wait_event_interruptible_timeout(q2spi->read_wq, atomic_read(&q2spi->rx_avail), @@ -2363,11 +2405,11 @@ static 
ssize_t q2spi_response(struct file *filp, char __user *buf, size_t count, list_for_each_entry_safe(q2spi_pkt_tmp1, q2spi_pkt_tmp2, &q2spi->tx_queue_list, list) { if (q2spi_pkt_tmp1->state == DATA_AVAIL) { q2spi_pkt = q2spi_pkt_tmp1; - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt %p data avail for user\n", + Q2SPI_DBG_1(q2spi, "%s q2spi_pkt %p data avail for user\n", __func__, q2spi_pkt); break; } - Q2SPI_DEBUG(q2spi, "%s check q2spi_pkt %p state:%s\n", + Q2SPI_DBG_1(q2spi, "%s check q2spi_pkt %p state:%s\n", __func__, q2spi_pkt_tmp1, q2spi_pkt_state(q2spi_pkt_tmp1)); } mutex_unlock(&q2spi->queue_lock); @@ -2378,13 +2420,13 @@ static ssize_t q2spi_response(struct file *filp, char __user *buf, size_t count, goto err; } - Q2SPI_DEBUG(q2spi, "%s Found q2spi_pkt = %p, cr_hdr_type:0x%x\n", + Q2SPI_DBG_2(q2spi, "%s Found q2spi_pkt = %p, cr_hdr_type:0x%x\n", __func__, q2spi_pkt, q2spi_pkt->cr_hdr_type); if (q2spi_pkt->cr_hdr_type == CR_HDR_VAR3) { q2spi_cr_var3 = &q2spi_pkt->cr_var3; - Q2SPI_DEBUG(q2spi, "q2spi_cr_var3 len_part1:%d len_part2:%d\n", + Q2SPI_DBG_2(q2spi, "q2spi_cr_var3 len_part1:%d len_part2:%d\n", q2spi_cr_var3->dw_len_part1, q2spi_cr_var3->dw_len_part2); - Q2SPI_DEBUG(q2spi, "q2spi_cr_var3 flow_id:%d arg1:0x%x arg2:0x%x arg3:0x%x\n", + Q2SPI_DBG_2(q2spi, "q2spi_cr_var3 flow_id:%d arg1:0x%x arg2:0x%x arg3:0x%x\n", q2spi_cr_var3->flow_id, q2spi_cr_var3->arg1, q2spi_cr_var3->arg2, q2spi_cr_var3->arg3); /* @@ -2397,18 +2439,18 @@ static ssize_t q2spi_response(struct file *filp, char __user *buf, size_t count, cr_request.data_len = q2spi_pkt->var3_data_len; cr_request.end_point = q2spi_cr_var3->arg2; cr_request.proto_ind = q2spi_cr_var3->arg3; - Q2SPI_DEBUG(q2spi, "%s CR cmd:%d flow_id:%d len:%d ep:%d proto:%d status:%d\n", + Q2SPI_DBG_2(q2spi, "%s CR cmd:%d flow_id:%d len:%d ep:%d proto:%d status:%d\n", __func__, cr_request.cmd, cr_request.flow_id, cr_request.data_len, cr_request.end_point, cr_request.proto_ind, cr_request.status); } else if (q2spi_pkt->cr_hdr_type == 
CR_HDR_BULK) { - Q2SPI_DEBUG(q2spi, "%s cr_request.flow_id:%d status:%d\n", + Q2SPI_DBG_1(q2spi, "%s cr_request.flow_id:%d status:%d\n", __func__, cr_request.flow_id, cr_request.status); } else { Q2SPI_ERROR(q2spi, "%s Err Unsupported CR Type\n", __func__); ret = -EINVAL; goto err; } - Q2SPI_DEBUG(q2spi, "%s data_len:%d ep:%d proto:%d cmd%d status%d flow_id:%d", + Q2SPI_DBG_1(q2spi, "%s data_len:%d ep:%d proto:%d cmd%d status%d flow_id:%d", __func__, cr_request.data_len, cr_request.end_point, cr_request.proto_ind, cr_request.cmd, cr_request.status, cr_request.flow_id); if (!q2spi_pkt->xfer || !q2spi_pkt->xfer->rx_buf) { @@ -2417,8 +2459,8 @@ static ssize_t q2spi_response(struct file *filp, char __user *buf, size_t count, goto err; } - q2spi_dump_ipc(q2spi, "q2spi_response", - (char *)q2spi_pkt->xfer->rx_buf, cr_request.data_len); + q2spi_dump_ipc_always(q2spi, "q2spi_response", + (char *)q2spi_pkt->xfer->rx_buf, cr_request.data_len); ret = copy_to_user(buf, &cr_request, sizeof(struct q2spi_client_request)); if (ret) { Q2SPI_ERROR(q2spi, "%s Err copy_to_user failed ret:%d", __func__, ret); @@ -2435,21 +2477,21 @@ static ssize_t q2spi_response(struct file *filp, char __user *buf, size_t count, ret = (sizeof(struct q2spi_client_request) - ret); q2spi_tx_queue_status(q2spi); - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p state:%s\n", + Q2SPI_DBG_1(q2spi, "%s q2spi_pkt:%p state:%s\n", __func__, q2spi_pkt, q2spi_pkt_state(q2spi_pkt)); q2spi_unmap_rx_buf(q2spi_pkt); q2spi_pkt->state = IN_DELETION; if (q2spi_del_pkt_from_tx_queue(q2spi, q2spi_pkt)) q2spi_free_q2spi_pkt(q2spi_pkt, __LINE__); - Q2SPI_DEBUG(q2spi, "%s PM put_autosuspend count:%d line:%d\n", __func__, + Q2SPI_DBG_2(q2spi, "%s PM put_autosuspend count:%d line:%d\n", __func__, atomic_read(&q2spi->dev->power.usage_count), __LINE__); pm_runtime_mark_last_busy(q2spi->dev); pm_runtime_put_autosuspend(q2spi->dev); - Q2SPI_DEBUG(q2spi, "%s PM after put_autosuspend count:%d\n", __func__, + Q2SPI_DBG_2(q2spi, "%s PM after 
put_autosuspend count:%d\n", __func__, atomic_read(&q2spi->dev->power.usage_count)); err: - Q2SPI_DEBUG(q2spi, "%s End ret:%d PID=%d", __func__, ret, current->pid); + Q2SPI_DBG_1(q2spi, "%s End ret:%d PID=%d", __func__, ret, current->pid); return ret; } @@ -2468,10 +2510,10 @@ static __poll_t q2spi_poll(struct file *filp, poll_table *wait) q2spi = filp->private_data; poll_wait(filp, &q2spi->readq, wait); - Q2SPI_DEBUG(q2spi, "%s PID:%d\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s PID:%d\n", __func__, current->pid); if (atomic_read(&q2spi->rx_avail)) { mask = (POLLIN | POLLRDNORM); - Q2SPI_DEBUG(q2spi, "%s RX data available\n", __func__); + Q2SPI_DBG_1(q2spi, "%s RX data available\n", __func__); } return mask; } @@ -2486,7 +2528,7 @@ static void q2spi_flush_pending_crs(struct q2spi_geni *q2spi) { struct q2spi_packet *q2spi_pkt = NULL, *q2spi_pkt_tmp; - Q2SPI_DEBUG(q2spi, "%s: PID=%d\n", __func__, current->pid); + Q2SPI_DBG_1(q2spi, "%s: PID=%d\n", __func__, current->pid); /* Delay to ensure any pending CRs in progress are consumed */ usleep_range(10000, 20000); q2spi_tx_queue_status(q2spi); @@ -2494,7 +2536,7 @@ static void q2spi_flush_pending_crs(struct q2spi_geni *q2spi) mutex_lock(&q2spi->queue_lock); list_for_each_entry_safe(q2spi_pkt, q2spi_pkt_tmp, &q2spi->tx_queue_list, list) { if (q2spi_pkt->state == DATA_AVAIL || q2spi_pkt->state == IN_USE) { - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt %p data avail, force delete\n", + Q2SPI_DBG_1(q2spi, "%s q2spi_pkt %p data avail, force delete\n", __func__, q2spi_pkt); q2spi_unmap_rx_buf(q2spi_pkt); q2spi_pkt->state = IN_DELETION; @@ -2502,7 +2544,7 @@ static void q2spi_flush_pending_crs(struct q2spi_geni *q2spi) list_del(&q2spi_pkt->list); q2spi_free_q2spi_pkt(q2spi_pkt, __LINE__); } else { - Q2SPI_DEBUG(q2spi, "%s Check q2spi_pkt %p state:%s!!!\n", + Q2SPI_DBG_1(q2spi, "%s Check q2spi_pkt %p state:%s!!!\n", __func__, q2spi_pkt, q2spi_pkt_state(q2spi_pkt)); } } @@ -2524,6 +2566,7 @@ static int q2spi_release(struct inode 
*inode, struct file *filp) } q2spi = filp->private_data; + q2spi->q2spi_log_lvl = LOG_DBG_LEVEL0; Q2SPI_DEBUG(q2spi, "%s PID:%d allocs:%d\n", __func__, current->pid, atomic_read(&q2spi->alloc_count)); mutex_lock(&q2spi->port_lock); @@ -2534,7 +2577,7 @@ static int q2spi_release(struct inode *inode, struct file *filp) q2spi->hw_state_is_bad = false; if (mutex_is_locked(&q2spi->send_msgs_lock)) { - Q2SPI_DEBUG(q2spi, "%s q2spi_transfer is in progress\n", __func__); + Q2SPI_DBG_1(q2spi, "%s q2spi_transfer is in progress\n", __func__); usleep_range(200000, 250000); } @@ -2556,7 +2599,7 @@ static int q2spi_release(struct inode *inode, struct file *filp) mutex_unlock(&q2spi->port_lock); if (!atomic_read(&q2spi->is_suspend)) { ret = pm_runtime_suspend(q2spi->dev); - Q2SPI_DEBUG(q2spi, "%s suspend ret:%d sys_mem_read_in_progress:%d\n", + Q2SPI_DBG_1(q2spi, "%s suspend ret:%d sys_mem_read_in_progress:%d\n", __func__, ret, q2spi->sys_mem_read_in_progress); } q2spi->port_release = true; @@ -2567,7 +2610,7 @@ static int q2spi_release(struct inode *inode, struct file *filp) __func__, ret); } - Q2SPI_DEBUG(q2spi, "%s End allocs:%d rx_avail:%d retry:%d slave_in_sleep:%d\n", + Q2SPI_DBG_1(q2spi, "%s End allocs:%d rx_avail:%d retry:%d slave_in_sleep:%d\n", __func__, atomic_read(&q2spi->alloc_count), atomic_read(&q2spi->rx_avail), atomic_read(&q2spi->retry), atomic_read(&q2spi->slave_in_sleep)); return 0; @@ -2608,7 +2651,7 @@ static int q2spi_se_clk_cfg(u32 speed_hz, struct q2spi_geni *q2spi, res_freq = (sclk_freq / (*clk_div)); - Q2SPI_DEBUG(q2spi, "%s req speed:%u resultant:%lu sclk:%lu, idx:%d, div:%d\n", + Q2SPI_DBG_1(q2spi, "%s req speed:%u resultant:%lu sclk:%lu, idx:%d, div:%d\n", __func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div); ret = clk_set_rate(se->clk, sclk_freq); @@ -2658,7 +2701,7 @@ static int q2spi_set_clock(struct q2spi_geni *q2spi, unsigned long clk_hz) writel(clk_sel, se->base + SE_GENI_CLK_SEL); writel(q2spi->m_clk_cfg, se->base + 
GENI_SER_M_CLK_CFG); - Q2SPI_DEBUG(q2spi, "%s speed_hz:%u clk_sel:0x%x m_clk_cfg:0x%x div:%d\n", + Q2SPI_DBG_1(q2spi, "%s speed_hz:%u clk_sel:0x%x m_clk_cfg:0x%x div:%d\n", __func__, q2spi->cur_speed_hz, clk_sel, q2spi->m_clk_cfg, div); return ret; } @@ -2671,52 +2714,52 @@ void q2spi_geni_se_dump_regs(struct q2spi_geni *q2spi) mutex_unlock(&q2spi->geni_resource_lock); return; } - Q2SPI_DEBUG(q2spi, "GENI_GENERAL_CFG: 0x%x\n", + Q2SPI_DBG_1(q2spi, "GENI_GENERAL_CFG: 0x%x\n", geni_read_reg(q2spi->base, GENI_GENERAL_CFG)); - Q2SPI_DEBUG(q2spi, "GENI_OUTPUT_CTRL: 0x%x\n", + Q2SPI_DBG_1(q2spi, "GENI_OUTPUT_CTRL: 0x%x\n", geni_read_reg(q2spi->base, GENI_OUTPUT_CTRL)); - Q2SPI_DEBUG(q2spi, "GENI_STATUS: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_STATUS)); - Q2SPI_DEBUG(q2spi, "GENI_CLK_CTRL_RO: 0x%x\n", + Q2SPI_DBG_1(q2spi, "GENI_STATUS: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_STATUS)); + Q2SPI_DBG_1(q2spi, "GENI_CLK_CTRL_RO: 0x%x\n", geni_read_reg(q2spi->base, GENI_CLK_CTRL_RO)); - Q2SPI_DEBUG(q2spi, "GENI_FW_MULTILOCK_MSA_RO: 0x%x\n", + Q2SPI_DBG_1(q2spi, "GENI_FW_MULTILOCK_MSA_RO: 0x%x\n", geni_read_reg(q2spi->base, GENI_FW_MULTILOCK_MSA_RO)); - Q2SPI_DEBUG(q2spi, "GENI_IF_DISABLE_RO: 0x%x\n", + Q2SPI_DBG_1(q2spi, "GENI_IF_DISABLE_RO: 0x%x\n", geni_read_reg(q2spi->base, GENI_IF_DISABLE_RO)); - Q2SPI_DEBUG(q2spi, "SE_GENI_CLK_SEL: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_CLK_SEL)); - Q2SPI_DEBUG(q2spi, "SPI_TRANS_CFG: 0x%x\n", geni_read_reg(q2spi->base, SE_SPI_TRANS_CFG)); - Q2SPI_DEBUG(q2spi, "SE_GENI_IOS: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_IOS)); - Q2SPI_DEBUG(q2spi, "SE_GENI_M_CMD0: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_M_CMD0)); - Q2SPI_DEBUG(q2spi, "GENI_M_CMD_CTRL_REG: 0x%x\n", + Q2SPI_DBG_1(q2spi, "SE_GENI_CLK_SEL: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_CLK_SEL)); + Q2SPI_DBG_1(q2spi, "SPI_TRANS_CFG: 0x%x\n", geni_read_reg(q2spi->base, SE_SPI_TRANS_CFG)); + Q2SPI_DBG_1(q2spi, "SE_GENI_IOS: 0x%x\n", geni_read_reg(q2spi->base, 
SE_GENI_IOS)); + Q2SPI_DBG_1(q2spi, "SE_GENI_M_CMD0: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_M_CMD0)); + Q2SPI_DBG_1(q2spi, "GENI_M_CMD_CTRL_REG: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_M_CMD_CTRL_REG)); - Q2SPI_DEBUG(q2spi, "GENI_M_IRQ_STATUS: 0x%x\n", + Q2SPI_DBG_1(q2spi, "GENI_M_IRQ_STATUS: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_M_IRQ_STATUS)); - Q2SPI_DEBUG(q2spi, "GENI_M_IRQ_EN: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_M_IRQ_EN)); - Q2SPI_DEBUG(q2spi, "GENI_TX_FIFO_STATUS: 0x%x\n", + Q2SPI_DBG_1(q2spi, "GENI_M_IRQ_EN: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_M_IRQ_EN)); + Q2SPI_DBG_1(q2spi, "GENI_TX_FIFO_STATUS: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_TX_FIFO_STATUS)); - Q2SPI_DEBUG(q2spi, "GENI_RX_FIFO_STATUS: 0x%x\n", + Q2SPI_DBG_1(q2spi, "GENI_RX_FIFO_STATUS: 0x%x\n", geni_read_reg(q2spi->base, SE_GENI_RX_FIFO_STATUS)); - Q2SPI_DEBUG(q2spi, "DMA_TX_PTR_L: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_PTR_L)); - Q2SPI_DEBUG(q2spi, "DMA_TX_PTR_H: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_PTR_H)); - Q2SPI_DEBUG(q2spi, "DMA_TX_ATTR: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_ATTR)); - Q2SPI_DEBUG(q2spi, "DMA_TX_LEN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_LEN)); - Q2SPI_DEBUG(q2spi, "DMA_TX_IRQ_STAT: 0x%x\n", + Q2SPI_DBG_1(q2spi, "DMA_TX_PTR_L: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_PTR_L)); + Q2SPI_DBG_1(q2spi, "DMA_TX_PTR_H: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_PTR_H)); + Q2SPI_DBG_1(q2spi, "DMA_TX_ATTR: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_ATTR)); + Q2SPI_DBG_1(q2spi, "DMA_TX_LEN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_LEN)); + Q2SPI_DBG_1(q2spi, "DMA_TX_IRQ_STAT: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_IRQ_STAT)); - Q2SPI_DEBUG(q2spi, "DMA_TX_IRQ_EN: 0x%x\n", + Q2SPI_DBG_1(q2spi, "DMA_TX_IRQ_EN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_IRQ_EN)); - Q2SPI_DEBUG(q2spi, "DMA_TX_LEN_IN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_LEN_IN)); - Q2SPI_DEBUG(q2spi, "DMA_RX_IRQ_EN: 
0x%x\n", + Q2SPI_DBG_1(q2spi, "DMA_TX_LEN_IN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_TX_LEN_IN)); + Q2SPI_DBG_1(q2spi, "DMA_RX_IRQ_EN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_IRQ_EN)); - Q2SPI_DEBUG(q2spi, "DMA_RX_PTR_L: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_PTR_L)); - Q2SPI_DEBUG(q2spi, "DMA_RX_PTR_H: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_PTR_H)); - Q2SPI_DEBUG(q2spi, "DMA_RX_ATTR: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_ATTR)); - Q2SPI_DEBUG(q2spi, "DMA_RX_LEN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_LEN)); - Q2SPI_DEBUG(q2spi, "DMA_RX_IRQ_STAT: 0x%x\n", + Q2SPI_DBG_1(q2spi, "DMA_RX_PTR_L: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_PTR_L)); + Q2SPI_DBG_1(q2spi, "DMA_RX_PTR_H: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_PTR_H)); + Q2SPI_DBG_1(q2spi, "DMA_RX_ATTR: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_ATTR)); + Q2SPI_DBG_1(q2spi, "DMA_RX_LEN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_LEN)); + Q2SPI_DBG_1(q2spi, "DMA_RX_IRQ_STAT: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_IRQ_STAT)); - Q2SPI_DEBUG(q2spi, "DMA_RX_LEN_IN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_LEN_IN)); - Q2SPI_DEBUG(q2spi, "DMA_DEBUG_REG0: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_DEBUG_REG0)); - Q2SPI_DEBUG(q2spi, "SE_GSI_EVENT_EN: 0x%x\n", geni_read_reg(q2spi->base, SE_GSI_EVENT_EN)); - Q2SPI_DEBUG(q2spi, "SE_IRQ_EN: 0x%x\n", geni_read_reg(q2spi->base, SE_IRQ_EN)); - Q2SPI_DEBUG(q2spi, "DMA_IF_EN_RO: 0x%x\n", geni_read_reg(q2spi->base, DMA_IF_EN_RO)); + Q2SPI_DBG_1(q2spi, "DMA_RX_LEN_IN: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_RX_LEN_IN)); + Q2SPI_DBG_1(q2spi, "DMA_DEBUG_REG0: 0x%x\n", geni_read_reg(q2spi->base, SE_DMA_DEBUG_REG0)); + Q2SPI_DBG_1(q2spi, "SE_GSI_EVENT_EN: 0x%x\n", geni_read_reg(q2spi->base, SE_GSI_EVENT_EN)); + Q2SPI_DBG_1(q2spi, "SE_IRQ_EN: 0x%x\n", geni_read_reg(q2spi->base, SE_IRQ_EN)); + Q2SPI_DBG_1(q2spi, "DMA_IF_EN_RO: 0x%x\n", geni_read_reg(q2spi->base, DMA_IF_EN_RO)); 
mutex_unlock(&q2spi->geni_resource_lock); } @@ -2724,7 +2767,7 @@ static irqreturn_t q2spi_geni_wakeup_isr(int irq, void *data) { struct q2spi_geni *q2spi = data; - Q2SPI_DEBUG(q2spi, "%s PID:%d\n", __func__, current->pid); + Q2SPI_DBG_1(q2spi, "%s PID:%d\n", __func__, current->pid); irq_set_irq_type(q2spi->doorbell_irq, IRQ_TYPE_EDGE_RISING); atomic_set(&q2spi->slave_in_sleep, 0); schedule_work(&q2spi->q2spi_wakeup_work); @@ -2743,7 +2786,7 @@ static irqreturn_t q2spi_geni_irq(int irq, void *data) s_irq_status = geni_read_reg(q2spi->base, SE_GENI_S_IRQ_STATUS); dma_tx_status = geni_read_reg(q2spi->base, SE_DMA_TX_IRQ_STAT); dma_rx_status = geni_read_reg(q2spi->base, SE_DMA_RX_IRQ_STAT); - Q2SPI_DEBUG(q2spi, "%s sirq 0x%x mirq:0x%x dma_tx:0x%x dma_rx:0x%x\n", + Q2SPI_DBG_1(q2spi, "%s sirq 0x%x mirq:0x%x dma_tx:0x%x dma_rx:0x%x\n", __func__, s_irq_status, m_irq_status, dma_tx_status, dma_rx_status); geni_write_reg(m_irq_status, q2spi->base, SE_GENI_M_IRQ_CLEAR); geni_write_reg(s_irq_status, q2spi->base, SE_GENI_S_IRQ_CLEAR); @@ -2779,10 +2822,10 @@ static int q2spi_gsi_submit(struct q2spi_packet *q2spi_pkt) struct q2spi_dma_transfer *xfer = q2spi_pkt->xfer; int ret = 0; - Q2SPI_DEBUG(q2spi, "%s PID:%d q2spi:%p xfer:%p wait for gsi_lock 2\n", + Q2SPI_DBG_2(q2spi, "%s PID:%d q2spi:%p xfer:%p wait for gsi_lock 2\n", __func__, current->pid, q2spi, xfer); mutex_lock(&q2spi->gsi_lock); - Q2SPI_DEBUG(q2spi, "%s PID=%d acquired gsi_lock 2\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s PID=%d acquired gsi_lock 2\n", __func__, current->pid); ret = q2spi_setup_gsi_xfer(q2spi_pkt); if (ret) { Q2SPI_DEBUG(q2spi, "%s Err q2spi_setup_gsi_xfer failed: %d\n", __func__, ret); @@ -2790,7 +2833,7 @@ static int q2spi_gsi_submit(struct q2spi_packet *q2spi_pkt) del_timer_sync(&q2spi->slave_sleep_timer); goto unmap_buf; } - Q2SPI_DEBUG(q2spi, "%s PID:%d waiting check_gsi_transfer_completion\n", + Q2SPI_DBG_2(q2spi, "%s PID:%d waiting check_gsi_transfer_completion\n", __func__, 
current->pid); ret = check_gsi_transfer_completion(q2spi); if (ret) { @@ -2801,7 +2844,7 @@ static int q2spi_gsi_submit(struct q2spi_packet *q2spi_pkt) goto unmap_buf; } - Q2SPI_DEBUG(q2spi, "%s End PID:%d flow_id:%d tx_dma:%p rx_dma:%p, relased gsi_lock 2", + Q2SPI_DBG_1(q2spi, "%s End PID:%d flow_id:%d tx_dma:%p rx_dma:%p, relased gsi_lock 2", __func__, current->pid, q2spi_pkt->xfer->tid, (void *)xfer->tx_dma, (void *)xfer->rx_dma); unmap_buf: @@ -2824,7 +2867,7 @@ static int q2spi_prep_soft_reset_request(struct q2spi_geni *q2spi, struct q2spi_ struct q2spi_host_soft_reset_pkt *reset_pkt; struct q2spi_dma_transfer *reset_xfer = q2spi_pkt->xfer; - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->soft_reset_pkt:%p &q2spi_pkt->soft_reset_pkt:%p\n", + Q2SPI_DBG_1(q2spi, "%s q2spi_pkt->soft_reset_pkt:%p &q2spi_pkt->soft_reset_pkt:%p\n", __func__, q2spi_pkt->soft_reset_pkt, &q2spi_pkt->soft_reset_pkt); reset_xfer->cmd = q2spi_pkt->m_cmd_param; reset_pkt = q2spi_pkt->soft_reset_pkt; @@ -2832,10 +2875,10 @@ static int q2spi_prep_soft_reset_request(struct q2spi_geni *q2spi, struct q2spi_ reset_xfer->tx_dma = q2spi_pkt->soft_reset_tx_dma; reset_xfer->tx_data_len = 0; reset_xfer->tx_len = Q2SPI_HEADER_LEN; - Q2SPI_DEBUG(q2spi, "%s var1_xfer->tx_len:%d var1_xfer->tx_data_len:%d\n", + Q2SPI_DBG_1(q2spi, "%s var1_xfer->tx_len:%d var1_xfer->tx_data_len:%d\n", __func__, reset_xfer->tx_len, reset_xfer->tx_data_len); - Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p\n", __func__, + Q2SPI_DBG_1(q2spi, "%s tx_buf:%p tx_dma:%p\n", __func__, reset_xfer->tx_buf, (void *)reset_xfer->tx_dma); q2spi_dump_ipc(q2spi, "Preparing soft reset tx_buf DMA TX", (char *)reset_xfer->tx_buf, reset_xfer->tx_len); @@ -2856,30 +2899,30 @@ static int q2spi_prep_var1_request(struct q2spi_geni *q2spi, struct q2spi_packet struct q2spi_host_variant1_pkt *q2spi_hc_var1; struct q2spi_dma_transfer *var1_xfer = q2spi_pkt->xfer; - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->var1_pkt:%p\n", __func__, q2spi_pkt->var1_pkt); + Q2SPI_DBG_2(q2spi, 
"%s q2spi_pkt->var1_pkt:%p\n", __func__, q2spi_pkt->var1_pkt); var1_xfer->cmd = q2spi_pkt->m_cmd_param; q2spi_hc_var1 = q2spi_pkt->var1_pkt; var1_xfer->tx_buf = q2spi_pkt->var1_pkt; var1_xfer->tx_dma = q2spi_pkt->var1_tx_dma; var1_xfer->tx_data_len = (q2spi_pkt->var1_pkt->dw_len * 4) + 4; var1_xfer->tx_len = Q2SPI_HEADER_LEN + var1_xfer->tx_data_len; - Q2SPI_DEBUG(q2spi, "%s var1_xfer->tx_len:%d var1_xfer->tx_data_len:%d\n", + Q2SPI_DBG_1(q2spi, "%s var1_xfer->tx_len:%d var1_xfer->tx_data_len:%d\n", __func__, var1_xfer->tx_len, var1_xfer->tx_data_len); var1_xfer->tid = q2spi_pkt->var1_pkt->flow_id; if (q2spi_pkt->m_cmd_param == Q2SPI_TX_RX) { var1_xfer->tx_len = Q2SPI_HEADER_LEN; - Q2SPI_DEBUG(q2spi, "%s var1_xfer->tx_len:%d var1_xfer->tx_data_len:%d\n", + Q2SPI_DBG_2(q2spi, "%s var1_xfer->tx_len:%d var1_xfer->tx_data_len:%d\n", __func__, var1_xfer->tx_len, var1_xfer->tx_data_len); var1_xfer->rx_buf = q2spi_pkt->xfer->rx_buf; var1_xfer->rx_dma = q2spi_pkt->xfer->rx_dma; q2spi_pkt->var1_rx_dma = var1_xfer->rx_dma; var1_xfer->rx_data_len = (q2spi_pkt->var1_pkt->dw_len * 4) + 4; var1_xfer->rx_len = var1_xfer->rx_data_len; - Q2SPI_DEBUG(q2spi, "%s var1_xfer->rx_len:%d var1_xfer->rx_data_len:%d\n", + Q2SPI_DBG_1(q2spi, "%s var1_xfer->rx_len:%d var1_xfer->rx_data_len:%d\n", __func__, var1_xfer->rx_len, var1_xfer->rx_data_len); } - Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p\n", __func__, + Q2SPI_DBG_1(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p\n", __func__, var1_xfer->tx_buf, (void *)var1_xfer->tx_dma, var1_xfer->rx_buf, (void *)var1_xfer->rx_dma); q2spi_dump_ipc(q2spi, "Preparing var1 tx_buf DMA TX", @@ -2901,7 +2944,7 @@ static int q2spi_prep_var5_request(struct q2spi_geni *q2spi, struct q2spi_packet struct q2spi_host_variant4_5_pkt *q2spi_hc_var5; struct q2spi_dma_transfer *var5_xfer = q2spi_pkt->xfer; - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->var5_pkt:%p var5_tx_dma:%p\n", + Q2SPI_DBG_2(q2spi, "%s q2spi_pkt->var5_pkt:%p var5_tx_dma:%p\n", 
__func__, q2spi_pkt->var5_pkt, (void *)q2spi_pkt->var5_tx_dma); q2spi_hc_var5 = q2spi_pkt->var5_pkt; var5_xfer->cmd = q2spi_pkt->m_cmd_param; @@ -2910,7 +2953,7 @@ static int q2spi_prep_var5_request(struct q2spi_geni *q2spi, struct q2spi_packet var5_xfer->tid = q2spi_pkt->var5_pkt->flow_id; var5_xfer->tx_data_len = q2spi_pkt->data_length; var5_xfer->tx_len = Q2SPI_HEADER_LEN + var5_xfer->tx_data_len; - Q2SPI_DEBUG(q2spi, "%s var5_xfer->tx_len:%d var5_xfer->tx_data_len:%d\n", + Q2SPI_DBG_1(q2spi, "%s var5_xfer->tx_len:%d var5_xfer->tx_data_len:%d\n", __func__, var5_xfer->tx_len, var5_xfer->tx_data_len); if (q2spi_pkt->m_cmd_param == Q2SPI_TX_RX) { var5_xfer->rx_buf = q2spi_pkt->xfer->rx_buf; @@ -2921,21 +2964,22 @@ static int q2spi_prep_var5_request(struct q2spi_geni *q2spi, struct q2spi_packet ((q2spi_pkt->var5_pkt->dw_len_part1 | q2spi_pkt->var5_pkt->dw_len_part2 << 2) * 4) + 4; var5_xfer->rx_data_len = q2spi_pkt->data_length; - Q2SPI_DEBUG(q2spi, "%s var5_pkt:%p cmd:%d flow_id:0x%x len_part1:%d len_part2:%d\n", + Q2SPI_DBG_2(q2spi, "%s var5_pkt:%p cmd:%d flow_id:0x%x len_part1:%d len_part2:%d\n", __func__, q2spi_pkt->var5_pkt, q2spi_pkt->var5_pkt->cmd, q2spi_pkt->var5_pkt->flow_id, q2spi_pkt->var5_pkt->dw_len_part1, q2spi_pkt->var5_pkt->dw_len_part2); - Q2SPI_DEBUG(q2spi, "%s var5_pkt data_buf:%p var5_xfer->rx_len:%d\n", + Q2SPI_DBG_1(q2spi, "%s var5_pkt data_buf:%p var5_xfer->rx_len:%d\n", __func__, q2spi_pkt->var5_pkt->data_buf, var5_xfer->rx_len); } - Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p\n", __func__, + Q2SPI_DBG_1(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p\n", __func__, var5_xfer->tx_buf, (void *)var5_xfer->tx_dma, var5_xfer->rx_buf, (void *)var5_xfer->rx_dma); q2spi_dump_ipc(q2spi, "Preparing var5 tx_buf DMA TX", (char *)var5_xfer->tx_buf, Q2SPI_HEADER_LEN); if (q2spi_pkt->m_cmd_param == Q2SPI_TX_ONLY) q2spi_dump_ipc(q2spi, "Preparing var5 data_buf DMA TX", - (void *)q2spi_pkt->var5_pkt->data_buf, var5_xfer->tx_data_len); + 
(void *)q2spi_pkt->var5_pkt->data_buf, + var5_xfer->tx_data_len); return 0; } @@ -2962,13 +3006,13 @@ static int q2spi_prep_hrf_request(struct q2spi_geni *q2spi, struct q2spi_packet var1_xfer->tx_len = Q2SPI_HEADER_LEN + var1_xfer->tx_data_len; var1_xfer->tid = q2spi_pkt->var1_pkt->flow_id; var1_xfer->rx_len = RX_DMA_CR_BUF_SIZE; - Q2SPI_DEBUG(q2spi, "%s var1_pkt:%p var1_pkt_phy:%p cmd:%d addr:0x%x flow_id:0x%x\n", + Q2SPI_DBG_2(q2spi, "%s var1_pkt:%p var1_pkt_phy:%p cmd:%d addr:0x%x flow_id:0x%x\n", __func__, q2spi_pkt->var1_pkt, (void *)q2spi_pkt->var1_tx_dma, q2spi_pkt->var1_pkt->cmd, q2spi_pkt->var1_pkt->reg_offset, q2spi_pkt->var1_pkt->flow_id); - Q2SPI_DEBUG(q2spi, "%s var1_pkt: len:%d data_buf %p\n", + Q2SPI_DBG_1(q2spi, "%s var1_pkt: len:%d data_buf %p\n", __func__, q2spi_pkt->var1_pkt->dw_len, q2spi_pkt->var1_pkt->data_buf); - Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p\n", + Q2SPI_DBG_2(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p\n", __func__, var1_xfer->tx_buf, (void *)var1_xfer->tx_dma, var1_xfer->rx_buf, (void *)var1_xfer->rx_dma); q2spi_dump_ipc(q2spi, "Preparing var1_HRF DMA TX", @@ -2982,7 +3026,7 @@ int q2spi_process_hrf_flow_after_lra(struct q2spi_geni *q2spi, struct q2spi_pack long timeout = 0; int ret = -1; - Q2SPI_DEBUG(q2spi, "%s VAR1 wait for doorbell\n", __func__); + Q2SPI_DBG_1(q2spi, "%s VAR1 wait for doorbell\n", __func__); /* Make sure we get the doorbell before continuing for HRF flow */ xfer_timeout = msecs_to_jiffies(XFER_TIMEOUT_OFFSET); timeout = wait_for_completion_interruptible_timeout(&q2spi_pkt->wait_for_db, xfer_timeout); @@ -2996,7 +3040,7 @@ int q2spi_process_hrf_flow_after_lra(struct q2spi_geni *q2spi, struct q2spi_pack return -ETIMEDOUT; } - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p flow_id:%d cr_flow_id:%d\n", __func__, + Q2SPI_DBG_1(q2spi, "%s q2spi_pkt:%p flow_id:%d cr_flow_id:%d\n", __func__, q2spi_pkt, q2spi_pkt->flow_id, q2spi_pkt->cr_var3.flow_id); if (q2spi_pkt->flow_id == 
q2spi_pkt->cr_var3.flow_id) { q2spi_pkt->vtype = VARIANT_5; @@ -3008,7 +3052,7 @@ int q2spi_process_hrf_flow_after_lra(struct q2spi_geni *q2spi, struct q2spi_pack Q2SPI_DEBUG(q2spi, "%s Err q2spi_gsi_submit failed: %d\n", __func__, ret); return ret; } - Q2SPI_DEBUG(q2spi, "%s wakeup sma_wr_comp\n", __func__); + Q2SPI_DBG_2(q2spi, "%s wakeup sma_wr_comp\n", __func__); complete_all(&q2spi->sma_wr_comp); atomic_set(&q2spi->sma_wr_pending, 0); } else { @@ -3035,9 +3079,9 @@ int __q2spi_send_messages(struct q2spi_geni *q2spi, void *ptr) bool cm_flow_pkt = false; if (ptr) - Q2SPI_DEBUG(q2spi, "Enter %s for %p\n", __func__, ptr); + Q2SPI_DBG_2(q2spi, "Enter %s for %p\n", __func__, ptr); else - Q2SPI_DEBUG(q2spi, "Enter %s PID %d\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "Enter %s PID %d\n", __func__, current->pid); if (q2spi->port_release) { Q2SPI_DEBUG(q2spi, "%s Err Port in closed state, return\n", __func__); @@ -3058,11 +3102,11 @@ int __q2spi_send_messages(struct q2spi_geni *q2spi, void *ptr) if (q2spi_pkt_tmp1->state == NOT_IN_USE && q2spi_pkt_tmp1 == (struct q2spi_packet *)ptr) { q2spi_pkt = q2spi_pkt_tmp1; - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt %p state:%s\n", + Q2SPI_DBG_1(q2spi, "%s sending q2spi_pkt %p state:%s\n", __func__, q2spi_pkt, q2spi_pkt_state(q2spi_pkt)); break; } - Q2SPI_DEBUG(q2spi, "%s check q2spi_pkt %p state:%s\n", + Q2SPI_DBG_1(q2spi, "%s check q2spi_pkt %p state:%s\n", __func__, q2spi_pkt_tmp1, q2spi_pkt_state(q2spi_pkt_tmp1)); } mutex_unlock(&q2spi->queue_lock); @@ -3073,7 +3117,6 @@ int __q2spi_send_messages(struct q2spi_geni *q2spi, void *ptr) } q2spi_pkt->state = IN_USE; - Q2SPI_DEBUG(q2spi, "%s send q2spi_pkt %p\n", __func__, q2spi_pkt); if (!q2spi_pkt) { Q2SPI_DEBUG(q2spi, "%s q2spi_pkt is NULL\n", __func__); ret = -EAGAIN; @@ -3090,18 +3133,18 @@ int __q2spi_send_messages(struct q2spi_geni *q2spi, void *ptr) if (ret) goto send_msg_exit; - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->vtype=%d cr_hdr_type=%d\n", + Q2SPI_DBG_1(q2spi, "%s 
q2spi_pkt->vtype=%d cr_hdr_type=%d\n", __func__, q2spi_pkt->vtype, q2spi_pkt->cr_hdr_type); if (q2spi_pkt->vtype == VARIANT_5) { if (q2spi_pkt->var5_pkt->flow_id >= Q2SPI_END_TID_ID) { cm_flow_pkt = true; - Q2SPI_DEBUG(q2spi, "%s flow_id:%d\n", __func__, + Q2SPI_DBG_1(q2spi, "%s flow_id:%d\n", __func__, q2spi_pkt->var5_pkt->flow_id); } } if (!cm_flow_pkt && atomic_read(&q2spi->doorbell_pending)) - Q2SPI_DEBUG(q2spi, "%s cm_flow_pkt:%d doorbell_pending:%d\n", + Q2SPI_DBG_1(q2spi, "%s cm_flow_pkt:%d doorbell_pending:%d\n", __func__, cm_flow_pkt, atomic_read(&q2spi->doorbell_pending)); ret = q2spi_gsi_submit(q2spi_pkt); @@ -3112,9 +3155,9 @@ int __q2spi_send_messages(struct q2spi_geni *q2spi, void *ptr) } if (q2spi_pkt->vtype == VARIANT_5) { - Q2SPI_DEBUG(q2spi, "%s wakeup sma_wait\n", __func__); + Q2SPI_DBG_2(q2spi, "%s wakeup sma_wait\n", __func__); complete_all(&q2spi->sma_wait); - Q2SPI_DEBUG(q2spi, "%s wakeup sma_rd_comp\n", __func__); + Q2SPI_DBG_2(q2spi, "%s wakeup sma_rd_comp\n", __func__); complete_all(&q2spi->sma_rd_comp); atomic_set(&q2spi->sma_rd_pending, 0); } @@ -3126,7 +3169,7 @@ send_msg_exit: mutex_unlock(&q2spi->send_msgs_lock); if (atomic_read(&q2spi->sma_rd_pending)) atomic_set(&q2spi->sma_rd_pending, 0); - Q2SPI_DEBUG(q2spi, "%s: line:%d End\n", __func__, __LINE__); + Q2SPI_DBG_2(q2spi, "%s: line:%d End\n", __func__, __LINE__); return ret; } @@ -3183,7 +3226,7 @@ static int q2spi_proto_init(struct q2spi_geni *q2spi) geni_write_reg(spi_delay_reg, q2spi->base, SPI_DELAYS_COUNTERS); se_geni_cfg_95 |= M_GP_CNT7_TSN & M_GP_CNT7; geni_write_reg(se_geni_cfg_95, q2spi->base, SE_GENI_CFG_REG95); - Q2SPI_DEBUG(q2spi, "tx_cfg: 0x%x io3_sel:0x%x spi_delay: 0x%x cfg_95:0x%x\n", + Q2SPI_DBG_2(q2spi, "tx_cfg: 0x%x io3_sel:0x%x spi_delay: 0x%x cfg_95:0x%x\n", geni_read_reg(q2spi->base, SE_SPI_TRANS_CFG), geni_read_reg(q2spi->base, GENI_CFG_REG80), geni_read_reg(q2spi->base, SPI_DELAYS_COUNTERS), @@ -3196,14 +3239,14 @@ static int q2spi_proto_init(struct 
q2spi_geni *q2spi) word_len &= ~WORD_LEN_MSK; word_len |= MIN_WORD_LEN & WORD_LEN_MSK; geni_write_reg(word_len, q2spi->base, SE_SPI_WORD_LEN); - Q2SPI_DEBUG(q2spi, "cfg_103: 0x%x cfg_104:0x%x pre_post_dly;0x%x spi_word_len:0x%x\n", + Q2SPI_DBG_2(q2spi, "cfg_103: 0x%x cfg_104:0x%x pre_post_dly;0x%x spi_word_len:0x%x\n", geni_read_reg(q2spi->base, SE_GENI_CFG_REG103), geni_read_reg(q2spi->base, SE_GENI_CFG_REG104), pre_post_dly, geni_read_reg(q2spi->base, SE_SPI_WORD_LEN)); io3_sel &= ~OTHER_IO_OE; io3_sel |= (IO_MACRO_IO3_DATA_IN_SEL << IO_MACRO_IO3_DATA_IN_SEL_SHIFT) & IO_MACRO_IO3_DATA_IN_SEL_MASK; - Q2SPI_DEBUG(q2spi, "io3_sel:0x%x %lx TPM:0x%x %d\n", io3_sel, + Q2SPI_DBG_2(q2spi, "io3_sel:0x%x %lx TPM:0x%x %d\n", io3_sel, (IO_MACRO_IO3_DATA_IN_SEL & IO_MACRO_IO3_DATA_IN_SEL_MASK), SPI_PIPE_DLY_TPM, SPI_PIPE_DLY_TPM << M_GP_CNT6_CN_SHIFT); return 0; @@ -3236,7 +3279,7 @@ static int q2spi_geni_init(struct q2spi_geni *q2spi) ver = geni_se_get_qup_hw_version(&q2spi->se); major = GENI_SE_VERSION_MAJOR(ver); minor = GENI_SE_VERSION_MINOR(ver); - Q2SPI_DEBUG(q2spi, "%s ver:0x%x major:%d minor:%d\n", __func__, ver, major, minor); + Q2SPI_DBG_2(q2spi, "%s ver:0x%x major:%d minor:%d\n", __func__, ver, major, minor); if (major == 1 && minor == 0) q2spi->oversampling = 2; @@ -3255,7 +3298,7 @@ static int q2spi_geni_init(struct q2spi_geni *q2spi) Q2SPI_DEBUG(q2spi, "%s: Err GSI mode not supported!\n", __func__); return -EINVAL; } - Q2SPI_DEBUG(q2spi, "%s gsi_mode:%d xfer_mode:%d ret:%d\n", + Q2SPI_DBG_1(q2spi, "%s gsi_mode:%d xfer_mode:%d ret:%d\n", __func__, q2spi->gsi_mode, q2spi->xfer_mode, ret); return ret; } @@ -3277,7 +3320,7 @@ void q2spi_geni_resources_off(struct q2spi_geni *q2spi) se = &q2spi->se; mutex_lock(&q2spi->geni_resource_lock); if (!q2spi->resources_on) { - Q2SPI_DEBUG(q2spi, "%s: Resources already off\n", __func__); + Q2SPI_DBG_1(q2spi, "%s: Resources already off\n", __func__); goto exit_resource_off; } @@ -3302,7 +3345,7 @@ void 
q2spi_geni_resources_off(struct q2spi_geni *q2spi) exit_resource_off: mutex_unlock(&q2spi->geni_resource_lock); - Q2SPI_DEBUG(q2spi, "%s: ret:%d\n", __func__, ret); + Q2SPI_DBG_1(q2spi, "%s: ret:%d\n", __func__, ret); } /** @@ -3317,9 +3360,9 @@ int q2spi_geni_resources_on(struct q2spi_geni *q2spi) int ret = 0; mutex_lock(&q2spi->geni_resource_lock); - Q2SPI_DEBUG(q2spi, "%s PID=%d\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s PID=%d\n", __func__, current->pid); if (q2spi->resources_on) { - Q2SPI_DEBUG(q2spi, "%s: Resources already on\n", __func__); + Q2SPI_DBG_1(q2spi, "%s: Resources already on\n", __func__); goto exit_resource_on; } @@ -3348,7 +3391,7 @@ int q2spi_geni_resources_on(struct q2spi_geni *q2spi) exit_resource_on: mutex_unlock(&q2spi->geni_resource_lock); - Q2SPI_DEBUG(q2spi, "%s: ret:%d\n", __func__, ret); + Q2SPI_DBG_1(q2spi, "%s: ret:%d\n", __func__, ret); return ret; } @@ -3374,10 +3417,10 @@ static int q2spi_get_icc_pinctrl(struct platform_device *pdev, Q2SPI_CORE2X_VOTE, APPS_PROC_TO_QUP_VOTE, (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH)); if (ret) { - Q2SPI_DEBUG(q2spi, "Error geni_se_resources_init\n"); + Q2SPI_DEBUG(q2spi, "Err geni_se_resources_init\n"); goto get_icc_pinctrl_err; } - Q2SPI_DEBUG(q2spi, "%s GENI_TO_CORE:%d CPU_TO_GENI:%d GENI_TO_DDR:%d\n", + Q2SPI_DBG_1(q2spi, "%s GENI_TO_CORE:%d CPU_TO_GENI:%d GENI_TO_DDR:%d\n", __func__, q2spi_rsc->icc_paths[GENI_TO_CORE].avg_bw, q2spi_rsc->icc_paths[CPU_TO_GENI].avg_bw, q2spi_rsc->icc_paths[GENI_TO_DDR].avg_bw); @@ -3502,7 +3545,7 @@ static int q2spi_chardev_create(struct q2spi_geni *q2spi) Q2SPI_DEBUG(q2spi, "failed to create device\n"); goto err_dev_create; } - Q2SPI_DEBUG(q2spi, "%s q2spi:%p i:%d end\n", __func__, q2spi, i); + Q2SPI_DBG_2(q2spi, "%s q2spi:%p i:%d end\n", __func__, q2spi, i); } return 0; @@ -3538,9 +3581,7 @@ int q2spi_read_reg(struct q2spi_geni *q2spi, int reg_offset) q2spi_req.addr = reg_offset; q2spi_req.data_len = 4; /* In bytes */ - Q2SPI_DEBUG(q2spi, "%s 
q2spi_pkt:%p &q2spi_pkt=%p\n", __func__, q2spi_pkt, &q2spi_pkt); ret = q2spi_frame_lra(q2spi, &q2spi_req, &q2spi_pkt, VARIANT_1_LRA); - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p flow_id:%d\n", __func__, q2spi_pkt, ret); if (ret < 0) { Q2SPI_DEBUG(q2spi, "Err q2spi_frame_lra failed ret:%d\n", ret); return ret; @@ -3551,19 +3592,18 @@ int q2spi_read_reg(struct q2spi_geni *q2spi, int reg_offset) xfer->rx_buf = q2spi_pkt->xfer->rx_buf; xfer->rx_dma = q2spi_pkt->xfer->rx_dma; xfer->cmd = q2spi_pkt->m_cmd_param; - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p cmd:%d\n", __func__, q2spi_pkt, xfer->cmd); + Q2SPI_DBG_1(q2spi, "%s q2spi_pkt:%p cmd:%d\n", __func__, q2spi_pkt, xfer->cmd); xfer->tx_data_len = q2spi_req.data_len; xfer->tx_len = Q2SPI_HEADER_LEN; xfer->rx_data_len = q2spi_req.data_len; xfer->rx_len = xfer->rx_data_len; xfer->tid = q2spi_pkt->var1_pkt->flow_id; - Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p tx_len:%d rx_len:%d\n", + Q2SPI_DBG_1(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p tx_len:%d rx_len:%d\n", __func__, xfer->tx_buf, (void *)xfer->tx_dma, xfer->rx_buf, (void *)xfer->rx_dma, xfer->tx_len, xfer->rx_len); - q2spi_dump_ipc(q2spi, "q2spi read reg tx_buf DMA TX", - (char *)xfer->tx_buf, xfer->tx_len); + q2spi_dump_ipc(q2spi, "q2spi read reg tx_buf DMA TX", (char *)xfer->tx_buf, xfer->tx_len); ret = q2spi_gsi_submit(q2spi_pkt); if (ret) { @@ -3572,7 +3612,7 @@ int q2spi_read_reg(struct q2spi_geni *q2spi, int reg_offset) } q2spi_free_xfer_tid(q2spi, q2spi_pkt->xfer->tid); - Q2SPI_DEBUG(q2spi, "Reg:0x%x Read Val = 0x%x\n", reg_offset, *(unsigned int *)xfer->rx_buf); + Q2SPI_DBG_1(q2spi, "Reg:0x%x Read Val = 0x%x\n", reg_offset, *(unsigned int *)xfer->rx_buf); return ret; } @@ -3604,7 +3644,7 @@ static int q2spi_write_reg(struct q2spi_geni *q2spi, int reg_offset, unsigned lo Q2SPI_DEBUG(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n", __func__, ret); return ret; } - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p\n", __func__, q2spi_pkt); + Q2SPI_DBG_1(q2spi, 
"%s q2spi_pkt:%p\n", __func__, q2spi_pkt); xfer = q2spi_pkt->xfer; xfer->tx_buf = q2spi_pkt->var1_pkt; xfer->tx_dma = q2spi_pkt->var1_tx_dma; @@ -3613,11 +3653,10 @@ static int q2spi_write_reg(struct q2spi_geni *q2spi, int reg_offset, unsigned lo xfer->tx_len = Q2SPI_HEADER_LEN + xfer->tx_data_len; xfer->tid = q2spi_pkt->var1_pkt->flow_id; - Q2SPI_DEBUG(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p tx_len:%d rx_len:%d\n", + Q2SPI_DBG_1(q2spi, "%s tx_buf:%p tx_dma:%p rx_buf:%p rx_dma:%p tx_len:%d rx_len:%d\n", __func__, xfer->tx_buf, (void *)xfer->tx_dma, xfer->rx_buf, (void *)xfer->rx_dma, xfer->tx_len, xfer->rx_len); - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt->var1_pkt_add:%p\n", __func__, q2spi_pkt->var1_pkt); q2spi_dump_ipc(q2spi, "q2spi_read_reg tx_buf DMA TX", (char *)xfer->tx_buf, xfer->tx_len); ret = q2spi_gsi_submit(q2spi_pkt); @@ -3627,7 +3666,7 @@ static int q2spi_write_reg(struct q2spi_geni *q2spi, int reg_offset, unsigned lo } q2spi_free_xfer_tid(q2spi, q2spi_pkt->xfer->tid); - Q2SPI_DEBUG(q2spi, "%s write to reg success ret:%d\n", __func__, ret); + Q2SPI_DBG_1(q2spi, "%s write to reg success ret:%d\n", __func__, ret); return ret; } @@ -3653,7 +3692,6 @@ static int q2spi_slave_init(struct q2spi_geni *q2spi, bool slave_init) if (!slave_init) return 0; - Q2SPI_DEBUG(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_SCRATCH0); /* Dummy SCRATCH register write */ ret = q2spi_write_reg(q2spi, Q2SPI_SCRATCH0, scratch_data); if (ret) { @@ -3662,7 +3700,6 @@ static int q2spi_slave_init(struct q2spi_geni *q2spi, bool slave_init) } /* Dummy SCRATCH register read */ - Q2SPI_DEBUG(q2spi, "%s reg: 0x%x\n", __func__, Q2SPI_SCRATCH0); ret = q2spi_read_reg(q2spi, Q2SPI_SCRATCH0); if (ret) { Q2SPI_DEBUG(q2spi, "Err scratch0 read failed: %d\n", ret); @@ -3675,28 +3712,28 @@ static int q2spi_slave_init(struct q2spi_geni *q2spi, bool slave_init) */ while (retries > 0 && value <= 0) { value = q2spi_read_reg(q2spi, Q2SPI_HW_VERSION); - Q2SPI_DEBUG(q2spi, "%s retries:%d value:%d\n", 
__func__, retries, value); + Q2SPI_DBG_1(q2spi, "%s retries:%d value:%d\n", __func__, retries, value); if (value <= 0) Q2SPI_DEBUG(q2spi, "HW_Version read failed: %d\n", ret); retries--; - Q2SPI_DEBUG(q2spi, "%s retries:%d value:%d\n", __func__, retries, value); + Q2SPI_DBG_1(q2spi, "%s retries:%d value:%d\n", __func__, retries, value); } - Q2SPI_DEBUG(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_HOST_CFG); + Q2SPI_DBG_1(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_HOST_CFG); ret = q2spi_read_reg(q2spi, Q2SPI_HOST_CFG); if (ret) { Q2SPI_DEBUG(q2spi, "Err HOST CFG read failed: %d\n", ret); return ret; } - Q2SPI_DEBUG(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_ERROR_EN); + Q2SPI_DBG_1(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_ERROR_EN); ret = q2spi_write_reg(q2spi, Q2SPI_ERROR_EN, error_en_data); if (ret) { Q2SPI_DEBUG(q2spi, "Err Error_en reg write failed: %d\n", ret); return ret; } - Q2SPI_DEBUG(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_ERROR_EN); + Q2SPI_DBG_1(q2spi, "%s reg:0x%x\n", __func__, Q2SPI_ERROR_EN); ret = q2spi_read_reg(q2spi, Q2SPI_ERROR_EN); if (ret) { Q2SPI_DEBUG(q2spi, "Err HOST CFG read failed: %d\n", ret); @@ -3757,7 +3794,7 @@ q2spi_copy_cr_data_to_pkt(struct q2spi_packet *q2spi_pkt, struct q2spi_cr_packet memcpy(&q2spi_pkt->cr_bulk, &cr_pkt->bulk_pkt[idx], sizeof(struct q2spi_client_bulk_access_pkt)); q2spi_pkt->cr_hdr_type = cr_pkt->cr_hdr_type[idx]; - Q2SPI_DEBUG(q2spi_pkt->q2spi, "%s q2spi_pkt:%p cr_hdr_type:%d\n", + Q2SPI_DBG_1(q2spi_pkt->q2spi, "%s q2spi_pkt:%p cr_hdr_type:%d\n", __func__, q2spi_pkt, q2spi_pkt->cr_hdr_type); } @@ -3786,7 +3823,7 @@ int q2spi_send_system_mem_access(struct q2spi_geni *q2spi, struct q2spi_packet * ((cr_pkt->var3_pkt[idx].dw_len_part2 << 4) & 0xFF) | cr_pkt->var3_pkt[idx].dw_len_part1; q2spi_req.data_len = (dw_len * 4) + 4; - Q2SPI_DEBUG(q2spi, "%s dw_len:%d data_len:%d\n", __func__, dw_len, q2spi_req.data_len); + Q2SPI_DBG_1(q2spi, "%s dw_len:%d data_len:%d\n", __func__, dw_len, q2spi_req.data_len); q2spi_req.cmd = DATA_READ; 
q2spi_req.addr = 0; q2spi_req.end_point = 0; @@ -3815,7 +3852,7 @@ int q2spi_send_system_mem_access(struct q2spi_geni *q2spi, struct q2spi_packet * q2spi_copy_cr_data_to_pkt((struct q2spi_packet *)*q2spi_pkt, cr_pkt, idx); ((struct q2spi_packet *)*q2spi_pkt)->var3_data_len = q2spi_req.data_len; if (atomic_read(&q2spi->sma_wr_pending)) { - Q2SPI_DEBUG(q2spi, "%s sma write is pending wait\n", __func__); + Q2SPI_DBG_1(q2spi, "%s sma write is pending wait\n", __func__); xfer_timeout = msecs_to_jiffies(XFER_TIMEOUT_OFFSET); timeout = wait_for_completion_interruptible_timeout(&q2spi->sma_wr_comp, xfer_timeout); @@ -3834,7 +3871,7 @@ int q2spi_send_system_mem_access(struct q2spi_geni *q2spi, struct q2spi_packet * } ret = __q2spi_send_messages(q2spi, (void *)*q2spi_pkt); q2spi->sys_mem_read_in_progress = false; - Q2SPI_DEBUG(q2spi, "%s End ret:%d %d\n", __func__, ret, __LINE__); + Q2SPI_DBG_1(q2spi, "%s End ret:%d %d\n", __func__, ret, __LINE__); return ret; } @@ -3862,11 +3899,11 @@ void q2spi_find_pkt_by_flow_id(struct q2spi_geni *q2spi, struct q2spi_cr_packet } mutex_unlock(&q2spi->queue_lock); if (q2spi_pkt) { - Q2SPI_DEBUG(q2spi, "%s Found q2spi_pkt %p with flow_id %d\n", + Q2SPI_DBG_1(q2spi, "%s Found q2spi_pkt %p with flow_id %d\n", __func__, q2spi_pkt, flow_id); if (!atomic_read(&q2spi->sma_wr_pending)) { atomic_set(&q2spi->sma_wr_pending, 1); - Q2SPI_DEBUG(q2spi, "%s sma_wr_pending set for prev DB\n", __func__); + Q2SPI_DBG_1(q2spi, "%s sma_wr_pending set for prev DB\n", __func__); } /* wakeup HRF flow which is waiting for this CR doorbell */ @@ -3896,7 +3933,7 @@ void q2spi_set_data_avail_in_pkt(struct q2spi_geni *q2spi, struct q2spi_cr_packe if (q2spi_pkt_tmp1->cr_var3.flow_id == flow_id && q2spi_pkt_tmp1->state == IN_USE) { q2spi_pkt = q2spi_pkt_tmp1; - Q2SPI_DEBUG(q2spi, "%s Found CR PKT for flow_id:%d", + Q2SPI_DBG_1(q2spi, "%s Found CR PKT for flow_id:%d", __func__, flow_id); break; } @@ -3905,11 +3942,11 @@ void q2spi_set_data_avail_in_pkt(struct q2spi_geni 
*q2spi, struct q2spi_cr_packe mutex_unlock(&q2spi->queue_lock); if (q2spi_pkt) { - Q2SPI_DEBUG(q2spi, "%s Found q2spi_pkt %p with flow_id %d", + Q2SPI_DBG_1(q2spi, "%s Found q2spi_pkt %p with flow_id %d", __func__, q2spi_pkt, flow_id); q2spi_pkt->state = DATA_AVAIL; } else { - Q2SPI_DEBUG(q2spi, "%s Err q2spi_pkt not found for flow_id %d\n", + Q2SPI_DBG_1(q2spi, "%s Err q2spi_pkt not found for flow_id %d\n", __func__, flow_id); } } @@ -3937,7 +3974,7 @@ void q2spi_complete_bulk_status(struct q2spi_geni *q2spi, struct q2spi_cr_packet } mutex_unlock(&q2spi->queue_lock); if (q2spi_pkt) { - Q2SPI_DEBUG(q2spi, "%s Found q2spi_pkt %p with flow_id %d\n", + Q2SPI_DBG_1(q2spi, "%s Found q2spi_pkt %p with flow_id %d\n", __func__, q2spi_pkt, flow_id); q2spi_copy_cr_data_to_pkt(q2spi_pkt, cr_pkt, idx); complete_all(&q2spi_pkt->bulk_wait); @@ -3959,7 +3996,7 @@ static void q2spi_handle_wakeup_work(struct work_struct *work) container_of(work, struct q2spi_geni, q2spi_wakeup_work); int ret = 0; - Q2SPI_DEBUG(q2spi, "%s Enter PID=%d q2spi:%p\n", __func__, current->pid, q2spi); + Q2SPI_DBG_1(q2spi, "%s Enter PID=%d q2spi:%p\n", __func__, current->pid, q2spi); ret = q2spi_geni_runtime_resume(q2spi->dev); if (ret) @@ -3977,7 +4014,7 @@ static void q2spi_sleep_work_func(struct work_struct *work) struct q2spi_geni *q2spi = container_of(work, struct q2spi_geni, q2spi_sleep_work); - Q2SPI_DEBUG(q2spi, "%s: PID=%d\n", __func__, current->pid); + Q2SPI_DBG_1(q2spi, "%s: PID=%d\n", __func__, current->pid); if (q2spi_sys_restart || q2spi->port_release) { Q2SPI_DEBUG(q2spi, "%s Err Port in closed state or sys_restart\n", __func__); return; @@ -4002,7 +4039,7 @@ static void q2spi_handle_doorbell_work(struct work_struct *work) bool sys_mem_access = false; long timeout = 0; - Q2SPI_DEBUG(q2spi, "%s Enter PID=%d q2spi:%p PM get_sync count:%d\n", __func__, + Q2SPI_DBG_1(q2spi, "%s Enter PID=%d q2spi:%p PM get_sync count:%d\n", __func__, current->pid, q2spi, 
atomic_read(&q2spi->dev->power.usage_count)); ret = pm_runtime_get_sync(q2spi->dev); if (ret < 0) { @@ -4011,7 +4048,7 @@ static void q2spi_handle_doorbell_work(struct work_struct *work) pm_runtime_set_suspended(q2spi->dev); return; } - Q2SPI_DEBUG(q2spi, "%s PM after get_sync count:%d\n", __func__, + Q2SPI_DBG_2(q2spi, "%s PM after get_sync count:%d\n", __func__, atomic_read(&q2spi->dev->power.usage_count)); /* wait for RX dma channel TCE 0x22 to get CR body in RX DMA buffer */ ret = check_gsi_transfer_completion_db_rx(q2spi); @@ -4040,20 +4077,20 @@ static void q2spi_handle_doorbell_work(struct work_struct *work) reinit_completion(&q2spi->sma_wait); no_of_crs = q2spi_cr_pkt->num_valid_crs; - Q2SPI_DEBUG(q2spi, "%s q2spi_cr_pkt:%p q2spi_db_xfer:%p db_xfer_rx_buf:%p\n", + Q2SPI_DBG_2(q2spi, "%s q2spi_cr_pkt:%p q2spi_db_xfer:%p db_xfer_rx_buf:%p\n", __func__, q2spi_cr_pkt, q2spi->db_xfer, q2spi->db_xfer->rx_buf); for (i = 0; i < no_of_crs; i++) { - Q2SPI_DEBUG(q2spi, "%s i=%d CR Header CMD 0x%x\n", + Q2SPI_DBG_1(q2spi, "%s i=%d CR Header CMD 0x%x\n", __func__, i, q2spi_cr_pkt->cr_hdr[i].cmd); if (q2spi_cr_pkt->cr_hdr[i].cmd == ADDR_LESS_WR_ACCESS || q2spi_cr_pkt->cr_hdr[i].cmd == ADDR_LESS_RD_ACCESS) { if (q2spi_cr_pkt->cr_hdr[i].flow) { /* C->M flow */ - Q2SPI_DEBUG(q2spi, + Q2SPI_DBG_2(q2spi, "%s cr_hdr ADDR_LESS_WR/RD_ACCESS with client flow opcode:%d\n", __func__, q2spi_cr_pkt->cr_hdr[i].cmd); - Q2SPI_DEBUG(q2spi, "%s len_part1:%d len_part2:%d len_part3:%d\n", + Q2SPI_DBG_2(q2spi, "%s len_part1:%d len_part2:%d len_part3:%d\n", __func__, q2spi_cr_pkt->var3_pkt[i].dw_len_part1, q2spi_cr_pkt->var3_pkt[i].dw_len_part2, q2spi_cr_pkt->var3_pkt[i].dw_len_part3); @@ -4063,36 +4100,36 @@ static void q2spi_handle_doorbell_work(struct work_struct *work) sys_mem_access = true; } else { /* M->C flow */ - Q2SPI_DEBUG(q2spi, + Q2SPI_DBG_2(q2spi, "%s cr_hdr ADDR_LESS_WR/RD with Host flow, opcode:%d\n", __func__, q2spi_cr_pkt->cr_hdr[i].cmd); if (q2spi_cr_pkt->cr_hdr[i].cmd == 
ADDR_LESS_WR_ACCESS) { q2spi_find_pkt_by_flow_id(q2spi, q2spi_cr_pkt, i); - Q2SPI_DEBUG(q2spi, "%s cmd:%d doorbell CR for Host flow\n", + Q2SPI_DBG_1(q2spi, "%s cmd:%d doorbell CR for Host flow\n", __func__, q2spi_cr_pkt->cr_hdr[i].cmd); } } } else if (q2spi_cr_pkt->cr_hdr[i].cmd == BULK_ACCESS_STATUS) { if (q2spi_cr_pkt->bulk_pkt[i].flow_id >= 0x8) { - Q2SPI_DEBUG(q2spi, "%s Bulk status with Client Flow ID\n", + Q2SPI_DBG_1(q2spi, "%s Bulk status with Client Flow ID\n", __func__); q2spi_set_data_avail_in_pkt(q2spi, q2spi_cr_pkt, i); q2spi_notify_data_avail_for_client(q2spi); if (!timer_pending(&q2spi->slave_sleep_timer)) { - Q2SPI_DEBUG(q2spi, "%s sleep timer expired\n", __func__); + Q2SPI_DBG_1(q2spi, "%s sleep timer expired\n", __func__); q2spi_put_slave_to_sleep(q2spi); } } else { - Q2SPI_DEBUG(q2spi, "%s Bulk status with host Flow ID:%d\n", + Q2SPI_DBG_1(q2spi, "%s Bulk status with host Flow ID:%d\n", __func__, q2spi_cr_pkt->bulk_pkt[i].flow_id); q2spi_complete_bulk_status(q2spi, q2spi_cr_pkt, i); } } else if (q2spi_cr_pkt->cr_hdr[i].cmd == CR_EXTENSION) { - Q2SPI_DEBUG(q2spi, "%s Extended CR from Client\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Extended CR from Client\n", __func__); } if (sys_mem_access) { - Q2SPI_DEBUG(q2spi, "%s waiting on sma_wait\n", __func__); + Q2SPI_DBG_2(q2spi, "%s waiting on sma_wait\n", __func__); /* Block on read_wq until sma complete */ timeout = wait_for_completion_interruptible_timeout (&q2spi->sma_wait, msecs_to_jiffies(XFER_TIMEOUT_OFFSET)); @@ -4117,10 +4154,10 @@ static void q2spi_handle_doorbell_work(struct work_struct *work) exit_doorbell_work: pm_runtime_mark_last_busy(q2spi->dev); - Q2SPI_DEBUG(q2spi, "%s PM before put_autosuspend count:%d\n", + Q2SPI_DBG_2(q2spi, "%s PM before put_autosuspend count:%d\n", __func__, atomic_read(&q2spi->dev->power.usage_count)); pm_runtime_put_autosuspend(q2spi->dev); - Q2SPI_DEBUG(q2spi, "%s End PID=%d PM after put_autosuspend count:%d\n", + Q2SPI_DBG_1(q2spi, "%s End PID=%d PM after 
put_autosuspend count:%d\n", __func__, current->pid, atomic_read(&q2spi->dev->power.usage_count)); } @@ -4141,7 +4178,7 @@ static void q2spi_chardev_destroy(struct q2spi_geni *q2spi) } class_destroy(q2spi->chrdev.q2spi_class); unregister_chrdev_region(MKDEV(q2spi_cdev_major, 0), MINORMASK); - Q2SPI_DEBUG(q2spi, "%s End %d\n", __func__, q2spi_cdev_major); + Q2SPI_DBG_2(q2spi, "%s End %d\n", __func__, q2spi_cdev_major); } /** @@ -4178,7 +4215,7 @@ static int q2spi_sleep_config(struct q2spi_geni *q2spi, struct platform_device * q2spi->wake_mosi_gpio); return ret; } - Q2SPI_DEBUG(q2spi, "%s Q2SPI clk_gpio:%d mosi_gpio:%d\n", + Q2SPI_DBG_1(q2spi, "%s Q2SPI clk_gpio:%d mosi_gpio:%d\n", __func__, q2spi->wake_clk_gpio, q2spi->wake_mosi_gpio); q2spi->wakeup_wq = alloc_workqueue("%s", WQ_HIGHPRI, 1, dev_name(q2spi->dev)); @@ -4197,7 +4234,7 @@ static int q2spi_sleep_config(struct q2spi_geni *q2spi, struct platform_device * /* To use the Doorbel pin as wakeup irq */ q2spi->doorbell_irq = platform_get_irq(pdev, 1); - Q2SPI_DEBUG(q2spi, "%s Q2SPI doorbell_irq:%d\n", __func__, q2spi->doorbell_irq); + Q2SPI_DBG_1(q2spi, "%s Q2SPI doorbell_irq:%d\n", __func__, q2spi->doorbell_irq); irq_set_status_flags(q2spi->doorbell_irq, IRQ_NOAUTOEN); ret = devm_request_irq(q2spi->dev, q2spi->doorbell_irq, q2spi_geni_wakeup_isr, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, @@ -4225,7 +4262,7 @@ void q2spi_client_sleep_timeout_handler(struct timer_list *t) { struct q2spi_geni *q2spi = from_timer(q2spi, t, slave_sleep_timer); - Q2SPI_DEBUG(q2spi, "%s: PID=%d\n", __func__, current->pid); + Q2SPI_DBG_1(q2spi, "%s: PID=%d\n", __func__, current->pid); if (q2spi_sys_restart || q2spi->port_release) { Q2SPI_DEBUG(q2spi, "%s Err Port in closed state or sys_restart\n", __func__); return; @@ -4263,7 +4300,7 @@ static int q2spi_geni_restart_cb(struct notifier_block *nb, struct q2spi_geni *q2spi = container_of(nb, struct q2spi_geni, restart_handler); if (!q2spi) { - Q2SPI_ERROR(q2spi, "%s Err q2spi is NULL, PID=%d\n", 
__func__, current->pid); + Q2SPI_DEBUG(q2spi, "%s Err q2spi is NULL, PID=%d\n", __func__, current->pid); return -EINVAL; } Q2SPI_INFO(q2spi, "%s PID=%d\n", __func__, current->pid); @@ -4272,6 +4309,65 @@ static int q2spi_geni_restart_cb(struct notifier_block *nb, return 0; } +static int q2spi_read_dtsi(struct platform_device *pdev, struct q2spi_geni *q2spi) +{ + struct device *dev = &pdev->dev; + struct resource *res; + int ret = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Err getting IO region\n"); + return -EINVAL; + } + + q2spi->base = devm_ioremap_resource(dev, res); + if (IS_ERR(q2spi->base)) { + ret = PTR_ERR(q2spi->base); + dev_err(dev, "Err ioremap fail %d\n", ret); + return ret; + } + + q2spi->irq = platform_get_irq(pdev, 0); + if (q2spi->irq < 0) { + dev_err(dev, "Err for irq get %d\n", ret); + ret = q2spi->irq; + return ret; + } + + irq_set_status_flags(q2spi->irq, IRQ_NOAUTOEN); + ret = devm_request_irq(dev, q2spi->irq, q2spi_geni_irq, + IRQF_TRIGGER_HIGH, dev_name(dev), q2spi); + if (ret) { + dev_err(dev, "Err Failed to request irq %d\n", ret); + return ret; + } + + q2spi->se.dev = dev; + q2spi->se.wrapper = dev_get_drvdata(dev->parent); + if (!q2spi->se.wrapper) { + dev_err(dev, "Err SE Wrapper is NULL, deferring probe\n"); + return -EPROBE_DEFER; + } + + q2spi->ipc = ipc_log_context_create(15, dev_name(dev), 0); + if (!q2spi->ipc && IS_ENABLED(CONFIG_IPC_LOGGING)) + dev_err(dev, "Error creating IPC logs\n"); + + q2spi->se.base = q2spi->base; + if (q2spi_max_speed) { + q2spi->max_speed_hz = q2spi_max_speed; + } else { + if (of_property_read_u32(pdev->dev.of_node, "q2spi-max-frequency", + &q2spi->max_speed_hz)) { + Q2SPI_ERROR(q2spi, "Err Max frequency not specified\n"); + return -EINVAL; + } + } + + return ret; +} + /** * q2spi_geni_probe - Q2SPI interface driver probe function * @pdev: Q2SPI Serial Engine to probe. 
@@ -4286,7 +4382,6 @@ static int q2spi_geni_restart_cb(struct notifier_block *nb, static int q2spi_geni_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct resource *res; struct q2spi_geni *q2spi; int ret = 0; @@ -4299,62 +4394,17 @@ static int q2spi_geni_probe(struct platform_device *pdev) } q2spi->dev = dev; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "Err getting IO region\n"); - ret = -EINVAL; + ret = q2spi_read_dtsi(pdev, q2spi); + if (ret) goto q2spi_err; - } - q2spi->base = devm_ioremap_resource(dev, res); - if (IS_ERR(q2spi->base)) { - ret = PTR_ERR(q2spi->base); - dev_err(dev, "Err ioremap fail %d\n", ret); - goto q2spi_err; - } + if (device_create_file(dev, &dev_attr_log_level)) + Q2SPI_INFO(q2spi, "Unable to create device file for q2spi log level\n"); - q2spi->irq = platform_get_irq(pdev, 0); - if (q2spi->irq < 0) { - dev_err(dev, "Err for irq get %d\n", ret); - ret = q2spi->irq; - goto q2spi_err; - } - - irq_set_status_flags(q2spi->irq, IRQ_NOAUTOEN); - ret = devm_request_irq(dev, q2spi->irq, q2spi_geni_irq, - IRQF_TRIGGER_HIGH, dev_name(dev), q2spi); - if (ret) { - dev_err(dev, "Err Failed to request irq %d\n", ret); - goto q2spi_err; - } - - q2spi->se.dev = dev; - q2spi->se.wrapper = dev_get_drvdata(dev->parent); - if (!q2spi->se.wrapper) { - dev_err(dev, "Err SE Wrapper is NULL, deferring probe\n"); - ret = -EPROBE_DEFER; - goto q2spi_err; - } - - q2spi->ipc = ipc_log_context_create(15, dev_name(dev), 0); - if (!q2spi->ipc && IS_ENABLED(CONFIG_IPC_LOGGING)) - dev_err(dev, "Error creating IPC logs\n"); - - q2spi->se.base = q2spi->base; - if (q2spi_max_speed) { - q2spi->max_speed_hz = q2spi_max_speed; - } else { - if (of_property_read_u32(pdev->dev.of_node, "q2spi-max-frequency", - &q2spi->max_speed_hz)) { - Q2SPI_ERROR(q2spi, "Err Max frequency not specified\n"); - ret = -EINVAL; - goto q2spi_err; - } - } + q2spi->q2spi_log_lvl = LOG_DBG_LEVEL0; q2spi->wrapper_dev = dev->parent; - 
Q2SPI_DEBUG(q2spi, "%s q2spi:0x%p w_dev:0x%p dev:0x%p, p_dev:0x%p", + Q2SPI_DBG_1(q2spi, "%s q2spi:0x%p w_dev:0x%p dev:0x%p, p_dev:0x%p", __func__, q2spi, q2spi->wrapper_dev, dev, &pdev->dev); Q2SPI_INFO(q2spi, "%s dev:%s q2spi_max_freq:%uhz\n", __func__, dev_name(q2spi->dev), q2spi->max_speed_hz); @@ -4507,6 +4557,7 @@ q2spi_err: q2spi->base = NULL; ipc_log_context_destroy(q2spi->ipc); } + pr_err("%s: failed ret:%d\n", __func__, ret); return ret; } @@ -4573,7 +4624,7 @@ int q2spi_wakeup_slave_through_gpio(struct q2spi_geni *q2spi) { int ret = 0; - Q2SPI_DEBUG(q2spi, "%s Sending disconnect doorbell only\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Sending disconnect doorbell only\n", __func__); atomic_set(&q2spi->slave_in_sleep, 0); ret = pinctrl_select_state(q2spi->geni_pinctrl, q2spi->geni_gpio_default); @@ -4587,20 +4638,20 @@ int q2spi_wakeup_slave_through_gpio(struct q2spi_geni *q2spi) /* Set Clock pin to Low */ gpio_set_value(q2spi->wake_clk_gpio, 0); - Q2SPI_DEBUG(q2spi, "%s:gpio(%d) value is %d\n", __func__, + Q2SPI_DBG_1(q2spi, "%s:gpio(%d) value is %d\n", __func__, q2spi->wake_clk_gpio, gpio_get_value(q2spi->wake_clk_gpio)); gpio_direction_output(q2spi->wake_mosi_gpio, 0); /* Set Mosi pin to High */ gpio_set_value(q2spi->wake_mosi_gpio, 1); - Q2SPI_DEBUG(q2spi, "%s:gpio(%d) value is %d\n", __func__, + Q2SPI_DBG_1(q2spi, "%s:gpio(%d) value is %d\n", __func__, q2spi->wake_mosi_gpio, gpio_get_value(q2spi->wake_mosi_gpio)); usleep_range(2000, 5000); /* Set back Mosi pin to Low */ gpio_set_value(q2spi->wake_mosi_gpio, 0); - Q2SPI_DEBUG(q2spi, "%s:gpio(%d) value is %d\n", __func__, + Q2SPI_DBG_1(q2spi, "%s:gpio(%d) value is %d\n", __func__, q2spi->wake_mosi_gpio, gpio_get_value(q2spi->wake_mosi_gpio)); gpio_direction_input(q2spi->wake_mosi_gpio); @@ -4633,7 +4684,7 @@ int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi) struct q2spi_request q2spi_req; int ret = 0; - Q2SPI_DEBUG(q2spi, "%s: PID=%d q2spi_sleep_cmd_enable:%d\n", + Q2SPI_DBG_1(q2spi, "%s: PID=%d 
q2spi_sleep_cmd_enable:%d\n", __func__, current->pid, q2spi->q2spi_sleep_cmd_enable); if (!q2spi->q2spi_sleep_cmd_enable) @@ -4663,7 +4714,7 @@ int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi) return ret; } - Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p tid:%d\n", __func__, q2spi_pkt, q2spi_pkt->xfer->tid); + Q2SPI_DBG_1(q2spi, "%s q2spi_pkt:%p tid:%d\n", __func__, q2spi_pkt, q2spi_pkt->xfer->tid); q2spi_pkt->is_client_sleep_pkt = true; ret = __q2spi_transfer(q2spi, q2spi_req, q2spi_pkt, 0); if (ret) { @@ -4680,7 +4731,7 @@ int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi) q2spi_free_xfer_tid(q2spi, q2spi_pkt->xfer->tid); q2spi_del_pkt_from_tx_queue(q2spi, q2spi_pkt); q2spi_free_q2spi_pkt(q2spi_pkt, __LINE__); - Q2SPI_DEBUG(q2spi, "%s: PID=%d End slave_in_sleep:%d\n", __func__, current->pid, + Q2SPI_DBG_1(q2spi, "%s: PID=%d End slave_in_sleep:%d\n", __func__, current->pid, atomic_read(&q2spi->slave_in_sleep)); return ret; } @@ -4712,7 +4763,7 @@ static int q2spi_geni_runtime_suspend(struct device *dev) return -EINVAL; } - Q2SPI_DEBUG(q2spi, "%s PID=%d\n", __func__, current->pid); + Q2SPI_DBG_1(q2spi, "%s PID=%d\n", __func__, current->pid); if (atomic_read(&q2spi->doorbell_pending)) { Q2SPI_DEBUG(q2spi, "%s CR Doorbell Pending\n", __func__); /* Update last access time of a device for autosuspend */ @@ -4726,7 +4777,7 @@ static int q2spi_geni_runtime_suspend(struct device *dev) q2spi_tx_queue_status(q2spi); q2spi_unmap_doorbell_rx_buf(q2spi); - Q2SPI_DEBUG(q2spi, "%s Sending disconnect doorbell cmd\n", __func__); + Q2SPI_DBG_1(q2spi, "%s Sending disconnect doorbell cmd\n", __func__); geni_gsi_disconnect_doorbell_stop_ch(q2spi->gsi->tx_c, true); irq_set_irq_type(q2spi->doorbell_irq, IRQ_TYPE_LEVEL_HIGH); ret = irq_set_irq_wake(q2spi->doorbell_irq, 1); @@ -4756,9 +4807,8 @@ static int q2spi_geni_runtime_resume(struct device *dev) return -EINVAL; } - Q2SPI_DEBUG(q2spi, "%s PID=%d\n", __func__, current->pid); + Q2SPI_DBG_1(q2spi, "%s PID=%d\n", __func__, current->pid); 
if (atomic_read(&q2spi->is_suspend)) { - Q2SPI_DEBUG(q2spi, "%s: PID=%d\n", __func__, current->pid); if (q2spi_geni_resources_on(q2spi)) return -EIO; disable_irq(q2spi->doorbell_irq); @@ -4772,7 +4822,7 @@ static int q2spi_geni_runtime_resume(struct device *dev) /* Clear is_suspend to map doorbell buffers */ atomic_set(&q2spi->is_suspend, 0); ret = q2spi_map_doorbell_rx_buf(q2spi); - Q2SPI_DEBUG(q2spi, "%s End ret:%d\n", __func__, ret); + Q2SPI_DBG_1(q2spi, "%s End ret:%d\n", __func__, ret); } return ret; } @@ -4781,8 +4831,8 @@ static int q2spi_geni_resume(struct device *dev) { struct q2spi_geni *q2spi = get_q2spi(dev); - Q2SPI_INFO(q2spi, "%s PID=%d\n", __func__, current->pid); - Q2SPI_DEBUG(q2spi, "%s PM state:%d is_suspend:%d pm_enable:%d\n", __func__, + Q2SPI_DBG_1(q2spi, "%s PID=%d\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s PM state:%d is_suspend:%d pm_enable:%d\n", __func__, pm_runtime_status_suspended(dev), atomic_read(&q2spi->is_suspend), pm_runtime_enabled(dev)); @@ -4794,17 +4844,17 @@ static int q2spi_geni_suspend(struct device *dev) struct q2spi_geni *q2spi = get_q2spi(dev); int ret = 0; - Q2SPI_INFO(q2spi, "%s PID=%d\n", __func__, current->pid); - Q2SPI_DEBUG(q2spi, "%s PM state:%d is_suspend:%d pm_enable:%d\n", __func__, + Q2SPI_DBG_1(q2spi, "%s PID=%d\n", __func__, current->pid); + Q2SPI_DBG_2(q2spi, "%s PM state:%d is_suspend:%d pm_enable:%d\n", __func__, pm_runtime_status_suspended(dev), atomic_read(&q2spi->is_suspend), pm_runtime_enabled(dev)); if (pm_runtime_status_suspended(dev)) { - Q2SPI_DEBUG(q2spi, "%s: suspended state\n", __func__); + Q2SPI_DBG_1(q2spi, "%s: suspended state\n", __func__); return ret; } if (q2spi && !atomic_read(&q2spi->is_suspend)) { - Q2SPI_DEBUG(q2spi, "%s: PID=%d\n", __func__, current->pid); + Q2SPI_DBG_1(q2spi, "%s: PID=%d\n", __func__, current->pid); ret = q2spi_geni_runtime_suspend(dev); if (ret) { Q2SPI_DEBUG(q2spi, "%s: Err runtime_suspend fail\n", __func__); diff --git a/drivers/spi/q2spi-msm.h 
b/drivers/spi/q2spi-msm.h index e0b4db019da5..42eaa260f788 100644 --- a/drivers/spi/q2spi-msm.h +++ b/drivers/spi/q2spi-msm.h @@ -207,6 +207,26 @@ if (q2spi_ptr) { \ } \ } while (0) +#define Q2SPI_DBG_1(q2spi_ptr, x...) do { \ +if (q2spi_ptr) { \ + if (q2spi_ptr->q2spi_log_lvl >= LOG_DBG_LEVEL1) {\ + GENI_SE_DBG(q2spi_ptr->ipc, false, q2spi_ptr->dev, x); \ + if (q2spi_ptr->dev) \ + q2spi_trace_log(q2spi_ptr->dev, x); \ + } \ +} \ +} while (0) + +#define Q2SPI_DBG_2(q2spi_ptr, x...) do { \ +if (q2spi_ptr) { \ + if (q2spi_ptr->q2spi_log_lvl >= LOG_DBG_LEVEL2) {\ + GENI_SE_DBG(q2spi_ptr->ipc, false, q2spi_ptr->dev, x); \ + if (q2spi_ptr->dev) \ + q2spi_trace_log(q2spi_ptr->dev, x); \ + } \ +} \ +} while (0) + #define Q2SPI_DEBUG(q2spi_ptr, x...) do { \ if (q2spi_ptr) { \ GENI_SE_DBG(q2spi_ptr->ipc, false, q2spi_ptr->dev, x); \ @@ -255,6 +275,12 @@ enum q2spi_cr_hdr_type { CR_HDR_EXT = 3, }; +enum DEBUG_LOG_LVL { + LOG_DBG_LEVEL0 = 0, /* Indicates lowest level debug log level, default log level */ + LOG_DBG_LEVEL1 = 1, + LOG_DBG_LEVEL2 = 2, +}; + struct q2spi_mc_hrf_entry { u8 cmd:4; u8 flow:1; @@ -530,6 +556,7 @@ struct q2spi_dma_transfer { * @q2spi_cr_hdr_err: reflects CR Header incorrect in CR Header * @is_start_seq_fail: start sequence fail due to slave not responding * @wait_comp_start_fail: completion for transfer callback during start sequence failure + * @q2spi_log_lvl: reflects log level in q2spi driver */ struct q2spi_geni { struct device *wrapper_dev; @@ -638,6 +665,7 @@ struct q2spi_geni { bool q2spi_cr_hdr_err; bool is_start_seq_fail; struct completion wait_comp_start_fail; + u32 q2spi_log_lvl; }; /** diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c index 6f1b236134c8..86430e526f13 100644 --- a/drivers/tty/serial/msm_geni_serial.c +++ b/drivers/tty/serial/msm_geni_serial.c @@ -5475,10 +5475,14 @@ static int msm_geni_serial_port_init(struct platform_device *pdev, if (dev_port->is_console) { dev_port->handle_rx = 
handle_rx_console; dev_port->rx_fifo = devm_kzalloc(uport->dev, sizeof(u32), GFP_KERNEL); + if (!dev_port->rx_fifo) + return -ENOMEM; } else { dev_port->handle_rx = handle_rx_hs; dev_port->rx_fifo = devm_kzalloc(uport->dev, sizeof(dev_port->rx_fifo_depth * sizeof(u32)), GFP_KERNEL); + if (!dev_port->rx_fifo) + return -ENOMEM; if (dev_port->pm_auto_suspend_disable) { pm_runtime_set_active(&pdev->dev); pm_runtime_forbid(&pdev->dev); diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 7dd19a281579..6ca85580af15 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -28,6 +28,7 @@ #include #include #include +#include #define MSM_UART_MR1 0x0000 @@ -179,6 +180,8 @@ struct msm_port { bool break_detected; struct msm_dma tx_dma; struct msm_dma rx_dma; + /* BLSP UART required ICC BUS voting */ + struct icc_path *icc_path; }; static inline struct msm_port *to_msm_port(struct uart_port *up) @@ -186,6 +189,15 @@ static inline struct msm_port *to_msm_port(struct uart_port *up) return container_of(up, struct msm_port, uart); } +/* Interconnect path bandwidths (each times 1000 bytes per second) */ +#define BLSP_MEMORY_AVG 500 +#define BLSP_MEMORY_PEAK 800 + +static void msm_clk_bus_unprepare(struct msm_port *msm_uport); +static int msm_clk_bus_prepare(struct msm_port *msm_uport); +static int msm_clk_bus_vote(struct msm_port *msm_uport); +static void msm_clk_bus_unvote(struct msm_port *msm_uport); + static void msm_write(struct uart_port *port, unsigned int val, unsigned int off) { @@ -1186,15 +1198,6 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud, return baud; } -static void msm_init_clock(struct uart_port *port) -{ - struct msm_port *msm_port = to_msm_port(port); - - clk_prepare_enable(msm_port->clk); - clk_prepare_enable(msm_port->pclk); - msm_serial_set_mnd_regs(port); -} - static int msm_startup(struct uart_port *port) { struct msm_port *msm_port = to_msm_port(port); @@ -1204,7 +1207,7 
@@ static int msm_startup(struct uart_port *port) snprintf(msm_port->name, sizeof(msm_port->name), "msm_serial%d", port->line); - msm_init_clock(port); + msm_serial_set_mnd_regs(port); if (likely(port->fifosize > 12)) rfr_level = port->fifosize - 12; @@ -1241,9 +1244,6 @@ err_irq: if (msm_port->is_uartdm) msm_release_dma(msm_port); - clk_disable_unprepare(msm_port->pclk); - clk_disable_unprepare(msm_port->clk); - return ret; } @@ -1257,8 +1257,6 @@ static void msm_shutdown(struct uart_port *port) if (msm_port->is_uartdm) msm_release_dma(msm_port); - clk_disable_unprepare(msm_port->clk); - free_irq(port->irq, port); } @@ -1420,15 +1418,21 @@ static void msm_power(struct uart_port *port, unsigned int state, unsigned int oldstate) { struct msm_port *msm_port = to_msm_port(port); + int ret; + + if (oldstate == state) + return; switch (state) { - case 0: - clk_prepare_enable(msm_port->clk); - clk_prepare_enable(msm_port->pclk); + case UART_PM_STATE_ON: + ret = msm_clk_bus_prepare(msm_port); + if (ret) + break; + msm_clk_bus_vote(msm_port); break; - case 3: - clk_disable_unprepare(msm_port->clk); - clk_disable_unprepare(msm_port->pclk); + case UART_PM_STATE_OFF: + msm_clk_bus_unvote(msm_port); + msm_clk_bus_unprepare(msm_port); break; default: pr_err("msm_serial: Unknown PM state %d\n", state); @@ -1536,6 +1540,58 @@ static void msm_poll_put_char(struct uart_port *port, unsigned char c) } #endif +static void msm_clk_bus_unprepare(struct msm_port *msm_uport) +{ + clk_disable_unprepare(msm_uport->clk); + if (msm_uport->pclk) + clk_disable_unprepare(msm_uport->pclk); +} + +static int msm_clk_bus_prepare(struct msm_port *msm_uport) +{ + int rc; + + /* Turn on core clk and iface clk */ + if (msm_uport->pclk) { + rc = clk_prepare_enable(msm_uport->pclk); + if (rc) { + dev_err(msm_uport->uart.dev, + "Could not turn on pclk [%d]\n", rc); + return rc; + } + } + rc = clk_prepare_enable(msm_uport->clk); + if (rc) { + dev_err(msm_uport->uart.dev, + "Could not turn on core clk 
[%d]\n", rc); + if (msm_uport->pclk) + clk_disable_unprepare(msm_uport->pclk); + } + return rc; +} + +static int msm_clk_bus_vote(struct msm_port *msm_uport) +{ + int rc; + + if (msm_uport->icc_path) { + rc = icc_set_bw(msm_uport->icc_path, + BLSP_MEMORY_AVG, BLSP_MEMORY_PEAK); + if (rc) { + dev_err(msm_uport->uart.dev, + "%s(): Error in seting bw [%d]\n", __func__, rc); + return rc; + } + } + return 0; +} + +static void msm_clk_bus_unvote(struct msm_port *msm_uport) +{ + if (msm_uport->icc_path) + icc_set_bw(msm_uport->icc_path, 0, 0); +} + static const struct uart_ops msm_uart_pops = { .tx_empty = msm_tx_empty, .set_mctrl = msm_set_mctrl, @@ -1700,7 +1756,7 @@ static int msm_console_setup(struct console *co, char *options) if (unlikely(!port->membase)) return -ENXIO; - msm_init_clock(port); + msm_serial_set_mnd_regs(port); if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); @@ -1794,6 +1850,7 @@ static int msm_serial_probe(struct platform_device *pdev) struct uart_port *port; const struct of_device_id *id; int irq, line; + int ret; if (pdev->dev.of_node) line = of_alias_get_id(pdev->dev.of_node, "serial"); @@ -1818,6 +1875,15 @@ static int msm_serial_probe(struct platform_device *pdev) else msm_port->is_uartdm = 0; + msm_port->icc_path = of_icc_get(&pdev->dev, "blsp-ddr"); + if (IS_ERR_OR_NULL(msm_port->icc_path)) { + ret = msm_port->icc_path ? 
+ PTR_ERR(msm_port->icc_path) : -EINVAL; + dev_err(&pdev->dev, "%s(): failed to get ICC path: %d\n", __func__, ret); + msm_port->icc_path = NULL; + return -ENXIO; + } + msm_port->clk = devm_clk_get(&pdev->dev, "core"); if (IS_ERR(msm_port->clk)) return PTR_ERR(msm_port->clk); @@ -1918,7 +1984,7 @@ static void __exit msm_serial_exit(void) uart_unregister_driver(&msm_uart_driver); } -module_init(msm_serial_init); +subsys_initcall(msm_serial_init); module_exit(msm_serial_exit); MODULE_AUTHOR("Robert Love "); diff --git a/drivers/usb/dwc3/dwc3-msm-core.c b/drivers/usb/dwc3/dwc3-msm-core.c index 76fb6dc5b4aa..fc1bd07d1887 100644 --- a/drivers/usb/dwc3/dwc3-msm-core.c +++ b/drivers/usb/dwc3/dwc3-msm-core.c @@ -3125,6 +3125,7 @@ static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert) if (assert) { disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); + disable_irq_wake(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); /* Using asynchronous block reset to the hardware */ dev_dbg(mdwc->dev, "block_reset ASSERT\n"); clk_disable_unprepare(mdwc->utmi_clk); @@ -3144,6 +3145,7 @@ static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert) clk_prepare_enable(mdwc->core_clk); clk_prepare_enable(mdwc->sleep_clk); clk_prepare_enable(mdwc->utmi_clk); + enable_irq_wake(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); } @@ -4205,6 +4207,7 @@ static void dwc3_msm_suspend_phy(struct dwc3_msm *mdwc) if (mdwc->lpm_flags & MDWC3_USE_PWR_EVENT_IRQ_FOR_WAKEUP) { dwc3_msm_set_pwr_events(mdwc, true); + enable_irq_wake(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); } } @@ -4223,7 +4226,8 @@ static void dwc3_msm_interrupt_enable(struct dwc3_msm *mdwc, bool enable) } } -static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool force_power_collapse) +static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool force_power_collapse, + bool enable_wakeup) { int ret; struct dwc3 *dwc = NULL; @@ -4346,11 +4350,13 @@ static int 
dwc3_msm_suspend(struct dwc3_msm *mdwc, bool force_power_collapse) * case of platforms with mpm interrupts and snps phy, enable * dpse hsphy irq and dmse hsphy irq as done for pdc interrupts. */ - dwc3_msm_interrupt_enable(mdwc, true); + dwc3_msm_interrupt_enable(mdwc, enable_wakeup); if (mdwc->use_pwr_event_for_wakeup && - !(mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND)) + !(mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND)) { + enable_irq_wake(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); + } dev_info(mdwc->dev, "DWC3 in low power mode\n"); dbg_event(0xFF, "Ctl Sus", atomic_read(&mdwc->in_lpm)); @@ -4477,6 +4483,7 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc) atomic_set(&mdwc->in_lpm, 0); /* enable power evt irq for IN P3 detection */ + enable_irq_wake(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq); /* Disable HSPHY auto suspend and utmi sleep assert */ @@ -6216,6 +6223,7 @@ static int dwc3_msm_probe(struct platform_device *pdev) struct resource *res; int ret = 0, i; u32 val; + bool disable_wakeup; mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL); if (!mdwc) @@ -6390,7 +6398,10 @@ static int dwc3_msm_probe(struct platform_device *pdev) atomic_set(&mdwc->in_lpm, 1); pm_runtime_set_autosuspend_delay(mdwc->dev, 1000); pm_runtime_use_autosuspend(mdwc->dev); - device_init_wakeup(mdwc->dev, 1); + + disable_wakeup = + device_property_read_bool(mdwc->dev, "qcom,disable-wakeup"); + device_init_wakeup(mdwc->dev, !disable_wakeup); if (of_property_read_bool(node, "qcom,disable-dev-mode-pm")) pm_runtime_get_noresume(mdwc->dev); @@ -7465,10 +7476,17 @@ static int dwc3_msm_pm_suspend(struct device *dev) * Power collapse the core. Hence call dwc3_msm_suspend with * 'force_power_collapse' set to 'true'. */ - ret = dwc3_msm_suspend(mdwc, true); + ret = dwc3_msm_suspend(mdwc, true, device_may_wakeup(dev)); if (!ret) atomic_set(&mdwc->pm_suspended, 1); + /* + * Disable IRQs if not wakeup capable. 
Wakeup IRQs may sometimes + * be enabled as part of a runtime suspend. + */ + if (!device_may_wakeup(dev)) + dwc3_msm_interrupt_enable(mdwc, false); + return ret; } @@ -7616,7 +7634,7 @@ static int dwc3_msm_runtime_suspend(struct device *dev) if (dwc) device_init_wakeup(dwc->dev, false); - return dwc3_msm_suspend(mdwc, false); + return dwc3_msm_suspend(mdwc, false, true); } static int dwc3_msm_runtime_resume(struct device *dev) diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index ee95e8f5f9d4..ee772e39b1f0 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c @@ -105,6 +105,51 @@ static unsigned int bitrate(struct usb_gadget *g) #define RNDIS_STATUS_INTERVAL_MS 32 #define STATUS_BYTECOUNT 8 /* 8 bytes data */ +#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(_f_) \ + static ssize_t _f_##_opts_wceis_show(struct config_item *item, \ + char *page) \ + { \ + struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ + bool wceis; \ + \ + if (opts->bound == false) { \ + pr_err("Gadget function do not bind yet.\n"); \ + return -ENODEV; \ + } \ + \ + mutex_lock(&opts->lock); \ + wceis = opts->wceis; \ + mutex_unlock(&opts->lock); \ + return snprintf(page, PAGE_SIZE, "%d", wceis); \ + } \ + \ + static ssize_t _f_##_opts_wceis_store(struct config_item *item, \ + const char *page, size_t len)\ + { \ + struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ + bool wceis; \ + int ret; \ + \ + if (opts->bound == false) { \ + pr_err("Gadget function do not bind yet.\n"); \ + return -ENODEV; \ + } \ + \ + mutex_lock(&opts->lock); \ + \ + ret = kstrtobool(page, &wceis); \ + if (ret) \ + goto out; \ + \ + opts->wceis = wceis; \ + ret = len; \ +out: \ + mutex_unlock(&opts->lock); \ + \ + return ret; \ + } \ + \ + CONFIGFS_ATTR(_f_##_opts_, wceis) /* interface descriptor: */ @@ -741,6 +786,27 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f) rndis_data_intf.bInterfaceNumber = status; 
rndis_union_desc.bSlaveInterface0 = status; + if (rndis_opts->wceis) { + /* "Wireless" RNDIS; auto-detected by Windows */ + rndis_iad_descriptor.bFunctionClass = + USB_CLASS_WIRELESS_CONTROLLER; + rndis_iad_descriptor.bFunctionSubClass = 0x01; + rndis_iad_descriptor.bFunctionProtocol = 0x03; + rndis_control_intf.bInterfaceClass = + USB_CLASS_WIRELESS_CONTROLLER; + rndis_control_intf.bInterfaceSubClass = 0x01; + rndis_control_intf.bInterfaceProtocol = 0x03; + } else { + rndis_iad_descriptor.bFunctionClass = USB_CLASS_COMM; + rndis_iad_descriptor.bFunctionSubClass = + USB_CDC_SUBCLASS_ETHERNET; + rndis_iad_descriptor.bFunctionProtocol = USB_CDC_PROTO_NONE; + rndis_control_intf.bInterfaceClass = USB_CLASS_COMM; + rndis_control_intf.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM; + rndis_control_intf.bInterfaceProtocol = + USB_CDC_ACM_PROTO_VENDOR; + } + status = -ENODEV; /* allocate instance-specific endpoints */ @@ -878,6 +944,9 @@ USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(rndis, subclass); /* f_rndis_opts_protocol */ USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(rndis, protocol); +/* f_rndis_opts_wceis */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(rndis); + static struct configfs_attribute *rndis_attrs[] = { &rndis_opts_attr_dev_addr, &rndis_opts_attr_host_addr, @@ -886,6 +955,7 @@ static struct configfs_attribute *rndis_attrs[] = { &rndis_opts_attr_class, &rndis_opts_attr_subclass, &rndis_opts_attr_protocol, + &rndis_opts_attr_wceis, NULL, }; @@ -925,7 +995,7 @@ static struct usb_function_instance *rndis_alloc_inst(void) mutex_init(&opts->lock); opts->func_inst.free_func_inst = rndis_free_inst; - opts->net = gether_setup_default(); + opts->net = gether_setup_name_default("rndis"); if (IS_ERR(opts->net)) { struct net_device *net = opts->net; kfree(opts); @@ -950,6 +1020,9 @@ static struct usb_function_instance *rndis_alloc_inst(void) } opts->rndis_interf_group = rndis_interf_group; + /* Enable "Wireless" RNDIS by default */ + opts->wceis = true; + return &opts->func_inst; } diff --git 
a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h index a8c409b2f52f..7232f2ae4648 100644 --- a/drivers/usb/gadget/function/u_rndis.h +++ b/drivers/usb/gadget/function/u_rndis.h @@ -39,6 +39,9 @@ struct f_rndis_opts { */ struct mutex lock; int refcnt; + + /* "Wireless" RNDIS; auto-detected by Windows */ + bool wceis; }; void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net); diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 5c192ac9488b..fe3a60c422fa 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -247,4 +247,18 @@ config USB_MSM_EUSB2_PHY To compile this driver as a module, choose M here. +config USB_MSM_OTG + tristate "Qualcomm Technologies, Inc. on-chip USB OTG controller support" + depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST) + depends on RESET_CONTROLLER + select USB_PHY + help + Enable this to support USB OTG transceiver on Qualcomm Technologies, Inc. + chips. It handles PHY initialization, clock management, and workarounds + required after resetting the hardware and power management. + This driver is required even for peripheral only or host only + mode configurations. + This driver is not supported on boards like trout which + has an external PHY. 
+ endmenu diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile index 946fc2d60f02..02f1be4cc194 100644 --- a/drivers/usb/phy/Makefile +++ b/drivers/usb/phy/Makefile @@ -30,3 +30,4 @@ obj-$(CONFIG_USB_MSM_SSPHY_QMP) += phy-msm-ssusb-qmp.o obj-$(CONFIG_MSM_HSUSB_PHY) += phy-msm-snps-hs.o obj-$(CONFIG_USB_MSM_EUSB2_PHY) += phy-msm-snps-eusb2.o obj-$(CONFIG_MSM_QUSB_PHY) += phy-msm-qusb-v2.o phy-msm-qusb.o +obj-$(CONFIG_USB_MSM_OTG) += phy-msm-usb.o diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c index 1b138a5476a3..90e4d5ba003d 100644 --- a/drivers/usb/phy/phy-msm-ssusb-qmp.c +++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c @@ -53,6 +53,8 @@ enum core_ldo_levels { #define SW_PORTSELECT BIT(0) /* port select mux: 1 - sw control. 0 - HW control*/ #define SW_PORTSELECT_MX BIT(1) +/* port select polarity: 1 - invert polarity of portselect from gpio */ +#define PORTSELECT_POLARITY BIT(2) /* USB3_DP_PHY_USB3_DP_COM_SWI_CTRL bits */ @@ -150,6 +152,7 @@ struct msm_ssphy_qmp { int reg_offset_cnt; u32 *qmp_phy_init_seq; int init_seq_len; + bool invert_ps_polarity; enum qmp_phy_type phy_type; }; @@ -455,6 +458,14 @@ static void usb_qmp_update_portselect_phymode(struct msm_ssphy_qmp *phy) switch (phy->phy_type) { case USB3_AND_DP: + /* + * if port select inversion is enabled, enable it only for the input to the PHY. + * The lane selection based on PHY flags will not get affected. 
+ */ + if (val < 0 && phy->invert_ps_polarity) + writel_relaxed(PORTSELECT_POLARITY, + phy->base + phy->phy_reg[USB3_DP_COM_TYPEC_CTRL]); + writel_relaxed(0x01, phy->base + phy->phy_reg[USB3_DP_COM_SW_RESET]); writel_relaxed(0x00, @@ -1219,7 +1230,8 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev) &phy->vdd_max_uA) || !phy->vdd_max_uA) phy->vdd_max_uA = USB_SSPHY_HPM_LOAD; - platform_set_drvdata(pdev, phy); + phy->invert_ps_polarity = of_property_read_bool(dev->of_node, + "qcom,invert-ps-polarity"); phy->phy.dev = dev; phy->phy.init = msm_ssphy_qmp_init; diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c new file mode 100644 index 000000000000..b9e2c8e64661 --- /dev/null +++ b/drivers/usb/phy/phy-msm-usb.c @@ -0,0 +1,4787 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * Requested USB votes for BUS bandwidth + * + * USB_NO_PERF_VOTE BUS Vote for inactive USB session or disconnect + * USB_MAX_PERF_VOTE Maximum BUS bandwidth vote + * USB_MIN_PERF_VOTE Minimum BUS bandwidth vote (for some hw same as NO_PERF) + * + */ +enum usb_bus_vote { + USB_NO_PERF_VOTE = 0, + USB_MAX_PERF_VOTE, + USB_MIN_PERF_VOTE, +}; + +/** + * Supported USB modes + * + * USB_PERIPHERAL Only peripheral mode is supported. + * USB_HOST Only host mode is supported. + * USB_OTG OTG mode is supported. + * + */ +enum usb_mode_type { + USB_NONE = 0, + USB_PERIPHERAL, + USB_HOST, + USB_OTG, +}; + +/** + * OTG control + * + * OTG_NO_CONTROL Id/VBUS notifications not required. 
Useful in host + * only configuration. + * OTG_PHY_CONTROL Id/VBUS notifications comes form USB PHY. + * OTG_PMIC_CONTROL Id/VBUS notifications comes from PMIC hardware. + * OTG_USER_CONTROL Id/VBUS notifications comes from User via sysfs. + * + */ +enum otg_control_type { + OTG_NO_CONTROL = 0, + OTG_PHY_CONTROL, + OTG_PMIC_CONTROL, + OTG_USER_CONTROL, +}; + +/** + * PHY used in + * + * INVALID_PHY Unsupported PHY + * CI_PHY Chipidea PHY + * SNPS_PICO_PHY Synopsis Pico PHY + * SNPS_FEMTO_PHY Synopsis Femto PHY + * QUSB_ULPI_PHY + * + */ +enum msm_usb_phy_type { + INVALID_PHY = 0, + CI_PHY, /* not supported */ + SNPS_PICO_PHY, + SNPS_FEMTO_PHY, + QUSB_ULPI_PHY, +}; + +#define IDEV_CHG_MAX 1500 +#define IUNIT 100 +#define IDEV_HVDCP_CHG_MAX 1800 +#define OTG_STATE_B_SUSPEND 4 +#define POWER_SUPPLY_TYPE_USB_FLOAT 13 /* Floating charger */ + +/** + * struct msm_otg_platform_data - platform device data + * for msm_otg driver. + * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as + * "do not overwrite default value at this address". + * @power_budget: VBUS power budget in mA (0 will be treated as 500mA). + * @mode: Supported mode (OTG/peripheral/host). + * @otg_control: OTG switch controlled by user/Id pin + * @default_mode: Default operational mode. Applicable only if + * OTG switch is controller by user. + * @pmic_id_irq: IRQ number assigned for PMIC USB ID line. + * @disable_reset_on_disconnect: perform USB PHY and LINK reset + * on USB cable disconnection. + * @enable_lpm_on_suspend: Enable the USB core to go into Low + * Power Mode, when USB bus is suspended but cable + * is connected. + * @core_clk_always_on_workaround: Don't disable core_clk when + * USB enters LPM. + * @delay_lpm_on_disconnect: Use a delay before entering LPM + * upon USB cable disconnection. 
+ * @enable_sec_phy: Use second HSPHY with USB2 core + * @bus_scale_table: parameters for bus bandwidth requirements + * @log2_itc: value of 2^(log2_itc-1) will be used as the + * interrupt threshold (ITC), when log2_itc is + * between 1 to 7. + * @l1_supported: enable link power management support. + * @dpdm_pulldown_added: Indicates whether pull down resistors are + * connected on data lines or not. + * @vddmin_gpio: dedictaed gpio in the platform that is used for + * pullup the D+ line in case of bus suspend with + * phy retention. + * @enable_ahb2ahb_bypass: Indicates whether enable AHB2AHB BYPASS + * mode with controller in device mode. + * @bool disable_retention_with_vdd_min: Indicates whether to enable + allowing VDDmin without putting PHY into retention. + * @bool enable_phy_id_pullup: Indicates whether phy id pullup is + enabled or not. + * @usb_id_gpio: Gpio used for USB ID detection. + * @hub_reset_gpio: Gpio used for hub reset. + * @switch_sel_gpio: Gpio used for controlling switch that + routing D+/D- from the USB HUB to the USB jack type B + for peripheral mode. + * @bool phy_dvdd_always_on: PHY DVDD is supplied by always on PMIC LDO. + * @bool emulation: Indicates whether we are running on emulation platform. + * @bool enable_streaming: Indicates whether streaming to be enabled by default. + * @bool enable_axi_prefetch: Indicates whether AXI Prefetch interface is used + for improving data performance. + * @usbeth_reset_gpio: Gpio used for external usb-to-eth reset. 
+ */ +struct msm_otg_platform_data { + int *phy_init_seq; + int phy_init_sz; + unsigned int power_budget; + enum usb_mode_type mode; + enum otg_control_type otg_control; + enum usb_mode_type default_mode; + enum msm_usb_phy_type phy_type; + int pmic_id_irq; + bool disable_reset_on_disconnect; + bool enable_lpm_on_dev_suspend; + bool core_clk_always_on_workaround; + bool delay_lpm_on_disconnect; + bool dp_manual_pullup; + bool enable_sec_phy; + struct msm_bus_scale_pdata *bus_scale_table; + int log2_itc; + bool l1_supported; + bool dpdm_pulldown_added; + int vddmin_gpio; + bool enable_ahb2ahb_bypass; + bool disable_retention_with_vdd_min; + bool enable_phy_id_pullup; + int usb_id_gpio; + int hub_reset_gpio; + int usbeth_reset_gpio; + int switch_sel_gpio; + bool phy_dvdd_always_on; + bool emulation; + bool enable_streaming; + bool enable_axi_prefetch; + bool vbus_low_as_hostmode; + bool phy_id_high_as_peripheral; +}; + +#define SDP_CHECK_DELAY_MS 10000 /* in ms */ +#define SDP_CHECK_BOOT_DELAY_MS 30000 /* in ms */ + +#define MSM_USB_BASE (motg->regs) +#define MSM_USB_PHY_CSR_BASE (motg->phy_csr_regs) + +#define DRIVER_NAME "msm_otg" + +#define ULPI_IO_TIMEOUT_USEC (10 * 1000) +#define USB_PHY_3P3_VOL_MIN 3050000 /* uV */ +#define USB_PHY_3P3_VOL_MAX 3300000 /* uV */ +#define USB_PHY_3P3_HPM_LOAD 50000 /* uA */ +#define USB_PHY_3P3_LPM_LOAD 4000 /* uA */ + +#define USB_PHY_1P8_VOL_MIN 1800000 /* uV */ +#define USB_PHY_1P8_VOL_MAX 1800000 /* uV */ +#define USB_PHY_1P8_HPM_LOAD 50000 /* uA */ +#define USB_PHY_1P8_LPM_LOAD 4000 /* uA */ + +#define USB_DEFAULT_SYSTEM_CLOCK 80000000 /* 80 MHz */ + +#define PM_QOS_SAMPLE_SEC 2 +#define PM_QOS_THRESHOLD 400 + +#define POWER_SUPPLY_PROP_REAL_TYPE 67 + +enum msm_otg_phy_reg_mode { + USB_PHY_REG_OFF, + USB_PHY_REG_ON, + USB_PHY_REG_LPM_ON, + USB_PHY_REG_LPM_OFF, + USB_PHY_REG_3P3_ON, + USB_PHY_REG_3P3_OFF, +}; + +static const char * const icc_path_names[] = { "usb-ddr" }; + +static struct { + u32 avg, peak; +} 
bus_vote_values[3][3] = { + /* usb_ddr avg/peak */ + [USB_NO_PERF_VOTE] = { {0, 0}, }, + [USB_MAX_PERF_VOTE] = { {0, 80000}, }, + [USB_MIN_PERF_VOTE] = { {6000, 6000}, }, +}; + + +static char *override_phy_init; +module_param(override_phy_init, charp, 0644); +MODULE_PARM_DESC(override_phy_init, + "Override HSUSB PHY Init Settings"); + +unsigned int lpm_disconnect_thresh = 1000; +module_param(lpm_disconnect_thresh, uint, 0644); +MODULE_PARM_DESC(lpm_disconnect_thresh, + "Delay before entering LPM on USB disconnect"); + +static bool floated_charger_enable; +module_param(floated_charger_enable, bool, 0644); +MODULE_PARM_DESC(floated_charger_enable, + "Whether to enable floated charger"); + +/* by default debugging is enabled */ +static unsigned int enable_dbg_log = 1; +module_param(enable_dbg_log, uint, 0644); +MODULE_PARM_DESC(enable_dbg_log, "Debug buffer events"); + +/* Max current to be drawn for DCP charger */ +static int dcp_max_current = IDEV_CHG_MAX; +module_param(dcp_max_current, int, 0644); +MODULE_PARM_DESC(dcp_max_current, "max current drawn for DCP charger"); + +static bool chg_detection_for_float_charger; +module_param(chg_detection_for_float_charger, bool, 0644); +MODULE_PARM_DESC(chg_detection_for_float_charger, + "Whether to do PHY based charger detection for float chargers"); + +static struct msm_otg *the_msm_otg; +static bool debug_bus_voting_enabled; + +static struct regulator *hsusb_3p3; +static struct regulator *hsusb_1p8; +static struct regulator *hsusb_vdd; +static struct regulator *vbus_otg; +static struct power_supply *psy; + +static int vdd_val[VDD_VAL_MAX]; +static u32 bus_freqs[USB_NOC_NUM_VOTE][USB_NUM_BUS_CLOCKS] /*bimc,snoc,pcnoc*/; +static char bus_clkname[USB_NUM_BUS_CLOCKS][20] = {"bimc_clk", "snoc_clk", + "pcnoc_clk"}; +static bool bus_clk_rate_set; + +static void dbg_inc(unsigned int *idx) +{ + *idx = (*idx + 1) & (DEBUG_MAX_MSG-1); +} + +static void +msm_otg_dbg_log_event(struct usb_phy *phy, char *event, int d1, int d2) +{ + 
struct msm_otg *motg = container_of(phy, struct msm_otg, phy); + unsigned long flags; + unsigned long long t; + unsigned long nanosec; + + if (!enable_dbg_log) + return; + + write_lock_irqsave(&motg->dbg_lock, flags); + t = cpu_clock(smp_processor_id()); + nanosec = do_div(t, 1000000000)/1000; + scnprintf(motg->buf[motg->dbg_idx], DEBUG_MSG_LEN, + "[%5lu.%06lu]: %s :%d:%d", + (unsigned long)t, nanosec, event, d1, d2); + + motg->dbg_idx++; + motg->dbg_idx = motg->dbg_idx % DEBUG_MAX_MSG; + write_unlock_irqrestore(&motg->dbg_lock, flags); +} + +static int msm_hsusb_ldo_init(struct msm_otg *motg, int init) +{ + int rc = 0; + + if (init) { + hsusb_3p3 = devm_regulator_get(motg->phy.dev, "HSUSB_3p3"); + if (IS_ERR(hsusb_3p3)) { + dev_err(motg->phy.dev, "unable to get hsusb 3p3\n"); + return PTR_ERR(hsusb_3p3); + } + + rc = regulator_set_voltage(hsusb_3p3, USB_PHY_3P3_VOL_MIN, + USB_PHY_3P3_VOL_MAX); + if (rc) { + dev_err(motg->phy.dev, "unable to set voltage level for hsusb 3p3\n" + ); + return rc; + } + hsusb_1p8 = devm_regulator_get(motg->phy.dev, "HSUSB_1p8"); + if (IS_ERR(hsusb_1p8)) { + dev_err(motg->phy.dev, "unable to get hsusb 1p8\n"); + rc = PTR_ERR(hsusb_1p8); + goto put_3p3_lpm; + } + rc = regulator_set_voltage(hsusb_1p8, USB_PHY_1P8_VOL_MIN, + USB_PHY_1P8_VOL_MAX); + if (rc) { + dev_err(motg->phy.dev, "unable to set voltage level for hsusb 1p8\n" + ); + goto put_1p8; + } + + return 0; + } + +put_1p8: + regulator_set_voltage(hsusb_1p8, 0, USB_PHY_1P8_VOL_MAX); +put_3p3_lpm: + regulator_set_voltage(hsusb_3p3, 0, USB_PHY_3P3_VOL_MAX); + return rc; +} + +static int msm_hsusb_config_vddcx(int high) +{ + struct msm_otg *motg = the_msm_otg; + int max_vol = vdd_val[VDD_MAX]; + int min_vol; + int ret; + + min_vol = vdd_val[!!high]; + ret = regulator_set_voltage(hsusb_vdd, min_vol, max_vol); + if (ret) { + pr_err("%s: unable to set the voltage for regulator HSUSB_VDDCX\n", + __func__); + return ret; + } + + pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, 
max_vol); + msm_otg_dbg_log_event(&motg->phy, "CONFIG VDDCX", min_vol, max_vol); + + return ret; +} + +static int msm_hsusb_ldo_enable(struct msm_otg *motg, + enum msm_otg_phy_reg_mode mode) +{ + int ret = 0; + + if (IS_ERR(hsusb_1p8)) { + pr_err("%s: HSUSB_1p8 is not initialized\n", __func__); + return -ENODEV; + } + + if (IS_ERR(hsusb_3p3)) { + pr_err("%s: HSUSB_3p3 is not initialized\n", __func__); + return -ENODEV; + } + + switch (mode) { + case USB_PHY_REG_ON: + ret = regulator_set_load(hsusb_1p8, USB_PHY_1P8_HPM_LOAD); + if (ret < 0) { + pr_err("%s: Unable to set HPM of the regulator HSUSB_1p8\n", + __func__); + return ret; + } + + ret = regulator_enable(hsusb_1p8); + if (ret) { + dev_err(motg->phy.dev, "%s: unable to enable the hsusb 1p8\n", + __func__); + regulator_set_load(hsusb_1p8, 0); + return ret; + } + fallthrough; + case USB_PHY_REG_3P3_ON: + ret = regulator_set_load(hsusb_3p3, USB_PHY_3P3_HPM_LOAD); + if (ret < 0) { + pr_err("%s: Unable to set HPM of the regulator HSUSB_3p3\n", + __func__); + if (mode == USB_PHY_REG_ON) { + regulator_set_load(hsusb_1p8, 0); + regulator_disable(hsusb_1p8); + } + return ret; + } + + ret = regulator_enable(hsusb_3p3); + if (ret) { + dev_err(motg->phy.dev, "%s: unable to enable the hsusb 3p3\n", + __func__); + regulator_set_load(hsusb_3p3, 0); + if (mode == USB_PHY_REG_ON) { + regulator_set_load(hsusb_1p8, 0); + regulator_disable(hsusb_1p8); + } + return ret; + } + + break; + + case USB_PHY_REG_OFF: + ret = regulator_disable(hsusb_1p8); + if (ret) { + dev_err(motg->phy.dev, "%s: unable to disable the hsusb 1p8\n", + __func__); + return ret; + } + + ret = regulator_set_load(hsusb_1p8, 0); + if (ret < 0) + pr_err("%s: Unable to set LPM of the regulator HSUSB_1p8\n", + __func__); + + fallthrough; + case USB_PHY_REG_3P3_OFF: + ret = regulator_disable(hsusb_3p3); + if (ret) { + dev_err(motg->phy.dev, "%s: unable to disable the hsusb 3p3\n", + __func__); + return ret; + } + ret = regulator_set_load(hsusb_3p3, 0); + if (ret < 
0) + pr_err("%s: Unable to set LPM of the regulator HSUSB_3p3\n", + __func__); + + break; + + case USB_PHY_REG_LPM_ON: + ret = regulator_set_load(hsusb_1p8, USB_PHY_1P8_LPM_LOAD); + if (ret < 0) { + pr_err("%s: Unable to set LPM of the regulator: HSUSB_1p8\n", + __func__); + return ret; + } + + ret = regulator_set_load(hsusb_3p3, USB_PHY_3P3_LPM_LOAD); + if (ret < 0) { + pr_err("%s: Unable to set LPM of the regulator: HSUSB_3p3\n", + __func__); + regulator_set_load(hsusb_1p8, USB_PHY_REG_ON); + return ret; + } + + break; + + case USB_PHY_REG_LPM_OFF: + ret = regulator_set_load(hsusb_1p8, USB_PHY_1P8_HPM_LOAD); + if (ret < 0) { + pr_err("%s: Unable to set HPM of the regulator: HSUSB_1p8\n", + __func__); + return ret; + } + + ret = regulator_set_load(hsusb_3p3, USB_PHY_3P3_HPM_LOAD); + if (ret < 0) { + pr_err("%s: Unable to set HPM of the regulator: HSUSB_3p3\n", + __func__); + regulator_set_load(hsusb_1p8, USB_PHY_REG_ON); + return ret; + } + + break; + + default: + pr_err("%s: Unsupported mode (%d).\n", __func__, mode); + return -EOPNOTSUPP; + } + + pr_debug("%s: USB reg mode (%d) (OFF/HPM/LPM)\n", __func__, mode); + msm_otg_dbg_log_event(&motg->phy, "USB REG MODE", mode, ret); + return ret < 0 ? 
ret : 0; +} + +static int ulpi_read(struct usb_phy *phy, u32 reg) +{ + struct msm_otg *motg = container_of(phy, struct msm_otg, phy); + int cnt = 0; + + if (motg->pdata->emulation) + return 0; + + if (motg->pdata->phy_type == QUSB_ULPI_PHY && reg > 0x3F) { + pr_debug("%s: ULPI vendor-specific reg 0x%02x not supported\n", + __func__, reg); + return 0; + } + + /* initiate read operation */ + writel_relaxed(ULPI_RUN | ULPI_READ | ULPI_ADDR(reg), + USB_ULPI_VIEWPORT); + + /* wait for completion */ + while (cnt < ULPI_IO_TIMEOUT_USEC) { + if (!(readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN)) + break; + udelay(1); + cnt++; + } + + if (cnt >= ULPI_IO_TIMEOUT_USEC) { + dev_err(phy->dev, "%s: timeout %08x\n", __func__, + readl_relaxed(USB_ULPI_VIEWPORT)); + dev_err(phy->dev, "PORTSC: %08x USBCMD: %08x\n", + readl_relaxed(USB_PORTSC), readl_relaxed(USB_USBCMD)); + return -ETIMEDOUT; + } + return ULPI_DATA_READ(readl_relaxed(USB_ULPI_VIEWPORT)); +} + +static int ulpi_write(struct usb_phy *phy, u32 val, u32 reg) +{ + struct msm_otg *motg = container_of(phy, struct msm_otg, phy); + int cnt = 0; + + if (motg->pdata->emulation) + return 0; + + if (motg->pdata->phy_type == QUSB_ULPI_PHY && reg > 0x3F) { + pr_debug("%s: ULPI vendor-specific reg 0x%02x not supported\n", + __func__, reg); + return 0; + } + + /* initiate write operation */ + writel_relaxed(ULPI_RUN | ULPI_WRITE | + ULPI_ADDR(reg) | ULPI_DATA(val), + USB_ULPI_VIEWPORT); + + /* wait for completion */ + while (cnt < ULPI_IO_TIMEOUT_USEC) { + if (!(readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN)) + break; + udelay(1); + cnt++; + } + + if (cnt >= ULPI_IO_TIMEOUT_USEC) { + dev_err(phy->dev, "%s: timeout\n", __func__); + dev_err(phy->dev, "PORTSC: %08x USBCMD: %08x\n", + readl_relaxed(USB_PORTSC), readl_relaxed(USB_USBCMD)); + return -ETIMEDOUT; + } + return 0; +} + +static struct usb_phy_io_ops msm_otg_io_ops = { + .read = ulpi_read, + .write = ulpi_write, +}; + +static void ulpi_init(struct msm_otg *motg) +{ + struct 
msm_otg_platform_data *pdata = motg->pdata; + int aseq[10]; + int *seq = NULL; + + if (override_phy_init) { + pr_debug("%s(): HUSB PHY Init:%s\n", __func__, + override_phy_init); + get_options(override_phy_init, ARRAY_SIZE(aseq), aseq); + seq = &aseq[1]; + } else { + seq = pdata->phy_init_seq; + } + + if (!seq) + return; + + while (seq[0] >= 0) { + if (override_phy_init) + pr_debug("ulpi: write 0x%02x to 0x%02x\n", + seq[0], seq[1]); + + dev_vdbg(motg->phy.dev, "ulpi: write 0x%02x to 0x%02x\n", + seq[0], seq[1]); + msm_otg_dbg_log_event(&motg->phy, "ULPI WRITE", seq[0], seq[1]); + ulpi_write(&motg->phy, seq[0], seq[1]); + seq += 2; + } +} + +static int msm_otg_phy_clk_reset(struct msm_otg *motg) +{ + int ret; + + if (!motg->phy_reset_clk && !motg->phy_reset) + return 0; + + if (motg->sleep_clk) + clk_disable_unprepare(motg->sleep_clk); + if (motg->phy_csr_clk) + clk_disable_unprepare(motg->phy_csr_clk); + + ret = reset_control_assert(motg->phy_reset); + if (ret) { + pr_err("phy_reset_clk assert failed %d\n", ret); + return ret; + } + /* + * As per databook, 10 usec delay is required between + * PHY POR assert and de-assert. + */ + usleep_range(10, 15); + ret = reset_control_deassert(motg->phy_reset); + if (ret) { + pr_err("phy_reset_clk de-assert failed %d\n", ret); + return ret; + } + /* + * As per databook, it takes 75 usec for PHY to stabilize + * after the reset. 
+ */ + usleep_range(80, 100); + + if (motg->phy_csr_clk) + clk_prepare_enable(motg->phy_csr_clk); + if (motg->sleep_clk) + clk_prepare_enable(motg->sleep_clk); + + return 0; +} + +static int msm_otg_link_clk_reset(struct msm_otg *motg, bool assert) +{ + int ret; + + if (assert) { + /* Using asynchronous block reset to the hardware */ + dev_dbg(motg->phy.dev, "block_reset ASSERT\n"); + clk_disable_unprepare(motg->pclk); + clk_disable_unprepare(motg->core_clk); + ret = reset_control_assert(motg->core_reset); + if (ret) + dev_err(motg->phy.dev, "usb hs_clk assert failed\n"); + } else { + dev_dbg(motg->phy.dev, "block_reset DEASSERT\n"); + ret = reset_control_deassert(motg->core_reset); + ndelay(200); + ret = clk_prepare_enable(motg->core_clk); + WARN(ret, "USB core_clk enable failed\n"); + ret = clk_prepare_enable(motg->pclk); + WARN(ret, "USB pclk enable failed\n"); + if (ret) + dev_err(motg->phy.dev, "usb hs_clk deassert failed\n"); + } + return ret; +} + +static int msm_otg_phy_reset(struct msm_otg *motg) +{ + u32 val; + int ret; + struct msm_otg_platform_data *pdata = motg->pdata; + + /* + * AHB2AHB Bypass mode shouldn't be enable before doing + * async clock reset. If it is enable, disable the same. + */ + val = readl_relaxed(USB_AHBMODE); + if (val & AHB2AHB_BYPASS) { + pr_err("%s(): AHB2AHB_BYPASS SET: AHBMODE:%x\n", + __func__, val); + val &= ~AHB2AHB_BYPASS_BIT_MASK; + writel_relaxed(val | AHB2AHB_BYPASS_CLEAR, USB_AHBMODE); + pr_err("%s(): AHBMODE: %x\n", __func__, + readl_relaxed(USB_AHBMODE)); + } + + ret = msm_otg_link_clk_reset(motg, 1); + if (ret) + return ret; + + msm_otg_phy_clk_reset(motg); + + /* wait for 1ms delay as suggested in HPG. 
*/ + usleep_range(1000, 1200); + + ret = msm_otg_link_clk_reset(motg, 0); + if (ret) + return ret; + + if (pdata && pdata->enable_sec_phy) + writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16), + USB_PHY_CTRL2); + val = readl_relaxed(USB_PORTSC) & ~PORTSC_PTS_MASK; + writel_relaxed(val | PORTSC_PTS_ULPI, USB_PORTSC); + + dev_info(motg->phy.dev, "phy_reset: success\n"); + msm_otg_dbg_log_event(&motg->phy, "PHY RESET SUCCESS", + motg->inputs, motg->phy.otg->state); + return 0; +} + +#define LINK_RESET_TIMEOUT_USEC (250 * 1000) +static int msm_otg_link_reset(struct msm_otg *motg) +{ + int cnt = 0; + struct msm_otg_platform_data *pdata = motg->pdata; + + writel_relaxed(USBCMD_RESET, USB_USBCMD); + while (cnt < LINK_RESET_TIMEOUT_USEC) { + if (!(readl_relaxed(USB_USBCMD) & USBCMD_RESET)) + break; + udelay(1); + cnt++; + } + if (cnt >= LINK_RESET_TIMEOUT_USEC) + return -ETIMEDOUT; + + /* select ULPI phy */ + writel_relaxed(0x80000000, USB_PORTSC); + writel_relaxed(0x0, USB_AHBBURST); + writel_relaxed(0x08, USB_AHBMODE); + + if (pdata && pdata->enable_sec_phy) + writel_relaxed(readl_relaxed(USB_PHY_CTRL2) | (1<<16), + USB_PHY_CTRL2); + return 0; +} + +#define QUSB2PHY_PORT_POWERDOWN 0xB4 +#define QUSB2PHY_PORT_UTMI_CTRL2 0xC4 + +static void msm_usb_phy_reset(struct msm_otg *motg) +{ + u32 val; + int ret, *seq; + + switch (motg->pdata->phy_type) { + case SNPS_PICO_PHY: + /* Assert USB PHY_PON */ + val = readl_relaxed(motg->usb_phy_ctrl_reg); + val &= ~PHY_POR_BIT_MASK; + val |= PHY_POR_ASSERT; + writel_relaxed(val, motg->usb_phy_ctrl_reg); + + /* wait for minimum 10 microseconds as + * suggested in HPG. 
+ */ + usleep_range(10, 15); + + /* Deassert USB PHY_PON */ + val = readl_relaxed(motg->usb_phy_ctrl_reg); + val &= ~PHY_POR_BIT_MASK; + val |= PHY_POR_DEASSERT; + writel_relaxed(val, motg->usb_phy_ctrl_reg); + break; + case QUSB_ULPI_PHY: + ret = reset_control_assert(motg->phy_reset); + if (ret) { + pr_err("phy_reset_clk assert failed %d\n", ret); + break; + } + + /* need to delay 10us for PHY to reset */ + usleep_range(10, 20); + + ret = reset_control_deassert(motg->phy_reset); + if (ret) { + pr_err("phy_reset_clk de-assert failed %d\n", ret); + break; + } + + /* Ensure that RESET operation is completed. */ + mb(); + + writel_relaxed(0x23, + motg->phy_csr_regs + QUSB2PHY_PORT_POWERDOWN); + writel_relaxed(0x0, + motg->phy_csr_regs + QUSB2PHY_PORT_UTMI_CTRL2); + + /* Program tuning parameters for PHY */ + seq = motg->pdata->phy_init_seq; + if (seq) { + while (seq[0] >= 0) { + writel_relaxed(seq[1], + motg->phy_csr_regs + seq[0]); + seq += 2; + } + } + + /* ensure above writes are completed before re-enabling PHY */ + wmb(); + writel_relaxed(0x22, + motg->phy_csr_regs + QUSB2PHY_PORT_POWERDOWN); + break; + case SNPS_FEMTO_PHY: + if (!motg->phy_por_clk && !motg->phy_por_reset) { + pr_err("phy_por_clk missing\n"); + break; + } + ret = reset_control_assert(motg->phy_por_reset); + if (ret) { + pr_err("phy_por_clk assert failed %d\n", ret); + break; + } + /* + * The Femto PHY is POR reset in the following scenarios. + * + * 1. After overriding the parameter registers. + * 2. Low power mode exit from PHY retention. + * + * Ensure that SIDDQ is cleared before bringing the PHY + * out of reset. + * + */ + + val = readb_relaxed(USB_PHY_CSR_PHY_CTRL_COMMON0); + val &= ~SIDDQ; + writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL_COMMON0); + + /* + * As per databook, 10 usec delay is required between + * PHY POR assert and de-assert. 
+ */ + usleep_range(10, 20); + ret = reset_control_deassert(motg->phy_por_reset); + if (ret) { + pr_err("phy_por_clk de-assert failed %d\n", ret); + break; + } + /* + * As per databook, it takes 75 usec for PHY to stabilize + * after the reset. + */ + usleep_range(80, 100); + break; + default: + break; + } + /* Ensure that RESET operation is completed. */ + mb(); +} + +static void msm_chg_block_on(struct msm_otg *); + +static int msm_otg_reset(struct usb_phy *phy) +{ + struct msm_otg *motg = container_of(phy, struct msm_otg, phy); + struct msm_otg_platform_data *pdata = motg->pdata; + int ret; + u32 val = 0; + u32 ulpi_val = 0; + + mutex_lock(&motg->lock); + msm_otg_dbg_log_event(&motg->phy, "USB RESET", phy->otg->state, + get_pm_runtime_counter(phy->dev)); + /* + * USB PHY and Link reset also reset the USB BAM. + * Thus perform reset operation only once to avoid + * USB BAM reset on other cases e.g. USB cable disconnections. + * If hardware reported error then it must be reset for recovery. + */ + if (motg->err_event_seen) { + dev_info(phy->dev, "performing USB h/w reset for recovery\n"); + } else if (pdata->disable_reset_on_disconnect && + motg->reset_counter) { + mutex_unlock(&motg->lock); + return 0; + } + + motg->reset_counter++; + + disable_irq(motg->irq); + if (motg->phy_irq) + disable_irq(motg->phy_irq); + + ret = msm_otg_phy_reset(motg); + if (ret) { + dev_err(phy->dev, "phy_reset failed\n"); + if (motg->phy_irq) + enable_irq(motg->phy_irq); + + enable_irq(motg->irq); + mutex_unlock(&motg->lock); + return ret; + } + + if (motg->phy_irq) + enable_irq(motg->phy_irq); + + enable_irq(motg->irq); + ret = msm_otg_link_reset(motg); + if (ret) { + dev_err(phy->dev, "link reset failed\n"); + mutex_unlock(&motg->lock); + return ret; + } + + msleep(100); + + /* Reset USB PHY after performing USB Link RESET */ + msm_usb_phy_reset(motg); + + /* Program USB PHY Override registers. 
*/ + ulpi_init(motg); + + /* + * It is required to reset USB PHY after programming + * the USB PHY Override registers to get the new + * values into effect. + */ + msm_usb_phy_reset(motg); + + if (pdata->otg_control == OTG_PHY_CONTROL) { + val = readl_relaxed(USB_OTGSC); + if (pdata->mode == USB_OTG) { + ulpi_val = ULPI_INT_IDGRD | ULPI_INT_SESS_VALID; + val |= OTGSC_IDIE | OTGSC_BSVIE; + } else if (pdata->mode == USB_PERIPHERAL) { + ulpi_val = ULPI_INT_SESS_VALID; + val |= OTGSC_BSVIE; + } + writel_relaxed(val, USB_OTGSC); + ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_RISE); + ulpi_write(phy, ulpi_val, ULPI_USB_INT_EN_FALL); + } else if (pdata->otg_control == OTG_PMIC_CONTROL) { + ulpi_write(phy, OTG_COMP_DISABLE, + ULPI_SET(ULPI_PWR_CLK_MNG_REG)); + if (motg->phy_irq) + writeb_relaxed(USB_PHY_ID_MASK, + USB2_PHY_USB_PHY_INTERRUPT_MASK1); + } + + if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED) + writel_relaxed(readl_relaxed(USB_OTGSC) & ~(OTGSC_IDPU), + USB_OTGSC); + + msm_otg_dbg_log_event(&motg->phy, "USB RESET DONE", phy->otg->state, + get_pm_runtime_counter(phy->dev)); + + if (pdata->enable_axi_prefetch) + writel_relaxed(readl_relaxed(USB_HS_APF_CTRL) | (APF_CTRL_EN), + USB_HS_APF_CTRL); + + /* + * Disable USB BAM as block reset resets USB BAM registers. + */ + msm_usb_bam_enable(CI_CTRL, false); + + if (phy->otg->state == OTG_STATE_UNDEFINED && motg->rm_pulldown) + msm_chg_block_on(motg); + mutex_unlock(&motg->lock); + + return 0; +} + +static void msm_otg_kick_sm_work(struct msm_otg *motg) +{ + if (atomic_read(&motg->in_lpm)) + motg->resume_pending = true; + + /* For device mode, resume now. 
Let pm_resume handle other cases */ + if (atomic_read(&motg->pm_suspended) && + motg->phy.otg->state != OTG_STATE_B_SUSPEND) { + motg->sm_work_pending = true; + } else if (!motg->sm_work_pending) { + /* process event only if previous one is not pending */ + queue_work(motg->otg_wq, &motg->sm_work); + } +} + +/* + * UDC calls usb_phy_set_suspend() to notify during bus suspend/resume. + * Update relevant state-machine inputs and queue sm_work. + * LPM enter/exit doesn't happen directly from this routine. + */ + +static int msm_otg_set_suspend(struct usb_phy *phy, int suspend) +{ + struct msm_otg *motg = container_of(phy, struct msm_otg, phy); + + pr_debug("%s(%d) in %s state\n", __func__, suspend, + usb_otg_state_string(phy->otg->state)); + msm_otg_dbg_log_event(phy, "SET SUSPEND", suspend, phy->otg->state); + + if (!(motg->caps & ALLOW_LPM_ON_DEV_SUSPEND)) + return 0; + + if (suspend) { + /* called in suspend interrupt context */ + pr_debug("peripheral bus suspend\n"); + msm_otg_dbg_log_event(phy, "PERIPHERAL BUS SUSPEND", + motg->inputs, phy->otg->state); + + set_bit(A_BUS_SUSPEND, &motg->inputs); + } else { + /* host resume or remote-wakeup */ + pr_debug("peripheral bus resume\n"); + msm_otg_dbg_log_event(phy, "PERIPHERAL BUS RESUME", + motg->inputs, phy->otg->state); + + clear_bit(A_BUS_SUSPEND, &motg->inputs); + } + /* use kick_sm_work to handle race with pm_resume */ + msm_otg_kick_sm_work(motg); + + return 0; +} + +static int msm_otg_bus_freq_set(struct msm_otg *motg, enum usb_noc_mode mode) +{ + int i, ret; + long rate; + + for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) { + rate = bus_freqs[mode][i]; + if (!rate) { + pr_debug("%s rate not available\n", bus_clkname[i]); + continue; + } + + ret = clk_set_rate(motg->bus_clks[i], rate); + if (ret) { + pr_err("%s set rate failed: %d\n", bus_clkname[i], ret); + return ret; + } + pr_debug("%s set to %lu Hz\n", bus_clkname[i], + clk_get_rate(motg->bus_clks[i])); + msm_otg_dbg_log_event(&motg->phy, "OTG BUS FREQ SET", i, 
rate); + } + + bus_clk_rate_set = true; + + return 0; +} + +static int msm_otg_bus_freq_get(struct msm_otg *motg) +{ + struct device *dev = motg->phy.dev; + struct device_node *np = dev->of_node; + int len = 0, i, count = USB_NUM_BUS_CLOCKS; + + if (!np) + return -EINVAL; + + /* SVS requires extra set of frequencies for perf_mode sysfs node */ + if (motg->default_noc_mode == USB_NOC_SVS_VOTE) + count *= 2; + + len = of_property_count_elems_of_size(np, "qcom,bus-clk-rate", + sizeof(len)); + if (!len || (len != count)) { + pr_err("Invalid bus rate:%d %u\n", len, motg->default_noc_mode); + return -EINVAL; + } + of_property_read_u32_array(np, "qcom,bus-clk-rate", bus_freqs[0], + count); + for (i = 0; i < USB_NUM_BUS_CLOCKS; i++) { + if (bus_freqs[0][i] == 0) { + motg->bus_clks[i] = NULL; + pr_debug("%s not available\n", bus_clkname[i]); + continue; + } + + motg->bus_clks[i] = devm_clk_get(dev, bus_clkname[i]); + if (IS_ERR(motg->bus_clks[i])) { + pr_err("%s get failed\n", bus_clkname[i]); + return PTR_ERR(motg->bus_clks[i]); + } + } + return 0; +} + +static void msm_otg_update_bus_bw(struct msm_otg *motg, enum usb_bus_vote bv_index) +{ + + int ret = 0; + + ret = icc_set_bw(motg->icc_paths, bus_vote_values[bv_index][0].avg, + bus_vote_values[bv_index][0].peak); + + if (ret) + pr_err("bus bw voting path:%s bv:%d failed %d\n", + icc_path_names[0], bv_index, ret); + +} + +static void msm_otg_enable_phy_hv_int(struct msm_otg *motg) +{ + bool bsv_id_hv_int = false; + bool dp_dm_hv_int = false; + u32 val; + + if (motg->pdata->otg_control == OTG_PHY_CONTROL || + motg->phy_irq) + bsv_id_hv_int = true; + if (motg->host_bus_suspend || motg->device_bus_suspend) + dp_dm_hv_int = true; + + if (!bsv_id_hv_int && !dp_dm_hv_int) + return; + + switch (motg->pdata->phy_type) { + case SNPS_PICO_PHY: + val = readl_relaxed(motg->usb_phy_ctrl_reg); + if (bsv_id_hv_int) + val |= (PHY_IDHV_INTEN | PHY_OTGSESSVLDHV_INTEN); + if (dp_dm_hv_int) + val |= PHY_CLAMP_DPDMSE_EN; + writel_relaxed(val, 
motg->usb_phy_ctrl_reg); + break; + case SNPS_FEMTO_PHY: + if (bsv_id_hv_int) { + val = readb_relaxed(USB_PHY_CSR_PHY_CTRL1); + val |= ID_HV_CLAMP_EN_N; + writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL1); + } + + if (dp_dm_hv_int) { + val = readb_relaxed(USB_PHY_CSR_PHY_CTRL3); + val |= CLAMP_MPM_DPSE_DMSE_EN_N; + writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL3); + } + break; + default: + break; + } + pr_debug("%s: bsv_id_hv = %d dp_dm_hv_int = %d\n", + __func__, bsv_id_hv_int, dp_dm_hv_int); + msm_otg_dbg_log_event(&motg->phy, "PHY HV INTR ENABLED", + bsv_id_hv_int, dp_dm_hv_int); +} + +static void msm_otg_disable_phy_hv_int(struct msm_otg *motg) +{ + bool bsv_id_hv_int = false; + bool dp_dm_hv_int = false; + u32 val; + + if (motg->pdata->otg_control == OTG_PHY_CONTROL || + motg->phy_irq) + bsv_id_hv_int = true; + if (motg->host_bus_suspend || motg->device_bus_suspend) + dp_dm_hv_int = true; + + if (!bsv_id_hv_int && !dp_dm_hv_int) + return; + + switch (motg->pdata->phy_type) { + case SNPS_PICO_PHY: + val = readl_relaxed(motg->usb_phy_ctrl_reg); + if (bsv_id_hv_int) + val &= ~(PHY_IDHV_INTEN | PHY_OTGSESSVLDHV_INTEN); + if (dp_dm_hv_int) + val &= ~PHY_CLAMP_DPDMSE_EN; + writel_relaxed(val, motg->usb_phy_ctrl_reg); + break; + case SNPS_FEMTO_PHY: + if (bsv_id_hv_int) { + val = readb_relaxed(USB_PHY_CSR_PHY_CTRL1); + val &= ~ID_HV_CLAMP_EN_N; + writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL1); + } + + if (dp_dm_hv_int) { + val = readb_relaxed(USB_PHY_CSR_PHY_CTRL3); + val &= ~CLAMP_MPM_DPSE_DMSE_EN_N; + writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL3); + } + break; + default: + break; + } + pr_debug("%s: bsv_id_hv = %d dp_dm_hv_int = %d\n", + __func__, bsv_id_hv_int, dp_dm_hv_int); + msm_otg_dbg_log_event(&motg->phy, "PHY HV INTR DISABLED", + bsv_id_hv_int, dp_dm_hv_int); +} + +static void msm_otg_enter_phy_retention(struct msm_otg *motg) +{ + u32 val; + + switch (motg->pdata->phy_type) { + case SNPS_PICO_PHY: + val = readl_relaxed(motg->usb_phy_ctrl_reg); + val &= ~PHY_RETEN; + 
writel_relaxed(val, motg->usb_phy_ctrl_reg); + break; + case SNPS_FEMTO_PHY: + /* Retention is supported via SIDDQ */ + val = readb_relaxed(USB_PHY_CSR_PHY_CTRL_COMMON0); + val |= SIDDQ; + writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL_COMMON0); + break; + default: + break; + } + pr_debug("USB PHY is in retention\n"); + msm_otg_dbg_log_event(&motg->phy, "USB PHY ENTER RETENTION", + motg->pdata->phy_type, 0); +} + +static void msm_otg_exit_phy_retention(struct msm_otg *motg) +{ + int val; + + switch (motg->pdata->phy_type) { + case SNPS_PICO_PHY: + val = readl_relaxed(motg->usb_phy_ctrl_reg); + val |= PHY_RETEN; + writel_relaxed(val, motg->usb_phy_ctrl_reg); + break; + case SNPS_FEMTO_PHY: + /* + * It is required to do USB block reset to bring Femto PHY out + * of retention. + */ + msm_otg_reset(&motg->phy); + break; + default: + break; + } + pr_debug("USB PHY is exited from retention\n"); + msm_otg_dbg_log_event(&motg->phy, "USB PHY EXIT RETENTION", + motg->pdata->phy_type, 0); +} + +static void msm_id_status_w(struct work_struct *w); +static irqreturn_t msm_otg_phy_irq_handler(int irq, void *data) +{ + struct msm_otg *motg = data; + + msm_otg_dbg_log_event(&motg->phy, "PHY ID IRQ", + atomic_read(&motg->in_lpm), motg->phy.otg->state); + if (atomic_read(&motg->in_lpm)) { + pr_debug("PHY ID IRQ in LPM\n"); + motg->phy_irq_pending = true; + msm_otg_kick_sm_work(motg); + } else { + pr_debug("PHY ID IRQ outside LPM\n"); + msm_id_status_w(&motg->id_status_work.work); + } + + return IRQ_HANDLED; +} + +#define PHY_SUSPEND_TIMEOUT_USEC (5 * 1000) +#define PHY_DEVICE_BUS_SUSPEND_TIMEOUT_USEC 100 +#define PHY_RESUME_TIMEOUT_USEC (100 * 1000) + +#define PHY_SUSPEND_RETRIES_MAX 3 + +static void msm_otg_set_vbus_state(int online); +static void msm_otg_perf_vote_update(struct msm_otg *motg, bool perf_mode); +static int get_psy_type(struct msm_otg *motg); + +#ifdef CONFIG_PM_SLEEP +static int msm_otg_suspend(struct msm_otg *motg) +{ + struct usb_phy *phy = &motg->phy; + struct usb_bus 
*bus = phy->otg->host; + struct msm_otg_platform_data *pdata = motg->pdata; + int cnt; + bool host_bus_suspend, device_bus_suspend, sm_work_busy; + bool host_pc_charger; + u32 cmd_val; + u32 portsc, config2; + u32 func_ctrl; + int phcd_retry_cnt = 0, ret; + unsigned int phy_suspend_timeout; + + cnt = 0; + msm_otg_dbg_log_event(phy, "LPM ENTER START", + motg->inputs, phy->otg->state); + + if (atomic_read(&motg->in_lpm)) + return 0; + + cancel_delayed_work_sync(&motg->perf_vote_work); + + disable_irq(motg->irq); + if (motg->phy_irq) + disable_irq(motg->phy_irq); +lpm_start: + host_bus_suspend = phy->otg->host && !test_bit(ID, &motg->inputs); + device_bus_suspend = phy->otg->gadget && test_bit(ID, &motg->inputs) && + test_bit(A_BUS_SUSPEND, &motg->inputs) && + motg->caps & ALLOW_LPM_ON_DEV_SUSPEND; + + if (host_bus_suspend) + msm_otg_perf_vote_update(motg, false); + + host_pc_charger = (motg->chg_type == USB_SDP_CHARGER) || + (motg->chg_type == USB_CDP_CHARGER) || + (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB) || + (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_CDP); + msm_otg_dbg_log_event(phy, "CHARGER CONNECTED", + host_pc_charger, motg->inputs); + + /* !BSV, but its handling is in progress by otg sm_work */ + sm_work_busy = !test_bit(B_SESS_VLD, &motg->inputs) && + phy->otg->state == OTG_STATE_B_PERIPHERAL; + + /* Perform block reset to recover from UDC error events on disconnect */ + if (motg->err_event_seen) + msm_otg_reset(phy); + + /* + * Abort suspend when, + * 1. host mode activation in progress due to Micro-A cable insertion + * 2. 
!BSV, but its handling is in progress by otg sm_work + * Don't abort suspend in case of dcp detected by PMIC + */ + + if ((test_bit(B_SESS_VLD, &motg->inputs) && !device_bus_suspend && + host_pc_charger) || sm_work_busy) { + msm_otg_dbg_log_event(phy, "LPM ENTER ABORTED", + motg->inputs, 0); + enable_irq(motg->irq); + if (motg->phy_irq) + enable_irq(motg->phy_irq); + return -EBUSY; + } + + /* Enable line state difference wakeup fix for only device and host + * bus suspend scenarios. Otherwise PHY can not be suspended when + * a charger that pulls DP/DM high is connected. + */ + config2 = readl_relaxed(USB_GENCONFIG_2); + if (device_bus_suspend) + config2 |= GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN; + else + config2 &= ~GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN; + writel_relaxed(config2, USB_GENCONFIG_2); + + if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED) { + /* put the controller in non-driving mode */ + func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL); + func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK; + func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING; + ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL); + ulpi_write(phy, ULPI_IFC_CTRL_AUTORESUME, + ULPI_CLR(ULPI_IFC_CTRL)); + } + + /* + * PHY suspend sequence as mentioned in the databook. + * + * Device bus suspend: The controller may abort PHY suspend if + * there is an incoming reset or resume from the host. If PHCD + * is not set within 100 usec. Abort the LPM sequence. + * + * Host bus suspend: If the peripheral is attached, PHY is already + * put into suspend along with the peripheral bus suspend. poll for + * PHCD upto 5 msec. If the peripheral is not attached i.e entering + * LPM with Micro-A cable, set the PHCD and poll for it for 5 msec. + * + * No cable connected: Set the PHCD to suspend the PHY. Poll for PHCD + * upto 5 msec. + * + * The controller aborts PHY suspend only in device bus suspend case. + * In other cases, it is observed that PHCD may not get set within + * the timeout. 
If so, set the PHCD again and poll for it before + * reset recovery. + */ + +phcd_retry: + if (device_bus_suspend) + phy_suspend_timeout = PHY_DEVICE_BUS_SUSPEND_TIMEOUT_USEC; + else + phy_suspend_timeout = PHY_SUSPEND_TIMEOUT_USEC; + + cnt = 0; + portsc = readl_relaxed(USB_PORTSC); + if (!(portsc & PORTSC_PHCD)) { + writel_relaxed(portsc | PORTSC_PHCD, + USB_PORTSC); + while (cnt < phy_suspend_timeout) { + if (readl_relaxed(USB_PORTSC) & PORTSC_PHCD) + break; + udelay(1); + cnt++; + } + } + + if (cnt >= phy_suspend_timeout) { + if (phcd_retry_cnt > PHY_SUSPEND_RETRIES_MAX) { + msm_otg_dbg_log_event(phy, "PHY SUSPEND FAILED", + phcd_retry_cnt, phy->otg->state); + dev_err(phy->dev, "PHY suspend failed\n"); + ret = -EBUSY; + goto phy_suspend_fail; + } + + if (device_bus_suspend) { + dev_dbg(phy->dev, "PHY suspend aborted\n"); + ret = -EBUSY; + goto phy_suspend_fail; + } else { + if (phcd_retry_cnt++ < PHY_SUSPEND_RETRIES_MAX) { + dev_dbg(phy->dev, "PHY suspend retry\n"); + goto phcd_retry; + } else { + dev_err(phy->dev, "reset attempt during PHY suspend\n"); + phcd_retry_cnt++; + motg->reset_counter = 0; + msm_otg_reset(phy); + goto lpm_start; + } + } + } + + /* + * PHY has capability to generate interrupt asynchronously in low + * power mode (LPM). This interrupt is level triggered. So USB IRQ + * line must be disabled till async interrupt enable bit is cleared + * in USBCMD register. Assert STP (ULPI interface STOP signal) to + * block data communication from PHY. + * + * PHY retention mode is disallowed while entering to LPM with wall + * charger connected. But PHY is put into suspend mode. Hence + * enable asynchronous interrupt to detect charger disconnection when + * PMIC notifications are unavailable. 
+ */ + cmd_val = readl_relaxed(USB_USBCMD); + if (host_bus_suspend || device_bus_suspend || + (motg->pdata->otg_control == OTG_PHY_CONTROL)) + cmd_val |= ASYNC_INTR_CTRL | ULPI_STP_CTRL; + else + cmd_val |= ULPI_STP_CTRL; + writel_relaxed(cmd_val, USB_USBCMD); + + /* + * BC1.2 spec mandates PD to enable VDP_SRC when charging from DCP. + * PHY retention and collapse can not happen with VDP_SRC enabled. + */ + + + /* + * We come here in 3 scenarios. + * + * (1) No cable connected (out of session): + * - BSV/ID HV interrupts are enabled for PHY based detection. + * - PHY is put in retention. + * - If allowed (PMIC based detection), PHY is power collapsed. + * - DVDD (CX/MX) minimization and XO shutdown are allowed. + * - The wakeup is through VBUS/ID interrupt from PHY/PMIC/user. + * (2) USB wall charger: + * - BSV/ID HV interrupts are enabled for PHY based detection. + * - For BC1.2 compliant charger, retention is not allowed to + * keep VDP_SRC on. XO shutdown is allowed. + * - The wakeup is through VBUS/ID interrupt from PHY/PMIC/user. + * (3) Device/Host Bus suspend (if LPM is enabled): + * - BSV/ID HV interrupts are enabled for PHY based detection. + * - D+/D- MPM pin are configured to wakeup from line state + * change through PHY HV interrupts. PHY HV interrupts are + * also enabled. If MPM pins are not available, retention and + * XO is not allowed. + * - PHY is put into retention only if a gpio is used to keep + * the D+ pull-up. ALLOW_BUS_SUSPEND_WITHOUT_REWORK capability + * is set means, PHY can enable D+ pull-up or D+/D- pull-down + * without any re-work and PHY should not be put into retention. + * - DVDD (CX/MX) minimization and XO shutdown is allowed if + * ALLOW_BUS_SUSPEND_WITHOUT_REWORK is set (PHY DVDD is supplied + * via PMIC LDO) or board level re-work is present. + * - The wakeup is through VBUS/ID interrupt from PHY/PMIC/user + * or USB link asynchronous interrupt for line state change. 
+ *
+ */
+ motg->host_bus_suspend = host_bus_suspend;
+ motg->device_bus_suspend = device_bus_suspend;
+
+ if (motg->caps & ALLOW_PHY_RETENTION && !device_bus_suspend &&
+ (!host_bus_suspend || (motg->caps &
+ ALLOW_BUS_SUSPEND_WITHOUT_REWORK) ||
+ ((motg->caps & ALLOW_HOST_PHY_RETENTION)
+ && (pdata->dpdm_pulldown_added || !(portsc & PORTSC_CCS))))) {
+ msm_otg_enable_phy_hv_int(motg);
+ if ((!host_bus_suspend || !(motg->caps &
+ ALLOW_BUS_SUSPEND_WITHOUT_REWORK)) &&
+ !(motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) {
+ msm_otg_enter_phy_retention(motg);
+ motg->lpm_flags |= PHY_RETENTIONED;
+ }
+ } else if (device_bus_suspend) {
+ /* DP DM HV interrupts are used for bus resume from XO off */
+ msm_otg_enable_phy_hv_int(motg);
+ if (motg->caps & ALLOW_PHY_RETENTION && pdata->vddmin_gpio) {
+
+ /*
+ * This is HW WA needed when PHY_CLAMP_DPDMSE_EN is
+ * enabled and we put the phy in retention mode.
+ * Without this WA, the async_irq will be fired right
+ * after suspending without any bus resume.
+ */ + config2 = readl_relaxed(USB_GENCONFIG_2); + config2 &= ~GENCONFIG_2_DPSE_DMSE_HV_INTR_EN; + writel_relaxed(config2, USB_GENCONFIG_2); + + msm_otg_enter_phy_retention(motg); + motg->lpm_flags |= PHY_RETENTIONED; + gpio_direction_output(pdata->vddmin_gpio, 1); + } + } + + /* Ensure that above operation is completed before turning off clocks */ + mb(); + /* Consider clocks on workaround flag only in case of bus suspend */ + if (!(phy->otg->state == OTG_STATE_B_PERIPHERAL && + test_bit(A_BUS_SUSPEND, &motg->inputs)) || + !motg->pdata->core_clk_always_on_workaround) { + clk_disable_unprepare(motg->pclk); + clk_disable_unprepare(motg->core_clk); + if (motg->phy_csr_clk) + clk_disable_unprepare(motg->phy_csr_clk); + motg->lpm_flags |= CLOCKS_DOWN; + } + + /* usb phy no more require TCXO clock, hence vote for TCXO disable */ + if (!host_bus_suspend || (motg->caps & + ALLOW_BUS_SUSPEND_WITHOUT_REWORK) || + ((motg->caps & ALLOW_HOST_PHY_RETENTION) && + (pdata->dpdm_pulldown_added || !(portsc & PORTSC_CCS)))) { + if (motg->xo_clk) { + clk_disable_unprepare(motg->xo_clk); + motg->lpm_flags |= XO_SHUTDOWN; + } + } + + if (motg->caps & ALLOW_PHY_POWER_COLLAPSE && + !host_bus_suspend && !device_bus_suspend) { + msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF); + motg->lpm_flags |= PHY_PWR_COLLAPSED; + } else if (motg->caps & ALLOW_PHY_REGULATORS_LPM && + !host_bus_suspend && !device_bus_suspend) { + msm_hsusb_ldo_enable(motg, USB_PHY_REG_LPM_ON); + motg->lpm_flags |= PHY_REGULATORS_LPM; + } + + if (motg->lpm_flags & PHY_RETENTIONED || + (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) { + regulator_disable(hsusb_vdd); + msm_hsusb_config_vddcx(0); + } + + if (device_may_wakeup(phy->dev)) { + if (host_bus_suspend || device_bus_suspend) { + enable_irq_wake(motg->async_irq); + enable_irq_wake(motg->irq); + } + + if (motg->phy_irq) + enable_irq_wake(motg->phy_irq); + if (motg->pdata->pmic_id_irq) + enable_irq_wake(motg->pdata->pmic_id_irq); + if (motg->ext_id_irq) + 
enable_irq_wake(motg->ext_id_irq); + } + if (bus) + clear_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags); + + msm_otg_update_bus_bw(motg, USB_NO_PERF_VOTE); + + atomic_set(&motg->in_lpm, 1); + + if (host_bus_suspend || device_bus_suspend) { + /* Enable ASYNC IRQ during LPM */ + enable_irq(motg->async_irq); + enable_irq(motg->irq); + } + if (motg->phy_irq) + enable_irq(motg->phy_irq); + + pm_relax(&motg->pdev->dev); + + dev_dbg(phy->dev, "LPM caps = %lu flags = %lu\n", + motg->caps, motg->lpm_flags); + dev_info(phy->dev, "USB in low power mode\n"); + msm_otg_dbg_log_event(phy, "LPM ENTER DONE", + motg->caps, motg->lpm_flags); + + if (motg->err_event_seen) { + motg->err_event_seen = false; + if (motg->vbus_state != test_bit(B_SESS_VLD, &motg->inputs)) + msm_otg_set_vbus_state(motg->vbus_state); + if (motg->id_state != test_bit(ID, &motg->inputs)) + msm_id_status_w(&motg->id_status_work.work); + } + + return 0; + +phy_suspend_fail: + enable_irq(motg->irq); + if (motg->phy_irq) + enable_irq(motg->phy_irq); + return ret; +} + +static int msm_otg_resume(struct msm_otg *motg) +{ + struct usb_phy *phy = &motg->phy; + struct usb_bus *bus = phy->otg->host; + struct usb_hcd *hcd = bus_to_hcd(phy->otg->host); + struct msm_otg_platform_data *pdata = motg->pdata; + int cnt = 0; + unsigned int temp; + unsigned int ret; + u32 func_ctrl; + + msm_otg_dbg_log_event(phy, "LPM EXIT START", motg->inputs, + phy->otg->state); + if (!atomic_read(&motg->in_lpm)) { + msm_otg_dbg_log_event(phy, "USB NOT IN LPM", + atomic_read(&motg->in_lpm), phy->otg->state); + return 0; + } + + pm_stay_awake(&motg->pdev->dev); + if (motg->phy_irq) + disable_irq(motg->phy_irq); + + if (motg->host_bus_suspend || motg->device_bus_suspend) + disable_irq(motg->irq); + + /* + * If we are resuming from the device bus suspend, restore + * the max performance bus vote. Otherwise put a minimum + * bus vote to satisfy the requirement for enabling clocks. 
+ */ + + if (motg->device_bus_suspend && debug_bus_voting_enabled) + msm_otg_update_bus_bw(motg, USB_MAX_PERF_VOTE); + else + msm_otg_update_bus_bw(motg, USB_MIN_PERF_VOTE); + + + /* Vote for TCXO when waking up the phy */ + if (motg->lpm_flags & XO_SHUTDOWN) { + if (motg->xo_clk) + clk_prepare_enable(motg->xo_clk); + motg->lpm_flags &= ~XO_SHUTDOWN; + } + + if (motg->lpm_flags & CLOCKS_DOWN) { + if (motg->phy_csr_clk) { + ret = clk_prepare_enable(motg->phy_csr_clk); + WARN(ret, "USB phy_csr_clk enable failed\n"); + } + ret = clk_prepare_enable(motg->core_clk); + WARN(ret, "USB core_clk enable failed\n"); + ret = clk_prepare_enable(motg->pclk); + WARN(ret, "USB pclk enable failed\n"); + motg->lpm_flags &= ~CLOCKS_DOWN; + } + + if (motg->lpm_flags & PHY_PWR_COLLAPSED) { + msm_hsusb_ldo_enable(motg, USB_PHY_REG_ON); + motg->lpm_flags &= ~PHY_PWR_COLLAPSED; + } else if (motg->lpm_flags & PHY_REGULATORS_LPM) { + msm_hsusb_ldo_enable(motg, USB_PHY_REG_LPM_OFF); + motg->lpm_flags &= ~PHY_REGULATORS_LPM; + } + + if (motg->lpm_flags & PHY_RETENTIONED || + (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED)) { + msm_hsusb_config_vddcx(1); + ret = regulator_enable(hsusb_vdd); + WARN(ret, "hsusb_vdd LDO enable failed\n"); + msm_otg_disable_phy_hv_int(motg); + msm_otg_exit_phy_retention(motg); + motg->lpm_flags &= ~PHY_RETENTIONED; + if (pdata->vddmin_gpio && motg->device_bus_suspend) + gpio_direction_input(pdata->vddmin_gpio); + } else if (motg->device_bus_suspend) { + msm_otg_disable_phy_hv_int(motg); + } + + temp = readl_relaxed(USB_USBCMD); + temp &= ~ASYNC_INTR_CTRL; + temp &= ~ULPI_STP_CTRL; + writel_relaxed(temp, USB_USBCMD); + + /* + * PHY comes out of low power mode (LPM) in case of wakeup + * from asynchronous interrupt. 
+ */ + if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD)) + goto skip_phy_resume; + + writel_relaxed(readl_relaxed(USB_PORTSC) & ~PORTSC_PHCD, USB_PORTSC); + + while (cnt < PHY_RESUME_TIMEOUT_USEC) { + if (!(readl_relaxed(USB_PORTSC) & PORTSC_PHCD)) + break; + udelay(1); + cnt++; + } + + if (cnt >= PHY_RESUME_TIMEOUT_USEC) { + /* + * This is a fatal error. Reset the link and + * PHY. USB state can not be restored. Re-insertion + * of USB cable is the only way to get USB working. + */ + dev_err(phy->dev, "Unable to resume USB. Re-plugin the cable\n" + ); + msm_otg_reset(phy); + } + +skip_phy_resume: + if (motg->caps & ALLOW_VDD_MIN_WITH_RETENTION_DISABLED) { + /* put the controller in normal mode */ + func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL); + func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK; + func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL; + ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL); + } + + if (device_may_wakeup(phy->dev)) { + if (motg->host_bus_suspend || motg->device_bus_suspend) { + disable_irq_wake(motg->async_irq); + disable_irq_wake(motg->irq); + } + + if (motg->phy_irq) + disable_irq_wake(motg->phy_irq); + if (motg->pdata->pmic_id_irq) + disable_irq_wake(motg->pdata->pmic_id_irq); + if (motg->ext_id_irq) + disable_irq_wake(motg->ext_id_irq); + } + if (bus) + set_bit(HCD_FLAG_HW_ACCESSIBLE, &(bus_to_hcd(bus))->flags); + + atomic_set(&motg->in_lpm, 0); + + if (motg->async_int) { + /* Match the disable_irq call from ISR */ + enable_irq(motg->async_int); + motg->async_int = 0; + } + if (motg->phy_irq) + enable_irq(motg->phy_irq); + enable_irq(motg->irq); + + /* Enable ASYNC_IRQ only during LPM */ + if (motg->host_bus_suspend || motg->device_bus_suspend) + disable_irq(motg->async_irq); + + if (motg->phy_irq_pending) { + motg->phy_irq_pending = false; + msm_id_status_w(&motg->id_status_work.work); + } + + if (motg->host_bus_suspend) { + usb_hcd_resume_root_hub(hcd); + schedule_delayed_work(&motg->perf_vote_work, + msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC)); + } + + 
dev_info(phy->dev, "USB exited from low power mode\n"); + msm_otg_dbg_log_event(phy, "LPM EXIT DONE", + motg->caps, motg->lpm_flags); + + return 0; +} +#endif + +static int get_psy_type(struct msm_otg *motg) +{ + union power_supply_propval pval = {0}; + + if (!psy) { + psy = power_supply_get_by_name("usb"); + if (!psy) { + dev_err(motg->phy.dev, "Could not get usb power_supply\n"); + return -ENODEV; + } + } + + power_supply_get_property(psy, POWER_SUPPLY_PROP_REAL_TYPE, &pval); + + return pval.intval; +} + +static int msm_otg_notify_chg_type(struct msm_otg *motg) +{ + static int charger_type; + union power_supply_propval propval; + int ret = 0; + /* + * TODO + * Unify OTG driver charger types and power supply charger types + */ + if (charger_type == motg->chg_type) + return 0; + + if (motg->chg_type == USB_SDP_CHARGER) + charger_type = POWER_SUPPLY_TYPE_USB; + else if (motg->chg_type == USB_CDP_CHARGER) + charger_type = POWER_SUPPLY_TYPE_USB_CDP; + else if (motg->chg_type == USB_DCP_CHARGER || + motg->chg_type == USB_NONCOMPLIANT_CHARGER) + charger_type = POWER_SUPPLY_TYPE_USB_DCP; + else if (motg->chg_type == USB_FLOATED_CHARGER) + charger_type = POWER_SUPPLY_TYPE_USB_FLOAT; + else + charger_type = POWER_SUPPLY_TYPE_UNKNOWN; + + pr_debug("Trying to set usb power supply type %d\n", charger_type); + + propval.intval = charger_type; + ret = power_supply_set_property(psy, POWER_SUPPLY_PROP_REAL_TYPE, + &propval); + if (ret) + dev_dbg(motg->phy.dev, "power supply error when setting property\n"); + + msm_otg_dbg_log_event(&motg->phy, "SET USB PWR SUPPLY TYPE", + motg->chg_type, charger_type); + return ret; +} + +static void msm_otg_notify_charger(struct msm_otg *motg, unsigned int mA) +{ + struct usb_gadget *g = motg->phy.otg->gadget; + union power_supply_propval pval = {0}; + int psy_type; + + if (g && g->is_a_peripheral) + return; + + dev_dbg(motg->phy.dev, "Requested curr from USB = %u\n", mA); + + psy_type = get_psy_type(motg); + if (psy_type == -ENODEV) + return; + 
+ if (msm_otg_notify_chg_type(motg)) + dev_dbg(motg->phy.dev, "Failed notifying %d charger type to PMIC\n", + motg->chg_type); + + psy_type = get_psy_type(motg); + if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT || + (psy_type == POWER_SUPPLY_TYPE_USB && + motg->enable_sdp_check_timer)) { + if (!mA) { + pval.intval = -ETIMEDOUT; + goto set_prop; + } + } + + if (motg->cur_power == mA) + return; + + dev_info(motg->phy.dev, "Avail curr from USB = %u\n", mA); + msm_otg_dbg_log_event(&motg->phy, "AVAIL CURR FROM USB", mA, 0); + + /* Set max current limit in uA */ + pval.intval = 1000 * mA; + +set_prop: + if (power_supply_set_property(psy, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, + &pval)) { + dev_dbg(motg->phy.dev, "power supply error when setting property\n"); + return; + } + + motg->cur_power = mA; +} + +static void msm_otg_notify_charger_work(struct work_struct *w) +{ + struct msm_otg *motg = container_of(w, + struct msm_otg, notify_charger_work); + + msm_otg_notify_charger(motg, motg->notify_current_mA); +} + +static int msm_otg_set_power(struct usb_phy *phy, unsigned int mA) +{ + struct msm_otg *motg = container_of(phy, struct msm_otg, phy); + + motg->notify_current_mA = mA; + /* + * Gadget driver uses set_power method to notify about the + * available current based on suspend/configured states. 
+ */ + if (motg->chg_type == USB_SDP_CHARGER || + get_psy_type(motg) == POWER_SUPPLY_TYPE_USB || + get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT) + queue_work(motg->otg_wq, &motg->notify_charger_work); + + return 0; +} + +static void msm_hsusb_vbus_power(struct msm_otg *motg, bool on); + +static void msm_otg_perf_vote_update(struct msm_otg *motg, bool perf_mode) +{ + static bool curr_perf_mode; + int ret, latency = motg->pm_qos_latency; + long clk_rate; + + if (curr_perf_mode == perf_mode) + return; + + if (perf_mode) { + if (latency) + cpu_latency_qos_update_request(&motg->pm_qos_req_dma, latency); + msm_otg_update_bus_bw(motg, USB_MAX_PERF_VOTE); + clk_rate = motg->core_clk_rate; + } else { + if (latency) + cpu_latency_qos_update_request(&motg->pm_qos_req_dma, + PM_QOS_DEFAULT_VALUE); + msm_otg_update_bus_bw(motg, USB_MIN_PERF_VOTE); + clk_rate = motg->core_clk_svs_rate; + } + + if (clk_rate) { + ret = clk_set_rate(motg->core_clk, clk_rate); + if (ret) + dev_err(motg->phy.dev, "sys_clk set_rate fail:%d %ld\n", + ret, clk_rate); + } + curr_perf_mode = perf_mode; + pr_debug("%s: latency updated to: %d, core_freq to: %ld\n", __func__, + latency, clk_rate); +} + +static void msm_otg_perf_vote_work(struct work_struct *w) +{ + struct msm_otg *motg = container_of(w, struct msm_otg, + perf_vote_work.work); + unsigned int curr_sample_int_count; + bool in_perf_mode = false; + + curr_sample_int_count = motg->usb_irq_count; + motg->usb_irq_count = 0; + + if (curr_sample_int_count >= PM_QOS_THRESHOLD) + in_perf_mode = true; + + msm_otg_perf_vote_update(motg, in_perf_mode); + pr_debug("%s: in_perf_mode:%u, interrupts in last sample:%u\n", + __func__, in_perf_mode, curr_sample_int_count); + + schedule_delayed_work(&motg->perf_vote_work, + msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC)); +} + +static void msm_otg_start_host(struct usb_otg *otg, int on) +{ + struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy); + struct msm_otg_platform_data *pdata = 
motg->pdata; + struct usb_hcd *hcd; + u32 val; + + if (!otg->host) + return; + + hcd = bus_to_hcd(otg->host); + + msm_otg_dbg_log_event(&motg->phy, "PM RT: StartHost GET", + get_pm_runtime_counter(motg->phy.dev), 0); + pm_runtime_get_sync(otg->usb_phy->dev); + if (on) { + dev_dbg(otg->usb_phy->dev, "host on\n"); + msm_otg_dbg_log_event(&motg->phy, "HOST ON", + motg->inputs, otg->state); + msm_hsusb_vbus_power(motg, 1); + msm_otg_reset(&motg->phy); + + if (pdata->otg_control == OTG_PHY_CONTROL) + ulpi_write(otg->usb_phy, OTG_COMP_DISABLE, + ULPI_SET(ULPI_PWR_CLK_MNG_REG)); + + if (pdata->enable_axi_prefetch) { + val = readl_relaxed(USB_HS_APF_CTRL); + val &= ~APF_CTRL_EN; + writel_relaxed(val, USB_HS_APF_CTRL); + } + usb_add_hcd(hcd, hcd->irq, IRQF_SHARED); +#ifdef CONFIG_SMP + motg->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ; + motg->pm_qos_req_dma.irq = motg->irq; +#endif + cpu_latency_qos_add_request(&motg->pm_qos_req_dma, + PM_QOS_DEFAULT_VALUE); + /* start in perf mode for better performance initially */ + msm_otg_perf_vote_update(motg, true); + schedule_delayed_work(&motg->perf_vote_work, + msecs_to_jiffies(1000 * PM_QOS_SAMPLE_SEC)); + } else { + dev_dbg(otg->usb_phy->dev, "host off\n"); + msm_otg_dbg_log_event(&motg->phy, "HOST OFF", + motg->inputs, otg->state); + msm_hsusb_vbus_power(motg, 0); + + cancel_delayed_work_sync(&motg->perf_vote_work); + msm_otg_perf_vote_update(motg, false); + cpu_latency_qos_remove_request(&motg->pm_qos_req_dma); + + pm_runtime_disable(&hcd->self.root_hub->dev); + pm_runtime_barrier(&hcd->self.root_hub->dev); + usb_remove_hcd(hcd); + msm_otg_reset(&motg->phy); + + if (pdata->enable_axi_prefetch) + writel_relaxed(readl_relaxed(USB_HS_APF_CTRL) + | (APF_CTRL_EN), USB_HS_APF_CTRL); + + /* HCD core reset all bits of PORTSC. 
select ULPI phy */ + writel_relaxed(0x80000000, USB_PORTSC); + + if (pdata->otg_control == OTG_PHY_CONTROL) + ulpi_write(otg->usb_phy, OTG_COMP_DISABLE, + ULPI_CLR(ULPI_PWR_CLK_MNG_REG)); + } + msm_otg_dbg_log_event(&motg->phy, "PM RT: StartHost PUT", + get_pm_runtime_counter(motg->phy.dev), 0); + + pm_runtime_mark_last_busy(otg->usb_phy->dev); + pm_runtime_put_autosuspend(otg->usb_phy->dev); +} + +static void msm_hsusb_vbus_power(struct msm_otg *motg, bool on) +{ + int ret; + static bool vbus_is_on; + + msm_otg_dbg_log_event(&motg->phy, "VBUS POWER", on, vbus_is_on); + if (vbus_is_on == on) + return; + + if (!vbus_otg) { + pr_err("vbus_otg is NULL.\n"); + return; + } + + /* + * if entering host mode tell the charger to not draw any current + * from usb before turning on the boost. + * if exiting host mode disable the boost before enabling to draw + * current from the source. + */ + if (on) { + ret = regulator_enable(vbus_otg); + if (ret) { + pr_err("unable to enable vbus_otg\n"); + return; + } + vbus_is_on = true; + } else { + ret = regulator_disable(vbus_otg); + if (ret) { + pr_err("unable to disable vbus_otg\n"); + return; + } + vbus_is_on = false; + } +} + +static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host) +{ + struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy); + struct usb_hcd *hcd; + + /* + * Fail host registration if this board can support + * only peripheral configuration. 
+ */ + if (motg->pdata->mode == USB_PERIPHERAL) { + dev_info(otg->usb_phy->dev, "Host mode is not supported\n"); + return -ENODEV; + } + + if (host) { + vbus_otg = devm_regulator_get(motg->phy.dev, "vbus_otg"); + if (IS_ERR(vbus_otg)) { + msm_otg_dbg_log_event(&motg->phy, + "UNABLE TO GET VBUS_OTG", + otg->state, 0); + pr_err("Unable to get vbus_otg\n"); + return PTR_ERR(vbus_otg); + } + } else { + if (otg->state == OTG_STATE_A_HOST) { + msm_otg_start_host(otg, 0); + otg->host = NULL; + otg->state = OTG_STATE_UNDEFINED; + queue_work(motg->otg_wq, &motg->sm_work); + } else { + otg->host = NULL; + } + + return 0; + } + + hcd = bus_to_hcd(host); + hcd->power_budget = motg->pdata->power_budget; + + otg->host = host; + dev_dbg(otg->usb_phy->dev, "host driver registered w/ transceiver\n"); + msm_otg_dbg_log_event(&motg->phy, "HOST DRIVER REGISTERED", + hcd->power_budget, motg->pdata->mode); + + /* + * Kick the state machine work, if peripheral is not supported + * or peripheral is already registered with us. 
+ */
+ if (motg->pdata->mode == USB_HOST || otg->gadget)
+ queue_work(motg->otg_wq, &motg->sm_work);
+
+ return 0;
+}
+
+static void msm_otg_start_peripheral(struct usb_otg *otg, int on)
+{
+ struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy);
+ struct msm_otg_platform_data *pdata = motg->pdata;
+ struct pinctrl_state *set_state;
+ int ret;
+
+ if (!otg->gadget)
+ return;
+
+ msm_otg_dbg_log_event(&motg->phy, "PM RT: StartPeri GET",
+ get_pm_runtime_counter(motg->phy.dev), 0);
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ if (on) {
+ dev_dbg(otg->usb_phy->dev, "gadget on\n");
+ msm_otg_dbg_log_event(&motg->phy, "GADGET ON",
+ motg->inputs, otg->state);
+
+ /* Configure BUS performance parameters for MAX bandwidth */
+ if (debug_bus_voting_enabled)
+ msm_otg_update_bus_bw(motg, USB_MAX_PERF_VOTE);
+ /* bump up usb core_clk to default */
+ clk_set_rate(motg->core_clk, motg->core_clk_rate);
+
+ usb_gadget_vbus_connect(otg->gadget);
+
+ /*
+ * Request VDD min gpio, if needed to support VDD
+ * minimization during peripheral bus suspend.
+ */ + if (pdata->vddmin_gpio) { + if (motg->phy_pinctrl) { + set_state = + pinctrl_lookup_state(motg->phy_pinctrl, + "hsusb_active"); + if (IS_ERR(set_state)) { + pr_err("cannot get phy pinctrl active state\n"); + } else { + pinctrl_select_state(motg->phy_pinctrl, + set_state); + } + } + + ret = gpio_request(pdata->vddmin_gpio, + "MSM_OTG_VDD_MIN_GPIO"); + if (ret < 0) { + dev_err(otg->usb_phy->dev, "gpio req failed for vdd min:%d\n", + ret); + pdata->vddmin_gpio = 0; + } + } + } else { + dev_dbg(otg->usb_phy->dev, "gadget off\n"); + msm_otg_dbg_log_event(&motg->phy, "GADGET OFF", + motg->inputs, otg->state); + usb_gadget_vbus_disconnect(otg->gadget); + clear_bit(A_BUS_SUSPEND, &motg->inputs); + /* Configure BUS performance parameters to default */ + msm_otg_update_bus_bw(motg, USB_MIN_PERF_VOTE); + + if (pdata->vddmin_gpio) { + gpio_free(pdata->vddmin_gpio); + if (motg->phy_pinctrl) { + set_state = + pinctrl_lookup_state(motg->phy_pinctrl, + "hsusb_sleep"); + if (IS_ERR(set_state)) + pr_err("cannot get phy pinctrl sleep state\n"); + else + pinctrl_select_state(motg->phy_pinctrl, + set_state); + } + } + } + msm_otg_dbg_log_event(&motg->phy, "PM RT: StartPeri PUT", + get_pm_runtime_counter(motg->phy.dev), 0); + pm_runtime_mark_last_busy(otg->usb_phy->dev); + pm_runtime_put_autosuspend(otg->usb_phy->dev); +} + +static int msm_otg_set_peripheral(struct usb_otg *otg, + struct usb_gadget *gadget) +{ + struct msm_otg *motg = container_of(otg->usb_phy, struct msm_otg, phy); + + /* + * Fail peripheral registration if this board can support + * only host configuration. 
+ */ + if (motg->pdata->mode == USB_HOST) { + dev_info(otg->usb_phy->dev, "Peripheral mode is not supported\n"); + return -ENODEV; + } + + if (!gadget) { + if (otg->state == OTG_STATE_B_PERIPHERAL) { + msm_otg_dbg_log_event(&motg->phy, + "PM RUNTIME: PERIPHERAL GET1", + get_pm_runtime_counter(otg->usb_phy->dev), 0); + msm_otg_start_peripheral(otg, 0); + otg->gadget = NULL; + otg->state = OTG_STATE_UNDEFINED; + queue_work(motg->otg_wq, &motg->sm_work); + } else { + otg->gadget = NULL; + } + + return 0; + } + otg->gadget = gadget; + dev_dbg(otg->usb_phy->dev, "peripheral driver registered w/ transceiver\n"); + msm_otg_dbg_log_event(&motg->phy, "PERIPHERAL DRIVER REGISTERED", + otg->state, motg->pdata->mode); + + /* + * Kick the state machine work, if host is not supported + * or host is already registered with us. + */ + if (motg->pdata->mode == USB_PERIPHERAL || otg->host) + queue_work(motg->otg_wq, &motg->sm_work); + + return 0; +} + +static bool msm_otg_read_pmic_id_state(struct msm_otg *motg) +{ + unsigned long flags; + bool id; + int ret; + + if (!motg->pdata->pmic_id_irq) + return -ENODEV; + + local_irq_save(flags); + ret = irq_get_irqchip_state(motg->pdata->pmic_id_irq, + IRQCHIP_STATE_LINE_LEVEL, &id); + local_irq_restore(flags); + + /* + * If we can not read ID line state for some reason, treat + * it as float. This would prevent MHL discovery and kicking + * host mode unnecessarily. + */ + if (ret < 0) + return true; + + return !!id; +} + +static bool msm_otg_read_phy_id_state(struct msm_otg *motg) +{ + u8 val; + + /* + * clear the pending/outstanding interrupts and + * read the ID status from the SRC_STATUS register. + */ + writeb_relaxed(USB_PHY_ID_MASK, USB2_PHY_USB_PHY_INTERRUPT_CLEAR1); + + writeb_relaxed(0x1, USB2_PHY_USB_PHY_IRQ_CMD); + /* + * Databook says 200 usec delay is required for + * clearing the interrupts. 
+ */ + udelay(200); + writeb_relaxed(0x0, USB2_PHY_USB_PHY_IRQ_CMD); + + val = readb_relaxed(USB2_PHY_USB_PHY_INTERRUPT_SRC_STATUS); + if (val & USB_PHY_IDDIG_1_0) + return false; /* ID is grounded */ + else + return true; +} + +static bool msm_chg_check_secondary_det(struct msm_otg *motg) +{ + struct usb_phy *phy = &motg->phy; + u32 chg_det; + + chg_det = ulpi_read(phy, 0x87); + + return (chg_det & 1); +} + +static void msm_chg_enable_secondary_det(struct msm_otg *motg) +{ + struct usb_phy *phy = &motg->phy; + + /* + * Configure DM as current source, DP as current sink + * and enable battery charging comparators. + */ + ulpi_write(phy, 0x8, 0x85); + ulpi_write(phy, 0x2, 0x85); + ulpi_write(phy, 0x1, 0x85); +} + +static bool msm_chg_check_primary_det(struct msm_otg *motg) +{ + struct usb_phy *phy = &motg->phy; + u32 chg_det; + bool ret = false; + + chg_det = ulpi_read(phy, 0x87); + ret = chg_det & 1; + /* Turn off VDP_SRC */ + ulpi_write(phy, 0x3, 0x86); + msleep(20); + + return ret; +} + +static void msm_chg_enable_primary_det(struct msm_otg *motg) +{ + struct usb_phy *phy = &motg->phy; + + /* + * Configure DP as current source, DM as current sink + * and enable battery charging comparators. + */ + ulpi_write(phy, 0x2, 0x85); + ulpi_write(phy, 0x1, 0x85); +} + +static bool msm_chg_check_dcd(struct msm_otg *motg) +{ + struct usb_phy *phy = &motg->phy; + u32 line_state; + + line_state = ulpi_read(phy, 0x87); + + return line_state & 2; +} + +static void msm_chg_disable_dcd(struct msm_otg *motg) +{ + struct usb_phy *phy = &motg->phy; + + ulpi_write(phy, 0x10, 0x86); + /* + * Disable the Rdm_down after + * the DCD is completed. + */ + ulpi_write(phy, 0x04, 0x0C); +} + +static void msm_chg_enable_dcd(struct msm_otg *motg) +{ + struct usb_phy *phy = &motg->phy; + + /* + * Idp_src and Rdm_down are de-coupled + * on Femto PHY. If Idp_src alone is + * enabled, DCD timeout is observed with + * wall charger. But a genuine DCD timeout + * may be incorrectly interpreted. 
Also + * BC1.2 compliance testers expect Rdm_down + * to enabled during DCD. Enable Rdm_down + * explicitly before enabling the DCD. + */ + ulpi_write(phy, 0x04, 0x0B); + ulpi_write(phy, 0x10, 0x85); +} + +static void msm_chg_block_on(struct msm_otg *motg) +{ + struct usb_phy *phy = &motg->phy; + u32 func_ctrl; + + /* put the controller in non-driving mode */ + msm_otg_dbg_log_event(&motg->phy, "PHY NON DRIVE", 0, 0); + func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL); + func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK; + func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING; + ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL); + + /* disable DP and DM pull down resistors */ + ulpi_write(phy, 0x6, 0xC); + /* Clear charger detecting control bits */ + ulpi_write(phy, 0x1F, 0x86); + /* Clear alt interrupt latch and enable bits */ + ulpi_write(phy, 0x1F, 0x92); + ulpi_write(phy, 0x1F, 0x95); + udelay(100); +} + +static void msm_chg_block_off(struct msm_otg *motg) +{ + struct usb_phy *phy = &motg->phy; + u32 func_ctrl; + + /* Clear charger detecting control bits */ + ulpi_write(phy, 0x3F, 0x86); + /* Clear alt interrupt latch and enable bits */ + ulpi_write(phy, 0x1F, 0x92); + ulpi_write(phy, 0x1F, 0x95); + /* re-enable DP and DM pull down resistors */ + ulpi_write(phy, 0x6, 0xB); + + /* put the controller in normal mode */ + msm_otg_dbg_log_event(&motg->phy, "PHY MODE NORMAL", 0, 0); + func_ctrl = ulpi_read(phy, ULPI_FUNC_CTRL); + func_ctrl &= ~ULPI_FUNC_CTRL_OPMODE_MASK; + func_ctrl |= ULPI_FUNC_CTRL_OPMODE_NORMAL; + ulpi_write(phy, func_ctrl, ULPI_FUNC_CTRL); +} + +static void msm_otg_set_mode_nondriving(struct msm_otg *motg, + bool mode_nondriving) +{ + clk_prepare_enable(motg->xo_clk); + clk_prepare_enable(motg->phy_csr_clk); + clk_prepare_enable(motg->core_clk); + clk_prepare_enable(motg->pclk); + + msm_otg_exit_phy_retention(motg); + + if (mode_nondriving) + msm_chg_block_on(motg); + else + msm_chg_block_off(motg); + + msm_otg_enter_phy_retention(motg); + + clk_disable_unprepare(motg->pclk); 
+ clk_disable_unprepare(motg->core_clk); + clk_disable_unprepare(motg->phy_csr_clk); + clk_disable_unprepare(motg->xo_clk); +} + +#define MSM_CHG_DCD_TIMEOUT (750 * HZ/1000) /* 750 msec */ +#define MSM_CHG_DCD_POLL_TIME (50 * HZ/1000) /* 50 msec */ +#define MSM_CHG_PRIMARY_DET_TIME (50 * HZ/1000) /* TVDPSRC_ON */ +#define MSM_CHG_SECONDARY_DET_TIME (50 * HZ/1000) /* TVDMSRC_ON */ + +static void msm_chg_detect_work(struct work_struct *w) +{ + struct msm_otg *motg = container_of(w, struct msm_otg, chg_work.work); + struct usb_phy *phy = &motg->phy; + bool is_dcd = false, tmout, vout, queue_sm_work = false; + static bool dcd; + u32 line_state, dm_vlgc; + unsigned long delay = 0; + + dev_dbg(phy->dev, "chg detection work\n"); + msm_otg_dbg_log_event(phy, "CHG DETECTION WORK", + motg->chg_state, get_pm_runtime_counter(phy->dev)); + + switch (motg->chg_state) { + case USB_CHG_STATE_UNDEFINED: + pm_runtime_get_sync(phy->dev); + msm_chg_block_on(motg); + fallthrough; + case USB_CHG_STATE_IN_PROGRESS: + if (!motg->vbus_state) { + motg->chg_state = USB_CHG_STATE_UNDEFINED; + motg->chg_type = USB_INVALID_CHARGER; + msm_chg_block_off(motg); + pm_runtime_put_sync(phy->dev); + return; + } + + msm_chg_enable_dcd(motg); + motg->chg_state = USB_CHG_STATE_WAIT_FOR_DCD; + motg->dcd_time = 0; + delay = MSM_CHG_DCD_POLL_TIME; + break; + case USB_CHG_STATE_WAIT_FOR_DCD: + if (!motg->vbus_state) { + motg->chg_state = USB_CHG_STATE_IN_PROGRESS; + break; + } + + is_dcd = msm_chg_check_dcd(motg); + motg->dcd_time += MSM_CHG_DCD_POLL_TIME; + tmout = motg->dcd_time >= MSM_CHG_DCD_TIMEOUT; + if (is_dcd || tmout) { + if (is_dcd) + dcd = true; + else + dcd = false; + msm_chg_disable_dcd(motg); + msm_chg_enable_primary_det(motg); + delay = MSM_CHG_PRIMARY_DET_TIME; + motg->chg_state = USB_CHG_STATE_DCD_DONE; + } else { + delay = MSM_CHG_DCD_POLL_TIME; + } + break; + case USB_CHG_STATE_DCD_DONE: + if (!motg->vbus_state) { + motg->chg_state = USB_CHG_STATE_IN_PROGRESS; + break; + } + + vout = 
msm_chg_check_primary_det(motg); + line_state = readl_relaxed(USB_PORTSC) & PORTSC_LS; + dm_vlgc = line_state & PORTSC_LS_DM; + if (vout && !dm_vlgc) { /* VDAT_REF < DM < VLGC */ + if (line_state) { /* DP > VLGC */ + motg->chg_type = USB_NONCOMPLIANT_CHARGER; + motg->chg_state = USB_CHG_STATE_DETECTED; + } else { + msm_chg_enable_secondary_det(motg); + delay = MSM_CHG_SECONDARY_DET_TIME; + motg->chg_state = USB_CHG_STATE_PRIMARY_DONE; + } + } else { /* DM < VDAT_REF || DM > VLGC */ + if (line_state) /* DP > VLGC or/and DM > VLGC */ + motg->chg_type = USB_NONCOMPLIANT_CHARGER; + else if (!dcd && floated_charger_enable) + motg->chg_type = USB_FLOATED_CHARGER; + else + motg->chg_type = USB_SDP_CHARGER; + + motg->chg_state = USB_CHG_STATE_DETECTED; + } + break; + case USB_CHG_STATE_PRIMARY_DONE: + if (!motg->vbus_state) { + motg->chg_state = USB_CHG_STATE_IN_PROGRESS; + break; + } + + vout = msm_chg_check_secondary_det(motg); + if (vout) + motg->chg_type = USB_DCP_CHARGER; + else + motg->chg_type = USB_CDP_CHARGER; + motg->chg_state = USB_CHG_STATE_SECONDARY_DONE; + fallthrough; + case USB_CHG_STATE_SECONDARY_DONE: + motg->chg_state = USB_CHG_STATE_DETECTED; + fallthrough; + case USB_CHG_STATE_DETECTED: + if (!motg->vbus_state) { + motg->chg_state = USB_CHG_STATE_IN_PROGRESS; + break; + } + + msm_chg_block_off(motg); + + /* Enable VDP_SRC in case of DCP charger */ + if (motg->chg_type == USB_DCP_CHARGER) { + ulpi_write(phy, 0x2, 0x85); + msm_otg_notify_charger(motg, dcp_max_current); + } else if (motg->chg_type == USB_NONCOMPLIANT_CHARGER) + msm_otg_notify_charger(motg, dcp_max_current); + else if (motg->chg_type == USB_FLOATED_CHARGER || + motg->chg_type == USB_CDP_CHARGER) + msm_otg_notify_charger(motg, IDEV_CHG_MAX); + + msm_otg_dbg_log_event(phy, "CHG WORK PUT: CHG_TYPE", + motg->chg_type, get_pm_runtime_counter(phy->dev)); + /* to match _get at the start of chg_det_work */ + pm_runtime_mark_last_busy(phy->dev); + pm_runtime_put_autosuspend(phy->dev); + 
motg->chg_state = USB_CHG_STATE_QUEUE_SM_WORK; + break; + case USB_CHG_STATE_QUEUE_SM_WORK: + if (!motg->vbus_state) { + pm_runtime_get_sync(phy->dev); + /* Turn off VDP_SRC if charger is DCP type */ + if (motg->chg_type == USB_DCP_CHARGER) + ulpi_write(phy, 0x2, 0x86); + + motg->chg_state = USB_CHG_STATE_UNDEFINED; + if (motg->chg_type == USB_SDP_CHARGER || + motg->chg_type == USB_CDP_CHARGER) + queue_sm_work = true; + + motg->chg_type = USB_INVALID_CHARGER; + msm_otg_notify_charger(motg, 0); + motg->cur_power = 0; + msm_chg_block_off(motg); + pm_runtime_mark_last_busy(phy->dev); + pm_runtime_put_autosuspend(phy->dev); + if (queue_sm_work) + queue_work(motg->otg_wq, &motg->sm_work); + else + return; + } + + if (motg->chg_type == USB_CDP_CHARGER || + motg->chg_type == USB_SDP_CHARGER) + queue_work(motg->otg_wq, &motg->sm_work); + + return; + default: + return; + } + + msm_otg_dbg_log_event(phy, "CHG WORK: QUEUE", motg->chg_type, delay); + queue_delayed_work(motg->otg_wq, &motg->chg_work, delay); +} + +/* + * We support OTG, Peripheral only and Host only configurations. In case + * of OTG, mode switch (host-->peripheral/peripheral-->host) can happen + * via Id pin status or user request (debugfs). Id/BSV interrupts are not + * enabled when switch is controlled by user and default mode is supplied + * by board file, which can be changed by userspace later. 
+ */ +static void msm_otg_init_sm(struct msm_otg *motg) +{ + struct msm_otg_platform_data *pdata = motg->pdata; + u32 otgsc = readl_relaxed(USB_OTGSC); + + switch (pdata->mode) { + case USB_OTG: + if (pdata->otg_control == OTG_USER_CONTROL) { + if (pdata->default_mode == USB_HOST) { + clear_bit(ID, &motg->inputs); + } else if (pdata->default_mode == USB_PERIPHERAL) { + set_bit(ID, &motg->inputs); + set_bit(B_SESS_VLD, &motg->inputs); + } else { + set_bit(ID, &motg->inputs); + clear_bit(B_SESS_VLD, &motg->inputs); + } + } else if (pdata->otg_control == OTG_PHY_CONTROL) { + if (otgsc & OTGSC_ID) + set_bit(ID, &motg->inputs); + else + clear_bit(ID, &motg->inputs); + if (otgsc & OTGSC_BSV) + set_bit(B_SESS_VLD, &motg->inputs); + else + clear_bit(B_SESS_VLD, &motg->inputs); + } else if (pdata->otg_control == OTG_PMIC_CONTROL) { + if (pdata->pmic_id_irq) { + if (msm_otg_read_pmic_id_state(motg)) + set_bit(ID, &motg->inputs); + else + clear_bit(ID, &motg->inputs); + } else if (motg->ext_id_irq) { + if (gpio_get_value(pdata->usb_id_gpio)) + set_bit(ID, &motg->inputs); + else + clear_bit(ID, &motg->inputs); + } else if (motg->phy_irq) { + if (msm_otg_read_phy_id_state(motg)) { + set_bit(ID, &motg->inputs); + if (pdata->phy_id_high_as_peripheral) + set_bit(B_SESS_VLD, + &motg->inputs); + } else { + clear_bit(ID, &motg->inputs); + if (pdata->phy_id_high_as_peripheral) + clear_bit(B_SESS_VLD, + &motg->inputs); + } + } + } + break; + case USB_HOST: + clear_bit(ID, &motg->inputs); + break; + case USB_PERIPHERAL: + set_bit(ID, &motg->inputs); + if (pdata->otg_control == OTG_PHY_CONTROL) { + if (otgsc & OTGSC_BSV) + set_bit(B_SESS_VLD, &motg->inputs); + else + clear_bit(B_SESS_VLD, &motg->inputs); + } else if (pdata->otg_control == OTG_USER_CONTROL) { + set_bit(ID, &motg->inputs); + set_bit(B_SESS_VLD, &motg->inputs); + } + break; + default: + break; + } + msm_otg_dbg_log_event(&motg->phy, "SM INIT", pdata->mode, motg->inputs); + if (motg->id_state != USB_ID_GROUND) + 
motg->id_state = (test_bit(ID, &motg->inputs)) ? USB_ID_FLOAT : + USB_ID_GROUND; +} + +static void check_for_sdp_connection(struct work_struct *w) +{ + struct msm_otg *motg = container_of(w, struct msm_otg, sdp_check.work); + + /* Cable disconnected or device enumerated as SDP */ + if (!motg->vbus_state || motg->phy.otg->gadget->state > + USB_STATE_DEFAULT) + return; + + /* floating D+/D- lines detected */ + motg->vbus_state = 0; + msm_otg_dbg_log_event(&motg->phy, "Q RW SDP CHK", motg->vbus_state, 0); + msm_otg_set_vbus_state(motg->vbus_state); +} + +#define DP_PULSE_WIDTH_MSEC 200 +static int +msm_otg_phy_drive_dp_pulse(struct msm_otg *motg, unsigned int pulse_width); + +static void msm_otg_sm_work(struct work_struct *w) +{ + struct msm_otg *motg = container_of(w, struct msm_otg, sm_work); + struct usb_phy *phy = &motg->phy; + struct usb_otg *otg = motg->phy.otg; + struct device *dev = otg->usb_phy->dev; + bool work = false; + int ret; + + pr_debug("%s work\n", usb_otg_state_string(otg->state)); + msm_otg_dbg_log_event(phy, "SM WORK:", otg->state, motg->inputs); + + /* Just resume h/w if reqd, pm_count is handled based on state/inputs */ + if (motg->resume_pending) { + pm_runtime_get_sync(dev); + if (atomic_read(&motg->in_lpm)) { + dev_err(dev, "SM WORK: USB is in LPM\n"); + msm_otg_dbg_log_event(phy, "SM WORK: USB IS IN LPM", + otg->state, motg->inputs); + msm_otg_resume(motg); + } + motg->resume_pending = false; + pm_runtime_put_noidle(dev); + } + + switch (otg->state) { + case OTG_STATE_UNDEFINED: + pm_runtime_get_sync(dev); + msm_otg_reset(otg->usb_phy); + /* Add child device only after block reset */ + ret = of_platform_populate(motg->pdev->dev.of_node, NULL, NULL, + &motg->pdev->dev); + if (ret) + dev_dbg(&motg->pdev->dev, "failed to add BAM core\n"); + + msm_otg_init_sm(motg); + otg->state = OTG_STATE_B_IDLE; + if (!test_bit(B_SESS_VLD, &motg->inputs) && + test_bit(ID, &motg->inputs)) { + msm_otg_dbg_log_event(phy, "PM RUNTIME: UNDEF PUT", + 
get_pm_runtime_counter(dev), 0); + pm_runtime_put_sync(dev); + break; + } else if (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_CDP) { + pr_debug("Connected to CDP, pull DP up from sm_work\n"); + msm_otg_phy_drive_dp_pulse(motg, DP_PULSE_WIDTH_MSEC); + } + pm_runtime_put(dev); + fallthrough; + case OTG_STATE_B_IDLE: + if (!test_bit(ID, &motg->inputs) && otg->host) { + pr_debug("!id\n"); + msm_otg_dbg_log_event(phy, "!ID", + motg->inputs, otg->state); + if (!otg->host) { + msm_otg_dbg_log_event(phy, + "SM WORK: Host Not Set", + otg->state, motg->inputs); + break; + } + + msm_otg_start_host(otg, 1); + otg->state = OTG_STATE_A_HOST; + } else if (test_bit(B_SESS_VLD, &motg->inputs)) { + pr_debug("b_sess_vld\n"); + msm_otg_dbg_log_event(phy, "B_SESS_VLD", + motg->inputs, otg->state); + if (!otg->gadget) { + msm_otg_dbg_log_event(phy, + "SM WORK: Gadget Not Set", + otg->state, motg->inputs); + break; + } + + pm_runtime_get_sync(otg->usb_phy->dev); + msm_otg_start_peripheral(otg, 1); + if (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT || + (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB && + motg->enable_sdp_check_timer)) { + queue_delayed_work(motg->otg_wq, + &motg->sdp_check, + msecs_to_jiffies( + (phy->flags & PHY_SOFT_CONNECT) ? 
+ SDP_CHECK_DELAY_MS : + SDP_CHECK_BOOT_DELAY_MS)); + } + otg->state = OTG_STATE_B_PERIPHERAL; + } else { + pr_debug("Cable disconnected\n"); + msm_otg_dbg_log_event(phy, "RT: Cable DISC", + get_pm_runtime_counter(dev), 0); + msm_otg_notify_charger(motg, 0); + pm_runtime_autosuspend(dev); + } + break; + case OTG_STATE_B_PERIPHERAL: + if (!test_bit(B_SESS_VLD, &motg->inputs)) { + cancel_delayed_work_sync(&motg->sdp_check); + msm_otg_start_peripheral(otg, 0); + msm_otg_dbg_log_event(phy, "RT PM: B_PERI A PUT", + get_pm_runtime_counter(dev), 0); + /* Schedule work to finish cable disconnect processing*/ + otg->state = OTG_STATE_B_IDLE; + /* _put for _get done on cable connect in B_IDLE */ + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + work = true; + } else if (test_bit(A_BUS_SUSPEND, &motg->inputs)) { + pr_debug("a_bus_suspend\n"); + msm_otg_dbg_log_event(phy, "BUS_SUSPEND: PM RT PUT", + get_pm_runtime_counter(dev), 0); + otg->state = OTG_STATE_B_SUSPEND; + /* _get on connect in B_IDLE or host resume in B_SUSP */ + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + } + break; + case OTG_STATE_B_SUSPEND: + if (!test_bit(B_SESS_VLD, &motg->inputs)) { + cancel_delayed_work_sync(&motg->sdp_check); + msm_otg_start_peripheral(otg, 0); + otg->state = OTG_STATE_B_IDLE; + /* Schedule work to finish cable disconnect processing*/ + work = true; + } else if (!test_bit(A_BUS_SUSPEND, &motg->inputs)) { + pr_debug("!a_bus_suspend\n"); + otg->state = OTG_STATE_B_PERIPHERAL; + msm_otg_dbg_log_event(phy, "BUS_RESUME: PM RT GET", + get_pm_runtime_counter(dev), 0); + pm_runtime_get_sync(dev); + } + break; + case OTG_STATE_A_HOST: + if (test_bit(ID, &motg->inputs)) { + msm_otg_start_host(otg, 0); + otg->state = OTG_STATE_B_IDLE; + work = true; + } + break; + default: + break; + } + + if (work) + queue_work(motg->otg_wq, &motg->sm_work); +} + +static irqreturn_t msm_otg_irq(int irq, void *data) +{ + struct msm_otg *motg = data; + struct usb_otg 
*otg = motg->phy.otg; + u32 otgsc = 0; + bool work = false; + + if (atomic_read(&motg->in_lpm)) { + pr_debug("OTG IRQ: %d in LPM\n", irq); + msm_otg_dbg_log_event(&motg->phy, "OTG IRQ IS IN LPM", + irq, otg->state); + /*Ignore interrupt if one interrupt already seen in LPM*/ + if (motg->async_int) + return IRQ_HANDLED; + + disable_irq_nosync(irq); + motg->async_int = irq; + msm_otg_kick_sm_work(motg); + + return IRQ_HANDLED; + } + motg->usb_irq_count++; + + otgsc = readl_relaxed(USB_OTGSC); + if (!(otgsc & (OTGSC_IDIS | OTGSC_BSVIS))) + return IRQ_NONE; + + if ((otgsc & OTGSC_IDIS) && (otgsc & OTGSC_IDIE)) { + if (otgsc & OTGSC_ID) { + dev_dbg(otg->usb_phy->dev, "ID set\n"); + msm_otg_dbg_log_event(&motg->phy, "ID SET", + motg->inputs, otg->state); + set_bit(ID, &motg->inputs); + } else { + dev_dbg(otg->usb_phy->dev, "ID clear\n"); + msm_otg_dbg_log_event(&motg->phy, "ID CLEAR", + motg->inputs, otg->state); + clear_bit(ID, &motg->inputs); + } + work = true; + } else if ((otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) { + if (otgsc & OTGSC_BSV) { + dev_dbg(otg->usb_phy->dev, "BSV set\n"); + msm_otg_dbg_log_event(&motg->phy, "BSV SET", + motg->inputs, otg->state); + set_bit(B_SESS_VLD, &motg->inputs); + } else { + dev_dbg(otg->usb_phy->dev, "BSV clear\n"); + msm_otg_dbg_log_event(&motg->phy, "BSV CLEAR", + motg->inputs, otg->state); + clear_bit(B_SESS_VLD, &motg->inputs); + clear_bit(A_BUS_SUSPEND, &motg->inputs); + } + work = true; + } + if (work) + queue_work(motg->otg_wq, &motg->sm_work); + + writel_relaxed(otgsc, USB_OTGSC); + + return IRQ_HANDLED; +} + +static int +msm_otg_phy_drive_dp_pulse(struct msm_otg *motg, unsigned int pulse_width) +{ + u32 val; + + msm_otg_dbg_log_event(&motg->phy, "DRIVE DP PULSE", motg->inputs, + get_pm_runtime_counter(motg->phy.dev)); + + /* + * We may come here with hardware in LPM, come out + * LPM and prevent any further transitions to LPM + * while DP pulse is driven. 
+ */
+	pm_runtime_get_sync(motg->phy.dev);
+	if (atomic_read(&motg->in_lpm)) {
+		/* Still in LPM after resume attempt: bail out without driving the pulse */
+		pm_runtime_put_noidle(motg->phy.dev);
+		msm_otg_dbg_log_event(&motg->phy, "LPM FAIL",
+				motg->inputs,
+				get_pm_runtime_counter(motg->phy.dev));
+		return -EACCES;
+	}
+
+	msm_otg_exit_phy_retention(motg);
+
+	/* Take the PHY out of suspend before touching the UTMI controls */
+	val = readb_relaxed(USB_PHY_CSR_PHY_CTRL2);
+	val |= USB2_SUSPEND_N;
+	writeb_relaxed(val, USB_PHY_CSR_PHY_CTRL2);
+
+	val = readb_relaxed(USB_PHY_CSR_PHY_UTMI_CTRL1);
+	val &= ~XCVR_SEL_MASK;
+	val |= (DM_PULLDOWN | DP_PULLDOWN | 0x1); /* NOTE(review): 0x1 = transceiver select value — confirm against PHY CSR spec */
+	writeb_relaxed(val, USB_PHY_CSR_PHY_UTMI_CTRL1);
+
+	val = readb_relaxed(USB_PHY_CSR_PHY_UTMI_CTRL0);
+	val &= ~OP_MODE_MASK;
+	val |= (TERM_SEL | SLEEP_M | 0x20); /* NOTE(review): 0x20 looks like an op-mode field value — confirm */
+	writeb_relaxed(val, USB_PHY_CSR_PHY_UTMI_CTRL0);
+
+	val = readb_relaxed(USB_PHY_CSR_PHY_CFG0); /* NOTE(review): this read is unused — CFG0 is overwritten unconditionally below */
+	writeb_relaxed(0x6, USB_PHY_CSR_PHY_CFG0);
+
+	/* Pulse PORT_SELECT high briefly to latch the UTMI override settings */
+	writeb_relaxed(
+		(readb_relaxed(USB_PHY_CSR_PHY_UTMI_CTRL0) | PORT_SELECT),
+		USB_PHY_CSR_PHY_UTMI_CTRL0);
+
+	usleep_range(10, 20);
+
+	val = readb_relaxed(USB_PHY_CSR_PHY_UTMI_CTRL0);
+	val &= ~PORT_SELECT;
+	writeb_relaxed(val, USB_PHY_CSR_PHY_UTMI_CTRL0);
+
+	val = readb_relaxed(USB_PHY_CSR_PHY_UTMI_CTRL2); /* NOTE(review): read result unused — CTRL2 overwritten with 0xFF below */
+	writeb_relaxed(0xFF, USB_PHY_CSR_PHY_UTMI_CTRL2);
+
+	/* Assert TX_VALID to start driving the D+ pulse ... */
+	val = readb_relaxed(USB_PHY_CSR_PHY_UTMI_CTRL4);
+	val |= TX_VALID;
+	writeb_relaxed(val, USB_PHY_CSR_PHY_UTMI_CTRL4);
+
+	msleep(pulse_width);
+
+	/* ... and deassert it after pulse_width ms */
+	val = readb_relaxed(USB_PHY_CSR_PHY_UTMI_CTRL4);
+	val &= ~TX_VALID;
+	writeb_relaxed(val, USB_PHY_CSR_PHY_UTMI_CTRL4);
+
+	/* Full block reset restores normal (non-override) PHY operation */
+	msm_otg_reset(&motg->phy);
+
+	/*
+	 * The state machine work will run shortly which
+	 * takes care of putting the hardware in LPM.
+ */ + pm_runtime_put_noidle(motg->phy.dev); + msm_otg_dbg_log_event(&motg->phy, "DP PULSE DRIVEN", + motg->inputs, + get_pm_runtime_counter(motg->phy.dev)); + return 0; +} + +static void msm_otg_set_vbus_state(int online) +{ + struct msm_otg *motg = the_msm_otg; + struct usb_otg *otg = motg->phy.otg; + + motg->vbus_state = online; + + if (motg->err_event_seen) + return; + + if (online) { + pr_debug("EXTCON: BSV set\n"); + msm_otg_dbg_log_event(&motg->phy, "EXTCON: BSV SET", + motg->inputs, 0); + if (test_and_set_bit(B_SESS_VLD, &motg->inputs)) + return; + /* + * It might race with block reset happening in sm_work, while + * state machine is in undefined state. Add check to avoid it. + */ + if ((get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_CDP) && + (otg->state != OTG_STATE_UNDEFINED)) { + pr_debug("Connected to CDP, pull DP up\n"); + msm_otg_phy_drive_dp_pulse(motg, DP_PULSE_WIDTH_MSEC); + } + } else { + pr_debug("EXTCON: BSV clear\n"); + msm_otg_dbg_log_event(&motg->phy, "EXTCON: BSV CLEAR", + motg->inputs, 0); + if (!test_and_clear_bit(B_SESS_VLD, &motg->inputs)) + return; + } + + msm_otg_dbg_log_event(&motg->phy, "CHECK VBUS EVENT DURING SUSPEND", + atomic_read(&motg->pm_suspended), + motg->sm_work_pending); + + /* Move to host mode on vbus low if required */ + if (motg->pdata->vbus_low_as_hostmode) { + if (!test_bit(B_SESS_VLD, &motg->inputs)) + clear_bit(ID, &motg->inputs); + else + set_bit(ID, &motg->inputs); + } + + /* + * Enable PHY based charger detection in 2 cases: + * 1. PMI not capable of doing charger detection and provides VBUS + * notification with UNKNOWN psy type. + * 2. Data lines have been cut off from PMI, in which case it provides + * VBUS notification with FLOAT psy type and we want to do PHY based + * charger detection by setting 'chg_detection_for_float_charger'. 
+ */
+	if (test_bit(B_SESS_VLD, &motg->inputs) && !motg->chg_detection) {
+		if ((get_psy_type(motg) == POWER_SUPPLY_TYPE_UNKNOWN) ||
+		    (get_psy_type(motg) == POWER_SUPPLY_TYPE_USB_FLOAT &&
+		     chg_detection_for_float_charger))
+			motg->chg_detection = true;
+	}
+
+	/* Either run PHY-based charger detection or kick the OTG state machine */
+	if (motg->chg_detection)
+		queue_delayed_work(motg->otg_wq, &motg->chg_work, 0);
+	else
+		msm_otg_kick_sm_work(motg);
+}
+
+/*
+ * Debounced ID-pin worker: samples the ID line from whichever source is
+ * wired up (PMIC IRQ, external GPIO, or PHY comparator), updates the
+ * ID/B_SESS_VLD input bits accordingly and kicks the state machine on a
+ * change.  Scheduled MSM_ID_STATUS_DELAY ms after the ID interrupt fires.
+ */
+static void msm_id_status_w(struct work_struct *w)
+{
+	struct msm_otg *motg = container_of(w, struct msm_otg,
+				id_status_work.work);
+	int work = 0;
+
+	dev_dbg(motg->phy.dev, "ID status_w\n");
+
+	/* Priority order of ID sources: PMIC irq, then ext GPIO, then PHY irq */
+	if (motg->pdata->pmic_id_irq)
+		motg->id_state = msm_otg_read_pmic_id_state(motg);
+	else if (motg->ext_id_irq)
+		motg->id_state = gpio_get_value(motg->pdata->usb_id_gpio);
+	else if (motg->phy_irq)
+		motg->id_state = msm_otg_read_phy_id_state(motg);
+
+	/* After a fatal USB error event, ignore further ID changes */
+	if (motg->err_event_seen)
+		return;
+
+	if (motg->id_state) {
+		/* ID floating: peripheral role (switch_sel GPIO back to input) */
+		if (gpio_is_valid(motg->pdata->switch_sel_gpio))
+			gpio_direction_input(motg->pdata->switch_sel_gpio);
+		if (!test_and_set_bit(ID, &motg->inputs)) {
+			pr_debug("ID set\n");
+			/* Boards that treat ID-high as peripheral also fake B_SESS_VLD */
+			if (motg->pdata->phy_id_high_as_peripheral)
+				set_bit(B_SESS_VLD, &motg->inputs);
+			msm_otg_dbg_log_event(&motg->phy, "ID SET",
+					motg->inputs, motg->phy.otg->state);
+			work = 1;
+		}
+	} else {
+		/* ID grounded: host role */
+		if (gpio_is_valid(motg->pdata->switch_sel_gpio))
+			gpio_direction_output(motg->pdata->switch_sel_gpio, 1);
+		if (test_and_clear_bit(ID, &motg->inputs)) {
+			pr_debug("ID clear\n");
+			if (motg->pdata->phy_id_high_as_peripheral)
+				clear_bit(B_SESS_VLD, &motg->inputs);
+			msm_otg_dbg_log_event(&motg->phy, "ID CLEAR",
+					motg->inputs, motg->phy.otg->state);
+			work = 1;
+		}
+	}
+
+	/* Only kick the state machine once it has left UNDEFINED */
+	if (work && (motg->phy.otg->state != OTG_STATE_UNDEFINED)) {
+		msm_otg_dbg_log_event(&motg->phy,
+				"CHECK ID EVENT DURING SUSPEND",
+				atomic_read(&motg->pm_suspended),
+				motg->sm_work_pending);
+		msm_otg_kick_sm_work(motg);
+	}
+}
+
+#define MSM_ID_STATUS_DELAY	5 /* 5msec */
+/* ID-pin hard IRQ: defer to msm_id_status_w so the line can settle */
+static irqreturn_t msm_id_irq(int irq, void *data)
+{
+	struct
msm_otg *motg = data; + + /*schedule delayed work for 5msec for ID line state to settle*/ + queue_delayed_work(motg->otg_wq, &motg->id_status_work, + msecs_to_jiffies(MSM_ID_STATUS_DELAY)); + + return IRQ_HANDLED; +} + +int msm_otg_pm_notify(struct notifier_block *notify_block, + unsigned long mode, void *unused) +{ + struct msm_otg *motg = container_of( + notify_block, struct msm_otg, pm_notify); + + dev_dbg(motg->phy.dev, "OTG PM notify:%lx, sm_pending:%u\n", mode, + motg->sm_work_pending); + msm_otg_dbg_log_event(&motg->phy, "PM NOTIFY", + mode, motg->sm_work_pending); + + switch (mode) { + case PM_POST_SUSPEND: + /* OTG sm_work can be armed now */ + atomic_set(&motg->pm_suspended, 0); + + /* Handle any deferred wakeup events from USB during suspend */ + if (motg->sm_work_pending) { + motg->sm_work_pending = false; + queue_work(motg->otg_wq, &motg->sm_work); + } + break; + + default: + break; + } + + return NOTIFY_OK; +} + +static int msm_otg_mode_show(struct seq_file *s, void *unused) +{ + struct msm_otg *motg = s->private; + struct usb_otg *otg = motg->phy.otg; + + switch (otg->state) { + case OTG_STATE_A_HOST: + seq_puts(s, "host\n"); + break; + case OTG_STATE_B_IDLE: + case OTG_STATE_B_PERIPHERAL: + case OTG_STATE_B_SUSPEND: + seq_puts(s, "peripheral\n"); + break; + default: + seq_puts(s, "none\n"); + break; + } + + return 0; +} + +static int msm_otg_mode_open(struct inode *inode, struct file *file) +{ + return single_open(file, msm_otg_mode_show, inode->i_private); +} + +static ssize_t msm_otg_mode_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct msm_otg *motg = s->private; + char buf[16]; + struct usb_phy *phy = &motg->phy; + int status = count; + enum usb_mode_type req_mode; + + memset(buf, 0x00, sizeof(buf)); + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) { + status = -EFAULT; + goto out; + } + + if (!strcmp(buf, "host")) { + req_mode = 
USB_HOST; + } else if (!strcmp(buf, "peripheral")) { + req_mode = USB_PERIPHERAL; + } else if (!strcmp(buf, "none")) { + req_mode = USB_NONE; + } else { + status = -EINVAL; + goto out; + } + + switch (req_mode) { + case USB_NONE: + switch (phy->otg->state) { + case OTG_STATE_A_HOST: + case OTG_STATE_B_PERIPHERAL: + case OTG_STATE_B_SUSPEND: + set_bit(ID, &motg->inputs); + clear_bit(B_SESS_VLD, &motg->inputs); + break; + default: + goto out; + } + break; + case USB_PERIPHERAL: + switch (phy->otg->state) { + case OTG_STATE_B_IDLE: + case OTG_STATE_A_HOST: + set_bit(ID, &motg->inputs); + set_bit(B_SESS_VLD, &motg->inputs); + break; + default: + goto out; + } + break; + case USB_HOST: + switch (phy->otg->state) { + case OTG_STATE_B_IDLE: + case OTG_STATE_B_PERIPHERAL: + case OTG_STATE_B_SUSPEND: + clear_bit(ID, &motg->inputs); + break; + default: + goto out; + } + break; + default: + goto out; + } + + motg->id_state = (test_bit(ID, &motg->inputs)) ? USB_ID_FLOAT : + USB_ID_GROUND; + queue_work(motg->otg_wq, &motg->sm_work); +out: + return status; +} + +const struct file_operations msm_otg_mode_fops = { + .open = msm_otg_mode_open, + .read = seq_read, + .write = msm_otg_mode_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static int msm_otg_show_otg_state(struct seq_file *s, void *unused) +{ + struct msm_otg *motg = s->private; + struct usb_phy *phy = &motg->phy; + + seq_printf(s, "%s\n", usb_otg_state_string(phy->otg->state)); + return 0; +} + +static int msm_otg_otg_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, msm_otg_show_otg_state, inode->i_private); +} + +const struct file_operations msm_otg_state_fops = { + .open = msm_otg_otg_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int msm_otg_bus_show(struct seq_file *s, void *unused) +{ + if (debug_bus_voting_enabled) + seq_puts(s, "enabled\n"); + else + seq_puts(s, "disabled\n"); + + return 0; +} + +static int 
msm_otg_bus_open(struct inode *inode, struct file *file) +{ + return single_open(file, msm_otg_bus_show, inode->i_private); +} + +static ssize_t msm_otg_bus_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + char buf[8]; + struct seq_file *s = file->private_data; + struct msm_otg *motg = s->private; + + memset(buf, 0x00, sizeof(buf)); + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + if (!strcmp(buf, "enable")) { + /* Do not vote here. Let OTG statemachine decide when to vote */ + debug_bus_voting_enabled = true; + } else { + debug_bus_voting_enabled = false; + msm_otg_update_bus_bw(motg, USB_MIN_PERF_VOTE); + } + + return count; +} + +static int msm_otg_dbg_buff_show(struct seq_file *s, void *unused) +{ + struct msm_otg *motg = s->private; + unsigned long flags; + unsigned int i; + + read_lock_irqsave(&motg->dbg_lock, flags); + + i = motg->dbg_idx; + if (strnlen(motg->buf[i], DEBUG_MSG_LEN)) + seq_printf(s, "%s\n", motg->buf[i]); + for (dbg_inc(&i); i != motg->dbg_idx; dbg_inc(&i)) { + if (!strnlen(motg->buf[i], DEBUG_MSG_LEN)) + continue; + seq_printf(s, "%s\n", motg->buf[i]); + } + read_unlock_irqrestore(&motg->dbg_lock, flags); + + return 0; +} + +static int msm_otg_dbg_buff_open(struct inode *inode, struct file *file) +{ + return single_open(file, msm_otg_dbg_buff_show, inode->i_private); +} + +const struct file_operations msm_otg_dbg_buff_fops = { + .open = msm_otg_dbg_buff_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int msm_otg_dpdm_regulator_enable(struct regulator_dev *rdev) +{ + int ret = 0; + struct msm_otg *motg = rdev_get_drvdata(rdev); + struct usb_phy *phy = &motg->phy; + + if (!motg->rm_pulldown) { + msm_otg_dbg_log_event(&motg->phy, "Disable Pulldown", + motg->rm_pulldown, 0); + ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_3P3_ON); + if (ret) + return ret; + + motg->rm_pulldown = true; + /* Don't reset h/w if previous 
disconnect handling is pending */ + if (phy->otg->state == OTG_STATE_B_IDLE || + phy->otg->state == OTG_STATE_UNDEFINED) + msm_otg_set_mode_nondriving(motg, true); + else + msm_otg_dbg_log_event(&motg->phy, "NonDrv err", + motg->rm_pulldown, 0); + } + + return ret; +} + +static int msm_otg_dpdm_regulator_disable(struct regulator_dev *rdev) +{ + int ret = 0; + struct msm_otg *motg = rdev_get_drvdata(rdev); + struct usb_phy *phy = &motg->phy; + + if (motg->rm_pulldown) { + /* Let sm_work handle it if USB core is active */ + if (phy->otg->state == OTG_STATE_B_IDLE || + phy->otg->state == OTG_STATE_UNDEFINED) + msm_otg_set_mode_nondriving(motg, false); + + ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_3P3_OFF); + if (ret) + return ret; + + motg->rm_pulldown = false; + msm_otg_dbg_log_event(&motg->phy, "EN Pulldown", + motg->rm_pulldown, 0); + } + + return ret; +} + +static int msm_otg_dpdm_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct msm_otg *motg = rdev_get_drvdata(rdev); + + return motg->rm_pulldown; +} + +static const struct regulator_ops msm_otg_dpdm_regulator_ops = { + .enable = msm_otg_dpdm_regulator_enable, + .disable = msm_otg_dpdm_regulator_disable, + .is_enabled = msm_otg_dpdm_regulator_is_enabled, +}; + +static int usb_phy_regulator_init(struct msm_otg *motg) +{ + struct device *dev = motg->phy.dev; + struct regulator_config cfg = {}; + struct regulator_init_data *init_data; + + init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL); + if (!init_data) + return -ENOMEM; + + init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS; + motg->dpdm_rdesc.owner = THIS_MODULE; + motg->dpdm_rdesc.type = REGULATOR_VOLTAGE; + motg->dpdm_rdesc.ops = &msm_otg_dpdm_regulator_ops; + motg->dpdm_rdesc.name = kbasename(dev->of_node->full_name); + + cfg.dev = dev; + cfg.init_data = init_data; + cfg.driver_data = motg; + cfg.of_node = dev->of_node; + + motg->dpdm_rdev = devm_regulator_register(dev, &motg->dpdm_rdesc, &cfg); + return 
PTR_ERR_OR_ZERO(motg->dpdm_rdev);
+}
+
+const struct file_operations msm_otg_bus_fops = {
+	.open = msm_otg_bus_open,
+	.read = seq_read,
+	.write = msm_otg_bus_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct dentry *msm_otg_dbg_root;
+
+/*
+ * Create the "msm_otg" debugfs directory and its entries: "mode"
+ * (only for user-controlled OTG/peripheral configs), "bus_voting",
+ * "otg_state" and "dbg_buff".  Tears the directory down again on any
+ * creation failure.
+ *
+ * NOTE(review): debugfs_create_file() returns an ERR_PTR on failure,
+ * never NULL, so the "!msm_otg_dentry" checks below likely never fire
+ * on current kernels — confirm intended debugfs API semantics for the
+ * target kernel version.
+ */
+static int msm_otg_debugfs_init(struct msm_otg *motg)
+{
+	struct dentry *msm_otg_dentry;
+	struct msm_otg_platform_data *pdata = motg->pdata;
+
+	msm_otg_dbg_root = debugfs_create_dir("msm_otg", NULL);
+
+	if (!msm_otg_dbg_root || IS_ERR(msm_otg_dbg_root))
+		return -ENODEV;
+
+	/* "mode" is only meaningful when userspace controls the role switch */
+	if ((pdata->mode == USB_OTG || pdata->mode == USB_PERIPHERAL) &&
+		pdata->otg_control == OTG_USER_CONTROL) {
+
+		msm_otg_dentry = debugfs_create_file("mode", 0644,
+			msm_otg_dbg_root, motg, &msm_otg_mode_fops);
+
+		if (!msm_otg_dentry) {
+			debugfs_remove(msm_otg_dbg_root);
+			msm_otg_dbg_root = NULL;
+			return -ENODEV;
+		}
+	}
+
+	msm_otg_dentry = debugfs_create_file("bus_voting", 0644,
+		msm_otg_dbg_root, motg, &msm_otg_bus_fops);
+
+	if (!msm_otg_dentry) {
+		debugfs_remove_recursive(msm_otg_dbg_root);
+		return -ENODEV;
+	}
+
+	msm_otg_dentry = debugfs_create_file("otg_state", 0444,
+		msm_otg_dbg_root, motg, &msm_otg_state_fops);
+
+	if (!msm_otg_dentry) {
+		debugfs_remove_recursive(msm_otg_dbg_root);
+		return -ENODEV;
+	}
+
+	msm_otg_dentry = debugfs_create_file("dbg_buff", 0444,
+		msm_otg_dbg_root, motg, &msm_otg_dbg_buff_fops);
+
+	if (!msm_otg_dentry) {
+		debugfs_remove_recursive(msm_otg_dbg_root);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/* Remove the whole "msm_otg" debugfs tree created by msm_otg_debugfs_init() */
+static void msm_otg_debugfs_cleanup(void)
+{
+	debugfs_remove_recursive(msm_otg_dbg_root);
+}
+
+/*
+ * sysfs "perf_mode" store: "enable" bumps the USB core clock to its
+ * nominal rate and casts a nominal NOC bandwidth vote; anything else
+ * drops to the SVS rate/vote.
+ */
+static ssize_t
+perf_mode_store(struct device *dev, struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct msm_otg *motg = the_msm_otg;
+	int ret;
+	long clk_rate;
+
+	pr_debug("%s: enable:%d\n", __func__, !strncasecmp(buf, "enable", 6));
+
+	if (!strncasecmp(buf, "enable", 6)) {
+		clk_rate = motg->core_clk_nominal_rate;
+		msm_otg_bus_freq_set(motg, USB_NOC_NOM_VOTE);
+	}
else { + clk_rate = motg->core_clk_svs_rate; + msm_otg_bus_freq_set(motg, USB_NOC_SVS_VOTE); + } + + if (clk_rate) { + pr_debug("Set usb sys_clk rate:%ld\n", clk_rate); + ret = clk_set_rate(motg->core_clk, clk_rate); + if (ret) + pr_err("sys_clk set_rate fail:%d %ld\n", ret, clk_rate); + msm_otg_dbg_log_event(&motg->phy, "OTG PERF SET", + clk_rate, ret); + } else { + pr_err("usb sys_clk rate is undefined\n"); + } + + return count; +} + +static DEVICE_ATTR_WO(perf_mode); + +#define MSM_OTG_CMD_ID 0x09 +#define MSM_OTG_DEVICE_ID 0x04 +#define MSM_OTG_VMID_IDX 0xFF +#define MSM_OTG_MEM_TYPE 0x02 +struct msm_otg_scm_cmd_buf { + unsigned int device_id; + unsigned int vmid_idx; + unsigned int mem_type; +} __packed; + +static u64 msm_otg_dma_mask = DMA_BIT_MASK(32); +static struct platform_device *msm_otg_add_pdev( + struct platform_device *ofdev, const char *name) +{ + struct platform_device *pdev; + const struct resource *res = ofdev->resource; + unsigned int num = ofdev->num_resources; + int retval; + struct ci13xxx_platform_data ci_pdata; + struct msm_otg_platform_data *otg_pdata; + struct msm_otg *motg; + + pdev = platform_device_alloc(name, -1); + if (!pdev) { + retval = -ENOMEM; + goto error; + } + + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &msm_otg_dma_mask; + pdev->dev.parent = &ofdev->dev; + + if (num) { + retval = platform_device_add_resources(pdev, res, num); + if (retval) + goto error; + } + + if (!strcmp(name, "msm_hsusb")) { + otg_pdata = + (struct msm_otg_platform_data *) + ofdev->dev.platform_data; + motg = platform_get_drvdata(ofdev); + ci_pdata.log2_itc = otg_pdata->log2_itc; + ci_pdata.usb_core_id = 0; + ci_pdata.l1_supported = otg_pdata->l1_supported; + ci_pdata.enable_ahb2ahb_bypass = + otg_pdata->enable_ahb2ahb_bypass; + ci_pdata.enable_streaming = otg_pdata->enable_streaming; + ci_pdata.enable_axi_prefetch = otg_pdata->enable_axi_prefetch; + retval = platform_device_add_data(pdev, &ci_pdata, + sizeof(ci_pdata)); + if 
(retval) + goto error; + } + + arch_setup_dma_ops(&pdev->dev, 0, DMA_BIT_MASK(32), NULL, false); + retval = platform_device_add(pdev); + if (retval) + goto error; + + return pdev; + +error: + platform_device_put(pdev); + return ERR_PTR(retval); +} + +static int msm_otg_setup_devices(struct platform_device *ofdev, + enum usb_mode_type mode, bool init) +{ + const char *gadget_name = "msm_hsusb"; + const char *host_name = "msm_hsusb_host"; + static struct platform_device *gadget_pdev; + static struct platform_device *host_pdev; + int retval = 0; + + if (!init) { + if (gadget_pdev) { + device_remove_file(&gadget_pdev->dev, + &dev_attr_perf_mode); + platform_device_unregister(gadget_pdev); + } + if (host_pdev) + platform_device_unregister(host_pdev); + return 0; + } + + switch (mode) { + case USB_OTG: + case USB_PERIPHERAL: + gadget_pdev = msm_otg_add_pdev(ofdev, gadget_name); + if (IS_ERR(gadget_pdev)) { + retval = PTR_ERR(gadget_pdev); + break; + } + if (device_create_file(&gadget_pdev->dev, &dev_attr_perf_mode)) + dev_err(&gadget_pdev->dev, "perf_mode file failed\n"); + if (mode == USB_PERIPHERAL) + break; + fallthrough; + case USB_HOST: + host_pdev = msm_otg_add_pdev(ofdev, host_name); + if (IS_ERR(host_pdev)) { + retval = PTR_ERR(host_pdev); + if (mode == USB_OTG) { + platform_device_unregister(gadget_pdev); + device_remove_file(&gadget_pdev->dev, + &dev_attr_perf_mode); + } + } + break; + default: + break; + } + + return retval; +} + +static ssize_t dpdm_pulldown_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msm_otg *motg = the_msm_otg; + struct msm_otg_platform_data *pdata = motg->pdata; + + return scnprintf(buf, PAGE_SIZE, "%s\n", pdata->dpdm_pulldown_added ? 
+ "enabled" : "disabled"); +} + +static ssize_t dpdm_pulldown_enable_store(struct device *dev, + struct device_attribute *attr, const char + *buf, size_t size) +{ + struct msm_otg *motg = the_msm_otg; + struct msm_otg_platform_data *pdata = motg->pdata; + + if (!strncasecmp(buf, "enable", 6)) { + pdata->dpdm_pulldown_added = true; + return size; + } else if (!strncasecmp(buf, "disable", 7)) { + pdata->dpdm_pulldown_added = false; + return size; + } + + return -EINVAL; +} + +static DEVICE_ATTR_RW(dpdm_pulldown_enable); + +static int msm_otg_vbus_notifier(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + msm_otg_set_vbus_state(!!event); + + return NOTIFY_DONE; +} + +static int msm_otg_id_notifier(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct usb_phy *phy = container_of(nb, struct usb_phy, id_nb); + struct msm_otg *motg = container_of(phy, struct msm_otg, phy); + + if (event) + motg->id_state = USB_ID_GROUND; + else + motg->id_state = USB_ID_FLOAT; + + msm_id_status_w(&motg->id_status_work.work); + + return NOTIFY_DONE; +} + +struct msm_otg_platform_data *msm_otg_dt_to_pdata(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct msm_otg_platform_data *pdata; + int len = 0; + int res_gpio; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + len = of_property_count_elems_of_size(node, + "qcom,hsusb-otg-phy-init-seq", sizeof(len)); + if (len > 0) { + pdata->phy_init_seq = devm_kzalloc(&pdev->dev, + len * sizeof(len), GFP_KERNEL); + if (!pdata->phy_init_seq) + return NULL; + of_property_read_u32_array(node, "qcom,hsusb-otg-phy-init-seq", + pdata->phy_init_seq, len); + } + of_property_read_u32(node, "qcom,hsusb-otg-power-budget", + &pdata->power_budget); + of_property_read_u32(node, "qcom,hsusb-otg-mode", + &pdata->mode); + of_property_read_u32(node, "qcom,hsusb-otg-otg-control", + &pdata->otg_control); + of_property_read_u32(node, 
"qcom,hsusb-otg-default-mode", + &pdata->default_mode); + of_property_read_u32(node, "qcom,hsusb-otg-phy-type", + &pdata->phy_type); + pdata->disable_reset_on_disconnect = of_property_read_bool(node, + "qcom,hsusb-otg-disable-reset"); + pdata->enable_lpm_on_dev_suspend = of_property_read_bool(node, + "qcom,hsusb-otg-lpm-on-dev-suspend"); + pdata->core_clk_always_on_workaround = of_property_read_bool(node, + "qcom,hsusb-otg-clk-always-on-workaround"); + pdata->delay_lpm_on_disconnect = of_property_read_bool(node, + "qcom,hsusb-otg-delay-lpm"); + pdata->dp_manual_pullup = of_property_read_bool(node, + "qcom,dp-manual-pullup"); + pdata->enable_sec_phy = of_property_read_bool(node, + "qcom,usb2-enable-hsphy2"); + of_property_read_u32(node, "qcom,hsusb-log2-itc", + &pdata->log2_itc); + + pdata->pmic_id_irq = platform_get_irq_byname(pdev, "pmic_id_irq"); + if (pdata->pmic_id_irq < 0) + pdata->pmic_id_irq = 0; + + pdata->hub_reset_gpio = of_get_named_gpio( + node, "qcom,hub-reset-gpio", 0); + if (!gpio_is_valid(pdata->hub_reset_gpio)) + pr_debug("hub_reset_gpio is not available\n"); + + pdata->usbeth_reset_gpio = of_get_named_gpio( + node, "qcom,usbeth-reset-gpio", 0); + if (!gpio_is_valid(pdata->usbeth_reset_gpio)) + pr_debug("usbeth_reset_gpio is not available\n"); + + pdata->switch_sel_gpio = + of_get_named_gpio(node, "qcom,sw-sel-gpio", 0); + if (!gpio_is_valid(pdata->switch_sel_gpio)) + pr_debug("switch_sel_gpio is not available\n"); + + pdata->usb_id_gpio = + of_get_named_gpio(node, "qcom,usbid-gpio", 0); + if (!gpio_is_valid(pdata->usb_id_gpio)) + pr_debug("usb_id_gpio is not available\n"); + + pdata->l1_supported = of_property_read_bool(node, + "qcom,hsusb-l1-supported"); + pdata->enable_ahb2ahb_bypass = of_property_read_bool(node, + "qcom,ahb-async-bridge-bypass"); + pdata->disable_retention_with_vdd_min = of_property_read_bool(node, + "qcom,disable-retention-with-vdd-min"); + pdata->enable_phy_id_pullup = of_property_read_bool(node, + 
"qcom,enable-phy-id-pullup"); + pdata->phy_dvdd_always_on = of_property_read_bool(node, + "qcom,phy-dvdd-always-on"); + + res_gpio = of_get_named_gpio(node, "qcom,hsusb-otg-vddmin-gpio", 0); + if (!gpio_is_valid(res_gpio)) + res_gpio = 0; + pdata->vddmin_gpio = res_gpio; + + pdata->emulation = of_property_read_bool(node, + "qcom,emulation"); + + pdata->enable_streaming = of_property_read_bool(node, + "qcom,boost-sysclk-with-streaming"); + + pdata->enable_axi_prefetch = of_property_read_bool(node, + "qcom,axi-prefetch-enable"); + + pdata->vbus_low_as_hostmode = of_property_read_bool(node, + "qcom,vbus-low-as-hostmode"); + + pdata->phy_id_high_as_peripheral = of_property_read_bool(node, + "qcom,phy-id-high-as-peripheral"); + + return pdata; +} + +/* get the interconnect votes */ +static inline void iccs_get(struct msm_otg *motg) +{ + motg->icc_paths = of_icc_get(&motg->pdev->dev, icc_path_names[0]); +} + +/* put the interconnect votes */ +static inline void iccs_put(struct msm_otg *motg) +{ + icc_put(motg->icc_paths); +} + +static int msm_otg_probe(struct platform_device *pdev) +{ + int ret = 0; + int len = 0; + u32 tmp[3]; + struct resource *res; + struct msm_otg *motg; + struct usb_phy *phy; + struct msm_otg_platform_data *pdata; + void __iomem *tcsr; + int id_irq = 0; + + dev_info(&pdev->dev, "msm_otg probe\n"); + + motg = kzalloc(sizeof(struct msm_otg), GFP_KERNEL); + if (!motg) { + ret = -ENOMEM; + return ret; + } + + /* + * USB Core is running its protocol engine based on CORE CLK, + * CORE CLK must be running at >55Mhz for correct HSUSB + * operation and USB core cannot tolerate frequency changes on + * CORE CLK. 
For such USB cores, vote for maximum clk frequency + * on pclk source + */ + motg->core_clk = clk_get(&pdev->dev, "core_clk"); + if (IS_ERR(motg->core_clk)) { + ret = PTR_ERR(motg->core_clk); + motg->core_clk = NULL; + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get core_clk\n"); + goto free_motg; + } + + motg->core_reset = devm_reset_control_get(&pdev->dev, "core_reset"); + if (IS_ERR(motg->core_reset)) { + dev_err(&pdev->dev, "failed to get core_reset\n"); + ret = PTR_ERR(motg->core_reset); + goto put_core_clk; + } + + /* + * USB Core CLK can run at max freq if streaming is enabled. Hence, + * get Max supported clk frequency for USB Core CLK and request to set + * the same. Otherwise set USB Core CLK to defined default value. + */ + if (of_property_read_u32(pdev->dev.of_node, + "qcom,max-nominal-sysclk-rate", &ret)) { + ret = -EINVAL; + goto put_core_clk; + } else { + motg->core_clk_nominal_rate = clk_round_rate(motg->core_clk, + ret); + } + + if (of_property_read_u32(pdev->dev.of_node, + "qcom,max-svs-sysclk-rate", &ret)) { + dev_dbg(&pdev->dev, "core_clk svs freq not specified\n"); + } else { + motg->core_clk_svs_rate = clk_round_rate(motg->core_clk, ret); + } + + motg->default_noc_mode = USB_NOC_NOM_VOTE; + if (of_property_read_bool(pdev->dev.of_node, "qcom,default-mode-svs")) { + motg->core_clk_rate = motg->core_clk_svs_rate; + motg->default_noc_mode = USB_NOC_SVS_VOTE; + } else if (of_property_read_bool(pdev->dev.of_node, + "qcom,boost-sysclk-with-streaming")) { + motg->core_clk_rate = motg->core_clk_nominal_rate; + } else { + motg->core_clk_rate = clk_round_rate(motg->core_clk, + USB_DEFAULT_SYSTEM_CLOCK); + } + + if (IS_ERR_VALUE(motg->core_clk_rate)) { + dev_err(&pdev->dev, "fail to get core clk max freq.\n"); + } else { + ret = clk_set_rate(motg->core_clk, motg->core_clk_rate); + if (ret) + dev_err(&pdev->dev, "fail to set core_clk freq:%d\n", + ret); + } + + motg->pclk = clk_get(&pdev->dev, "iface_clk"); + if (IS_ERR(motg->pclk)) { + ret 
= PTR_ERR(motg->pclk); + motg->pclk = NULL; + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to get iface_clk\n"); + goto put_core_clk; + } + + motg->xo_clk = clk_get(&pdev->dev, "xo"); + if (IS_ERR(motg->xo_clk)) { + ret = PTR_ERR(motg->xo_clk); + motg->xo_clk = NULL; + if (ret == -EPROBE_DEFER) + goto put_pclk; + } + + /* + * On few platforms USB PHY is fed with sleep clk. + * Hence don't fail probe. + */ + motg->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk"); + if (IS_ERR(motg->sleep_clk)) { + ret = PTR_ERR(motg->sleep_clk); + motg->sleep_clk = NULL; + if (ret == -EPROBE_DEFER) + goto put_xo_clk; + else + dev_dbg(&pdev->dev, "failed to get sleep_clk\n"); + } else { + ret = clk_prepare_enable(motg->sleep_clk); + if (ret) { + dev_err(&pdev->dev, "%s failed to vote sleep_clk%d\n", + __func__, ret); + goto put_xo_clk; + } + } + + /* + * If present, phy_reset_clk is used to reset the PHY, ULPI bridge + * and CSR Wrapper. This is a reset only clock. + */ + + if (of_property_match_string(pdev->dev.of_node, + "clock-names", "phy_reset_clk") >= 0) { + motg->phy_reset_clk = devm_clk_get(&pdev->dev, "phy_reset_clk"); + if (IS_ERR(motg->phy_reset_clk)) { + ret = PTR_ERR(motg->phy_reset_clk); + goto disable_sleep_clk; + } + } + + motg->phy_reset = devm_reset_control_get(&pdev->dev, + "phy_reset"); + if (IS_ERR(motg->phy_reset)) { + dev_err(&pdev->dev, "failed to get phy_reset\n"); + ret = PTR_ERR(motg->phy_reset); + goto disable_sleep_clk; + } + + /* + * If present, phy_por_clk is used to assert/de-assert phy POR + * input. This is a reset only clock. phy POR must be asserted + * after overriding the parameter registers via CSR wrapper or + * ULPI bridge. 
+ */ + if (of_property_match_string(pdev->dev.of_node, + "clock-names", "phy_por_clk") >= 0) { + motg->phy_por_clk = devm_clk_get(&pdev->dev, "phy_por_clk"); + if (IS_ERR(motg->phy_por_clk)) { + ret = PTR_ERR(motg->phy_por_clk); + goto disable_sleep_clk; + } + } + + motg->phy_por_reset = devm_reset_control_get(&pdev->dev, + "phy_por_reset"); + if (IS_ERR(motg->phy_por_reset)) { + dev_err(&pdev->dev, "failed to get phy_por_reset\n"); + ret = PTR_ERR(motg->phy_por_reset); + goto disable_sleep_clk; + } + + /* + * If present, phy_csr_clk is required for accessing PHY + * CSR registers via AHB2PHY interface. + */ + if (of_property_match_string(pdev->dev.of_node, + "clock-names", "phy_csr_clk") >= 0) { + motg->phy_csr_clk = devm_clk_get(&pdev->dev, "phy_csr_clk"); + if (IS_ERR(motg->phy_csr_clk)) { + ret = PTR_ERR(motg->phy_csr_clk); + goto disable_sleep_clk; + } else { + ret = clk_prepare_enable(motg->phy_csr_clk); + if (ret) { + dev_err(&pdev->dev, + "fail to enable phy csr clk %d\n", ret); + goto disable_sleep_clk; + } + } + } + + of_property_read_u32(pdev->dev.of_node, "qcom,pm-qos-latency", + &motg->pm_qos_latency); + + motg->enable_sdp_check_timer = of_property_read_bool(pdev->dev.of_node, + "qcom,enumeration-check-for-sdp"); + + pdata = msm_otg_dt_to_pdata(pdev); + if (!pdata) { + ret = -ENOMEM; + goto disable_phy_csr_clk; + } + pdev->dev.platform_data = pdata; + + motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg), + GFP_KERNEL); + if (!motg->phy.otg) { + ret = -ENOMEM; + goto disable_phy_csr_clk; + } + + the_msm_otg = motg; + motg->pdata = pdata; + phy = &motg->phy; + phy->dev = &pdev->dev; + motg->pdev = pdev; + motg->dbg_idx = 0; + motg->dbg_lock = __RW_LOCK_UNLOCKED(lck); + mutex_init(&motg->lock); + + /*Get ICB phandle*/ + iccs_get(motg); + msm_otg_update_bus_bw(motg, USB_MIN_PERF_VOTE); + + + ret = msm_otg_bus_freq_get(motg); + if (ret) { + pr_err("failed to get noc clocks: %d\n", ret); + } else { + ret = msm_otg_bus_freq_set(motg, 
motg->default_noc_mode); + if (ret) + pr_err("failed to vote explicit noc rates: %d\n", ret); + } + + /* initialize reset counter */ + motg->reset_counter = 0; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); + if (!res) { + dev_err(&pdev->dev, "failed to get core iomem resource\n"); + ret = -ENODEV; + goto disable_phy_csr_clk; + } + + motg->io_res = res; + motg->regs = ioremap(res->start, resource_size(res)); + if (!motg->regs) { + dev_err(&pdev->dev, "core iomem ioremap failed\n"); + ret = -ENOMEM; + goto disable_phy_csr_clk; + } + dev_info(&pdev->dev, "OTG regs = %pK\n", motg->regs); + + if (pdata->enable_sec_phy) { + res = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "tcsr"); + if (!res) { + dev_dbg(&pdev->dev, "missing TCSR memory resource\n"); + } else { + tcsr = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); + if (!tcsr) { + dev_dbg(&pdev->dev, "tcsr ioremap failed\n"); + } else { + /* Enable USB2 on secondary HSPHY. */ + writel_relaxed(0x1, tcsr); + /* + * Ensure that TCSR write is completed before + * USB registers initialization. + */ + mb(); + } + } + } + + if (pdata->enable_sec_phy) + motg->usb_phy_ctrl_reg = USB_PHY_CTRL2; + else + motg->usb_phy_ctrl_reg = USB_PHY_CTRL; + + /* + * The USB PHY wrapper provides a register interface + * through AHB2PHY for performing PHY related operations + * like retention, HV interrupts and overriding parameter + * registers etc. The registers start at 4 byte boundary + * but only the first byte is valid and remaining are not + * used. Relaxed versions of readl/writel should be used. + * + * The link does not have any PHY specific registers. + * Hence set motg->usb_phy_ctrl_reg to 0. 
+ */ + if (motg->pdata->phy_type == SNPS_FEMTO_PHY || + pdata->phy_type == QUSB_ULPI_PHY) { + res = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "phy_csr"); + if (!res) { + dev_err(&pdev->dev, "PHY CSR IOMEM missing!\n"); + ret = -ENODEV; + goto free_regs; + } + motg->phy_csr_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(motg->phy_csr_regs)) { + ret = PTR_ERR(motg->phy_csr_regs); + dev_err(&pdev->dev, "PHY CSR ioremap failed!\n"); + goto free_regs; + } + motg->usb_phy_ctrl_reg = 0; + } + + motg->irq = platform_get_irq(pdev, 0); + if (!motg->irq) { + dev_err(&pdev->dev, "platform_get_irq failed\n"); + ret = -ENODEV; + goto free_regs; + } + + motg->async_irq = platform_get_irq_byname(pdev, "async_irq"); + if (motg->async_irq < 0) { + dev_err(&pdev->dev, "platform_get_irq for async_int failed\n"); + motg->async_irq = 0; + goto free_regs; + } + + if (motg->xo_clk) { + ret = clk_prepare_enable(motg->xo_clk); + if (ret) { + dev_err(&pdev->dev, + "%s failed to vote for TCXO %d\n", + __func__, ret); + goto free_xo_handle; + } + } + + + clk_prepare_enable(motg->pclk); + + hsusb_vdd = devm_regulator_get(motg->phy.dev, "hsusb_vdd_dig"); + if (IS_ERR(hsusb_vdd)) { + hsusb_vdd = devm_regulator_get(motg->phy.dev, "HSUSB_VDDCX"); + if (IS_ERR(hsusb_vdd)) { + dev_err(motg->phy.dev, "unable to get hsusb vddcx\n"); + ret = PTR_ERR(hsusb_vdd); + goto devote_xo_handle; + } + } + + len = of_property_count_elems_of_size(pdev->dev.of_node, + "qcom,vdd-voltage-level", sizeof(len)); + if (len > 0) { + if (len == sizeof(tmp) / sizeof(len)) { + of_property_read_u32_array(pdev->dev.of_node, + "qcom,vdd-voltage-level", + tmp, len); + vdd_val[0] = tmp[0]; + vdd_val[1] = tmp[1]; + vdd_val[2] = tmp[2]; + } else { + dev_dbg(&pdev->dev, + "Using default hsusb vdd config.\n"); + goto devote_xo_handle; + } + } else { + goto devote_xo_handle; + } + + ret = msm_hsusb_config_vddcx(1); + if (ret) { + dev_err(&pdev->dev, "hsusb vddcx configuration failed\n"); + goto devote_xo_handle; 
+ } + + ret = regulator_enable(hsusb_vdd); + if (ret) { + dev_err(&pdev->dev, "unable to enable the hsusb vddcx\n"); + goto free_config_vddcx; + } + + ret = msm_hsusb_ldo_init(motg, 1); + if (ret) { + dev_err(&pdev->dev, "hsusb vreg configuration failed\n"); + goto free_hsusb_vdd; + } + + /* Get pinctrl if target uses pinctrl */ + motg->phy_pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(motg->phy_pinctrl)) { + if (of_property_read_bool(pdev->dev.of_node, "pinctrl-names")) { + dev_err(&pdev->dev, "Error encountered while getting pinctrl\n"); + ret = PTR_ERR(motg->phy_pinctrl); + goto free_ldo_init; + } + dev_dbg(&pdev->dev, "Target does not use pinctrl\n"); + motg->phy_pinctrl = NULL; + } + + ret = msm_hsusb_ldo_enable(motg, USB_PHY_REG_ON); + if (ret) { + dev_err(&pdev->dev, "hsusb vreg enable failed\n"); + goto free_ldo_init; + } + clk_prepare_enable(motg->core_clk); + + writel_relaxed(0, USB_USBINTR); + writel_relaxed(0, USB_OTGSC); + /* Ensure that above STOREs are completed before enabling interrupts */ + mb(); + + motg->id_state = USB_ID_FLOAT; + set_bit(ID, &motg->inputs); + INIT_WORK(&motg->sm_work, msm_otg_sm_work); + INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work); + INIT_DELAYED_WORK(&motg->id_status_work, msm_id_status_w); + INIT_DELAYED_WORK(&motg->perf_vote_work, msm_otg_perf_vote_work); + INIT_DELAYED_WORK(&motg->sdp_check, check_for_sdp_connection); + INIT_WORK(&motg->notify_charger_work, msm_otg_notify_charger_work); + motg->otg_wq = alloc_ordered_workqueue("k_otg", WQ_FREEZABLE); + if (!motg->otg_wq) { + pr_err("%s: Unable to create workqueue otg_wq\n", + __func__); + goto disable_core_clk; + } + + ret = devm_request_irq(&pdev->dev, motg->irq, msm_otg_irq, IRQF_SHARED, + "msm_otg", motg); + if (ret) { + dev_err(&pdev->dev, "request irq failed\n"); + goto destroy_wq; + } + + motg->phy_irq = platform_get_irq_byname(pdev, "phy_irq"); + if (motg->phy_irq < 0) { + dev_dbg(&pdev->dev, "phy_irq is not present\n"); + motg->phy_irq = 0; + } 
else { + + /* clear all interrupts before enabling the IRQ */ + writeb_relaxed(0xFF, USB2_PHY_USB_PHY_INTERRUPT_CLEAR0); + writeb_relaxed(0xFF, USB2_PHY_USB_PHY_INTERRUPT_CLEAR1); + + writeb_relaxed(0x1, USB2_PHY_USB_PHY_IRQ_CMD); + /* + * Databook says 200 usec delay is required for + * clearing the interrupts. + */ + udelay(200); + writeb_relaxed(0x0, USB2_PHY_USB_PHY_IRQ_CMD); + + ret = devm_request_irq(&pdev->dev, motg->phy_irq, + msm_otg_phy_irq_handler, IRQF_TRIGGER_RISING, + "msm_otg_phy_irq", motg); + if (ret < 0) { + dev_err(&pdev->dev, "phy_irq request fail %d\n", ret); + goto destroy_wq; + } + } + + ret = devm_request_irq(&pdev->dev, motg->async_irq, msm_otg_irq, + IRQF_TRIGGER_RISING, "msm_otg", motg); + if (ret) { + dev_err(&pdev->dev, "request irq failed (ASYNC INT)\n"); + goto destroy_wq; + } + disable_irq(motg->async_irq); + + phy->init = msm_otg_reset; + phy->set_power = msm_otg_set_power; + phy->set_suspend = msm_otg_set_suspend; + + phy->io_ops = &msm_otg_io_ops; + + phy->otg->usb_phy = &motg->phy; + phy->otg->set_host = msm_otg_set_host; + phy->otg->set_peripheral = msm_otg_set_peripheral; + if (pdata->dp_manual_pullup) + phy->flags |= ENABLE_DP_MANUAL_PULLUP; + + if (pdata->enable_sec_phy) + phy->flags |= ENABLE_SECONDARY_PHY; + + phy->vbus_nb.notifier_call = msm_otg_vbus_notifier; + phy->id_nb.notifier_call = msm_otg_id_notifier; + ret = usb_add_phy(&motg->phy, USB_PHY_TYPE_USB2); + if (ret) { + dev_err(&pdev->dev, "usb_add_phy failed\n"); + goto destroy_wq; + } + + ret = usb_phy_regulator_init(motg); + if (ret) { + dev_err(&pdev->dev, "usb_phy_regulator_init failed\n"); + goto remove_phy; + } + + if (motg->pdata->mode == USB_OTG && + motg->pdata->otg_control == OTG_PMIC_CONTROL && + !motg->phy_irq) { + + if (gpio_is_valid(motg->pdata->usb_id_gpio)) { + /* usb_id_gpio request */ + ret = devm_gpio_request(&pdev->dev, + motg->pdata->usb_id_gpio, + "USB_ID_GPIO"); + if (ret < 0) { + dev_err(&pdev->dev, "gpio req failed for id\n"); + goto 
phy_reg_deinit; + } + + /* + * The following code implements switch between the HOST + * mode to device mode when used different HW components + * on the same port: USB HUB and the usb jack type B + * for device mode In this case HUB should be gone + * only once out of reset at the boot time and after + * that always stay on + */ + if (gpio_is_valid(motg->pdata->hub_reset_gpio)) { + ret = devm_gpio_request(&pdev->dev, + motg->pdata->hub_reset_gpio, + "qcom,hub-reset-gpio"); + if (ret < 0) { + dev_err(&pdev->dev, "gpio req failed for hub reset\n"); + goto phy_reg_deinit; + } + gpio_direction_output( + motg->pdata->hub_reset_gpio, 1); + } + + if (gpio_is_valid(motg->pdata->switch_sel_gpio)) { + ret = devm_gpio_request(&pdev->dev, + motg->pdata->switch_sel_gpio, + "qcom,sw-sel-gpio"); + if (ret < 0) { + dev_err(&pdev->dev, "gpio req failed for switch sel\n"); + goto phy_reg_deinit; + } + if (gpio_get_value(motg->pdata->usb_id_gpio)) + gpio_direction_input( + motg->pdata->switch_sel_gpio); + + else + gpio_direction_output( + motg->pdata->switch_sel_gpio, + 1); + } + + /* usb_id_gpio to irq */ + id_irq = gpio_to_irq(motg->pdata->usb_id_gpio); + motg->ext_id_irq = id_irq; + } else if (motg->pdata->pmic_id_irq) { + id_irq = motg->pdata->pmic_id_irq; + } + + if (id_irq) { + ret = devm_request_irq(&pdev->dev, id_irq, + msm_id_irq, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + "msm_otg", motg); + if (ret) { + dev_err(&pdev->dev, "request irq failed for ID\n"); + goto phy_reg_deinit; + } + } else { + /* PMIC does USB ID detection and notifies through + * USB_OTG property of USB powersupply. 
+ */ + dev_dbg(&pdev->dev, "PMIC does ID detection\n"); + } + } + + platform_set_drvdata(pdev, motg); + device_init_wakeup(&pdev->dev, 1); + + ret = msm_otg_debugfs_init(motg); + if (ret) + dev_dbg(&pdev->dev, "mode debugfs file is not available\n"); + + if (motg->pdata->otg_control == OTG_PMIC_CONTROL && + (!(motg->pdata->mode == USB_OTG) || + motg->pdata->pmic_id_irq || motg->ext_id_irq || + !motg->phy_irq)) + motg->caps = ALLOW_PHY_POWER_COLLAPSE | ALLOW_PHY_RETENTION; + + if (motg->pdata->otg_control == OTG_PHY_CONTROL || motg->phy_irq || + motg->pdata->enable_phy_id_pullup) + motg->caps = ALLOW_PHY_RETENTION | ALLOW_PHY_REGULATORS_LPM; + + motg->caps |= ALLOW_HOST_PHY_RETENTION; + + device_create_file(&pdev->dev, &dev_attr_dpdm_pulldown_enable); + + if (motg->pdata->enable_lpm_on_dev_suspend) + motg->caps |= ALLOW_LPM_ON_DEV_SUSPEND; + + if (motg->pdata->disable_retention_with_vdd_min) + motg->caps |= ALLOW_VDD_MIN_WITH_RETENTION_DISABLED; + + /* + * PHY DVDD is supplied by a always on PMIC LDO (unlike + * vddcx/vddmx). PHY can keep D+ pull-up and D+/D- + * pull-down during suspend without any additional + * hardware re-work. 
+ */ + if (motg->pdata->phy_type == SNPS_FEMTO_PHY) + motg->caps |= ALLOW_BUS_SUSPEND_WITHOUT_REWORK; + + pm_stay_awake(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + + if (motg->pdata->delay_lpm_on_disconnect) { + pm_runtime_set_autosuspend_delay(&pdev->dev, + lpm_disconnect_thresh); + pm_runtime_use_autosuspend(&pdev->dev); + } + + if (pdev->dev.of_node) { + ret = msm_otg_setup_devices(pdev, pdata->mode, true); + if (ret) { + dev_err(&pdev->dev, "devices setup failed\n"); + goto remove_cdev; + } + } + + if (gpio_is_valid(motg->pdata->hub_reset_gpio)) { + ret = devm_gpio_request(&pdev->dev, + motg->pdata->hub_reset_gpio, + "HUB_RESET"); + if (ret < 0) { + dev_err(&pdev->dev, "gpio req failed for hub_reset\n"); + } else { + gpio_direction_output( + motg->pdata->hub_reset_gpio, 0); + /* 5 microsecs reset signaling to usb hub */ + usleep_range(5, 10); + gpio_direction_output( + motg->pdata->hub_reset_gpio, 1); + } + } + + if (gpio_is_valid(motg->pdata->usbeth_reset_gpio)) { + ret = devm_gpio_request(&pdev->dev, + motg->pdata->usbeth_reset_gpio, + "ETH_RESET"); + if (ret < 0) { + dev_err(&pdev->dev, "gpio req failed for usbeth_reset\n"); + } else { + gpio_direction_output( + motg->pdata->usbeth_reset_gpio, 0); + /* 100 microsecs reset signaling to usb-to-eth */ + usleep_range(100, 110); + gpio_direction_output( + motg->pdata->usbeth_reset_gpio, 1); + } + } + + if (of_property_read_bool(pdev->dev.of_node, "extcon")) { + if (extcon_get_state(motg->phy.edev, EXTCON_USB_HOST)) { + msm_otg_id_notifier(&motg->phy.id_nb, + 1, motg->phy.edev); + } else if (extcon_get_state(motg->phy.edev, EXTCON_USB)) { + msm_otg_vbus_notifier(&motg->phy.vbus_nb, + 1, motg->phy.edev); + } + } + + motg->pm_notify.notifier_call = msm_otg_pm_notify; + register_pm_notifier(&motg->pm_notify); + msm_otg_dbg_log_event(phy, "OTG PROBE", motg->caps, motg->lpm_flags); + + return 0; + +remove_cdev: + pm_runtime_disable(&pdev->dev); + device_remove_file(&pdev->dev, 
&dev_attr_dpdm_pulldown_enable); + msm_otg_debugfs_cleanup(); +phy_reg_deinit: + regulator_unregister(motg->dpdm_rdev); +remove_phy: + usb_remove_phy(&motg->phy); +destroy_wq: + destroy_workqueue(motg->otg_wq); +disable_core_clk: + clk_disable_unprepare(motg->core_clk); + msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF); +free_ldo_init: + msm_hsusb_ldo_init(motg, 0); +free_hsusb_vdd: + regulator_disable(hsusb_vdd); +free_config_vddcx: + regulator_set_voltage(hsusb_vdd, + vdd_val[VDD_NONE], + vdd_val[VDD_MAX]); +devote_xo_handle: + clk_disable_unprepare(motg->pclk); + if (motg->xo_clk) + clk_disable_unprepare(motg->xo_clk); +free_xo_handle: + if (motg->xo_clk) { + clk_put(motg->xo_clk); + motg->xo_clk = NULL; + } +free_regs: + iounmap(motg->regs); +disable_phy_csr_clk: + iccs_put(motg); + if (motg->phy_csr_clk) + clk_disable_unprepare(motg->phy_csr_clk); +disable_sleep_clk: + if (motg->sleep_clk) + clk_disable_unprepare(motg->sleep_clk); +put_xo_clk: + if (motg->xo_clk) + clk_put(motg->xo_clk); +put_pclk: + if (motg->pclk) + clk_put(motg->pclk); +put_core_clk: + if (motg->core_clk) + clk_put(motg->core_clk); +free_motg: + kfree(motg); + return ret; +} + +static int msm_otg_remove(struct platform_device *pdev) +{ + struct msm_otg *motg = platform_get_drvdata(pdev); + struct usb_phy *phy = &motg->phy; + int cnt = 0; + + if (phy->otg->host || phy->otg->gadget) + return -EBUSY; + + unregister_pm_notifier(&motg->pm_notify); + + if (pdev->dev.of_node) + msm_otg_setup_devices(pdev, motg->pdata->mode, false); + if (psy) + power_supply_put(psy); + msm_otg_debugfs_cleanup(); + cancel_delayed_work_sync(&motg->chg_work); + cancel_delayed_work_sync(&motg->sdp_check); + cancel_delayed_work_sync(&motg->id_status_work); + cancel_delayed_work_sync(&motg->perf_vote_work); + msm_otg_perf_vote_update(motg, false); + cancel_work_sync(&motg->sm_work); + cancel_work_sync(&motg->notify_charger_work); + destroy_workqueue(motg->otg_wq); + + pm_runtime_resume(&pdev->dev); + + 
device_init_wakeup(&pdev->dev, 0); + pm_runtime_disable(&pdev->dev); + + usb_remove_phy(phy); + + device_remove_file(&pdev->dev, &dev_attr_dpdm_pulldown_enable); + + /* + * Put PHY in low power mode. + */ + ulpi_read(phy, 0x14); + ulpi_write(phy, 0x08, 0x09); + + writel_relaxed(readl_relaxed(USB_PORTSC) | PORTSC_PHCD, USB_PORTSC); + while (cnt < PHY_SUSPEND_TIMEOUT_USEC) { + if (readl_relaxed(USB_PORTSC) & PORTSC_PHCD) + break; + udelay(1); + cnt++; + } + if (cnt >= PHY_SUSPEND_TIMEOUT_USEC) + dev_err(phy->dev, "Unable to suspend PHY\n"); + + clk_disable_unprepare(motg->pclk); + clk_disable_unprepare(motg->core_clk); + if (motg->phy_csr_clk) + clk_disable_unprepare(motg->phy_csr_clk); + if (motg->xo_clk) { + clk_disable_unprepare(motg->xo_clk); + clk_put(motg->xo_clk); + } + + if (!IS_ERR(motg->sleep_clk)) + clk_disable_unprepare(motg->sleep_clk); + + msm_hsusb_ldo_enable(motg, USB_PHY_REG_OFF); + msm_hsusb_ldo_init(motg, 0); + regulator_disable(hsusb_vdd); + regulator_set_voltage(hsusb_vdd, + vdd_val[VDD_NONE], + vdd_val[VDD_MAX]); + + iounmap(motg->regs); + pm_runtime_set_suspended(&pdev->dev); + + clk_put(motg->pclk); + clk_put(motg->core_clk); + + msm_otg_update_bus_bw(motg, USB_NO_PERF_VOTE); + + return 0; +} + +static void msm_otg_shutdown(struct platform_device *pdev) +{ + struct msm_otg *motg = platform_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "OTG shutdown\n"); + msm_hsusb_vbus_power(motg, 0); +} + +#ifdef CONFIG_PM +static int msm_otg_runtime_idle(struct device *dev) +{ + struct msm_otg *motg = dev_get_drvdata(dev); + struct usb_phy *phy = &motg->phy; + + dev_dbg(dev, "OTG runtime idle\n"); + msm_otg_dbg_log_event(phy, "RUNTIME IDLE", phy->otg->state, 0); + + if (phy->otg->state == OTG_STATE_UNDEFINED) + return -EAGAIN; + + return 0; +} + +static int msm_otg_runtime_suspend(struct device *dev) +{ + struct msm_otg *motg = dev_get_drvdata(dev); + + dev_dbg(dev, "OTG runtime suspend\n"); + msm_otg_dbg_log_event(&motg->phy, "RUNTIME SUSPEND", + 
get_pm_runtime_counter(dev), 0); + return msm_otg_suspend(motg); +} + +static int msm_otg_runtime_resume(struct device *dev) +{ + struct msm_otg *motg = dev_get_drvdata(dev); + + dev_dbg(dev, "OTG runtime resume\n"); + msm_otg_dbg_log_event(&motg->phy, "RUNTIME RESUME", + get_pm_runtime_counter(dev), 0); + + return msm_otg_resume(motg); +} +#endif + +#ifdef CONFIG_PM_SLEEP +static int msm_otg_pm_suspend(struct device *dev) +{ + struct msm_otg *motg = dev_get_drvdata(dev); + + dev_dbg(dev, "OTG PM suspend\n"); + msm_otg_dbg_log_event(&motg->phy, "PM SUSPEND START", + get_pm_runtime_counter(dev), + atomic_read(&motg->pm_suspended)); + + /* flush any pending sm_work first */ + flush_work(&motg->sm_work); + if (!atomic_read(&motg->in_lpm)) { + dev_err(dev, "Abort PM suspend!! (USB is outside LPM)\n"); + return -EBUSY; + } + atomic_set(&motg->pm_suspended, 1); + + return 0; +} + +static int msm_otg_pm_resume(struct device *dev) +{ + struct msm_otg *motg = dev_get_drvdata(dev); + + dev_dbg(dev, "OTG PM resume\n"); + msm_otg_dbg_log_event(&motg->phy, "PM RESUME START", + get_pm_runtime_counter(dev), pm_runtime_suspended(dev)); + + if (motg->resume_pending || motg->phy_irq_pending) { + msm_otg_dbg_log_event(&motg->phy, "PM RESUME BY USB", + motg->async_int, motg->resume_pending); + /* sm work if pending will start in pm notify to exit LPM */ + } + + return 0; +} +#endif + +#ifdef CONFIG_PM +static const struct dev_pm_ops msm_otg_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume) + SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume, + msm_otg_runtime_idle) +}; +#endif + +static const struct of_device_id msm_otg_dt_match[] = { + { .compatible = "qcom,hsusb-otg", + }, + {} +}; + +static struct platform_driver msm_otg_driver = { + .probe = msm_otg_probe, + .remove = msm_otg_remove, + .shutdown = msm_otg_shutdown, + .driver = { + .name = DRIVER_NAME, +#ifdef CONFIG_PM + .pm = &msm_otg_dev_pm_ops, +#endif + .of_match_table = 
msm_otg_dt_match, + }, +}; + +module_platform_driver(msm_otg_driver); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MSM USB transceiver driver"); diff --git a/drivers/virt/gunyah/gh_rm_iface.c b/drivers/virt/gunyah/gh_rm_iface.c index 958d46301623..ddbafaff36a3 100644 --- a/drivers/virt/gunyah/gh_rm_iface.c +++ b/drivers/virt/gunyah/gh_rm_iface.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. * */ @@ -67,7 +67,7 @@ int gh_update_vm_prop_table(enum gh_vm_names vm_name, if (!vm_prop) return -EINVAL; - if (vm_prop->vmid < 0 || vm_name < GH_SELF_VM || vm_name > GH_VM_MAX) + if (vm_prop->vmid < 0 || vm_name < GH_SELF_VM || vm_name >= GH_VM_MAX) return -EINVAL; spin_lock(&gh_vm_table_lock); @@ -129,10 +129,9 @@ int ghd_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid) gh_vmid_t _vmid; int ret = 0; - if (vm_name < GH_SELF_VM || vm_name > GH_VM_MAX) + if (vm_name < GH_SELF_VM || vm_name >= GH_VM_MAX) return -EINVAL; - spin_lock(&gh_vm_table_lock); _vmid = gh_vm_table[vm_name].vmid; @@ -196,11 +195,10 @@ int gh_rm_get_vminfo(enum gh_vm_names vm_name, struct gh_vminfo *vm) if (!vm) return -EINVAL; - spin_lock(&gh_vm_table_lock); - if (vm_name < GH_SELF_VM || vm_name > GH_VM_MAX) { - spin_unlock(&gh_vm_table_lock); + if (vm_name < GH_SELF_VM || vm_name >= GH_VM_MAX) return -EINVAL; - } + + spin_lock(&gh_vm_table_lock); vm->guid = gh_vm_table[vm_name].guid; vm->uri = gh_vm_table[vm_name].uri; @@ -980,7 +978,7 @@ int gh_rm_vm_alloc_vmid(enum gh_vm_names vm_name, int *vmid) /* Look up for the vm_name<->vmid pair if already present. * If so, return. 
*/ - if (vm_name < GH_SELF_VM || vm_name > GH_VM_MAX) + if (vm_name < GH_SELF_VM || vm_name >= GH_VM_MAX) return -EINVAL; spin_lock(&gh_vm_table_lock); diff --git a/gen3auto.bzl b/gen3auto.bzl index 426a040f9e85..529f98c75e23 100644 --- a/gen3auto.bzl +++ b/gen3auto.bzl @@ -90,6 +90,7 @@ def define_gen3auto(): "drivers/md/dm-bow.ko", "drivers/media/platform/msm/npu/msm_npu.ko", "drivers/mfd/qcom-spmi-pmic.ko", + "drivers/misc/bootmarker_proxy.ko", "drivers/misc/qseecom_proxy.ko", "drivers/mmc/host/cqhci.ko", "drivers/mmc/host/sdhci-msm.ko", diff --git a/include/linux/bootmarker_kernel.h b/include/linux/bootmarker_kernel.h new file mode 100644 index 000000000000..173cfaa26290 --- /dev/null +++ b/include/linux/bootmarker_kernel.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ +#ifndef __BOOTMARKER_KERNEL_H_ +#define __BOOTMARKER_KERNEL_H_ + +#include + +int bootmarker_place_marker(const char *name); +#if IS_ENABLED(CONFIG_BOOTMARKER_PROXY) +struct bootmarker_drv_ops { + int (*bootmarker_place_marker)(const char *name); +}; +int provide_bootmarker_kernel_fun_ops(const struct bootmarker_drv_ops *ops); + +#endif /*CONFIG_BOOTMARKER_PROXY*/ +#endif /* __BOOTMARKER_KERNEL_H_ */ diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 7c8d65414a70..6820c15aac99 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -4,6 +4,8 @@ #include +#define BQ27XXX_RESISTANCE_TABLE_LENGTH 15 + enum bq27xxx_chip { BQ27000 = 1, /* bq27000, bq27200 */ BQ27010, /* bq27010, bq27210 */ @@ -78,6 +80,10 @@ struct bq27xxx_device_info { struct list_head list; struct mutex lock; u8 *regs; +#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM + u32 qmax_cell0; + u32 resist_table[BQ27XXX_RESISTANCE_TABLE_LENGTH]; +#endif }; void bq27xxx_battery_update(struct bq27xxx_device_info *di); diff --git 
a/include/linux/qca8337.h b/include/linux/qca8337.h new file mode 100644 index 000000000000..9c4f256f83c0 --- /dev/null +++ b/include/linux/qca8337.h @@ -0,0 +1,313 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __QCA8337_H__ +#define __QCA8337_H__ + +#define BITS(_s, _n) (((1UL << (_n)) - 1) << (_s)) + +#define QCA8337_PHY_ID 0x004dd036 +#define ATH8030_PHY_ID 0x004dd076 +#define ATH8031_PHY_ID 0x004dd074 +#define ATH8035_PHY_ID 0x004dd072 +#define QCA8337_ID_QCA8337 0x13 +#define QCA8337_NUM_PORTS 7 +/* Make sure that port0 is the cpu port */ +#define QCA8337_CPU_PORT 0 +/* size of the vlan table */ +#define QCA8337_MAX_VLANS 128 +#define QCA8337_NUM_PHYS 5 + +#define ADVERTISE_MULTI_PORT_PREFER 0x0400 + +#define QCA8337_AT803X_INTR_ENABLE 0x12 +#define QCA8337_AT803X_INTR_STATUS 0x13 +#define QCA8337_AT803X_SMART_SPEED 0x14 +#define QCA8337_AT803X_LED_CONTROL 0x18 +#define QCA8337_AT803X_WOL_ENABLE 0x01 +#define QCA8337_AT803X_DEVICE_ADDR 0x03 +#define QCA8337_AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C +#define QCA8337_AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B +#define QCA8337_AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A +#define QCA8337_AT803X_MMD_ACCESS_CONTROL 0x0D +#define QCA8337_AT803X_MMD_ACCESS_CONTROL_DATA 0x0E +#define QCA8337_AT803X_FUNC_DATA 0x4003 +#define QCA8337_AT803X_INER 0x0012 +#define QCA8337_AT803X_INER_INIT 0xec00 +#define 
QCA8337_AT803X_INSR 0x0013 +#define QCA8337_AT803X_DEBUG_ADDR 0x1D +#define QCA8337_AT803X_DEBUG_DATA 0x1E +#define QCA8337_AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05 +#define QCA8337_AT803X_DEBUG_RGMII_TX_CLK_DLY BIT(8) + +/* MASK_CTRL */ +#define QCA8337_REG_MASK_CTRL 0x0000 +#define QCA8337_CTRL_REVISION BITS(0, 8) +#define QCA8337_CTRL_REVISION_S 0 +#define QCA8337_CTRL_VERSION BITS(8, 8) +#define QCA8337_CTRL_VERSION_S 8 +#define QCA8337_CTRL_RESET BIT(31) + +/* PORT0/1_PAD_CTRL */ +#define QCA8337_REG_PAD0_CTRL 0x004 +#define QCA8337_REG_PAD5_CTRL 0x008 +#define QCA8337_REG_PAD6_CTRL 0x00c +#define QCA8337_PAD_MAC_MII_RXCLK_SEL BIT(0) +#define QCA8337_PAD_MAC_MII_TXCLK_SEL BIT(1) +#define QCA8337_PAD_MAC_MII_EN BIT(2) +#define QCA8337_PAD_MAC_GMII_RXCLK_SEL BIT(4) +#define QCA8337_PAD_MAC_GMII_TXCLK_SEL BIT(5) +#define QCA8337_PAD_MAC_GMII_EN BIT(6) +#define QCA8337_PAD_SGMII_EN BIT(7) +#define QCA8337_PAD_PHY_MII_RXCLK_SEL BIT(8) +#define QCA8337_PAD_PHY_MII_TXCLK_SEL BIT(9) +#define QCA8337_PAD_PHY_MII_EN BIT(10) +#define QCA8337_PAD_PHY_GMII_PIPE_RXCLK_SEL BIT(11) +#define QCA8337_PAD_PHY_GMII_RXCLK_SEL BIT(12) +#define QCA8337_PAD_PHY_GMII_TXCLK_SEL BIT(13) +#define QCA8337_PAD_PHY_GMII_EN BIT(14) +#define QCA8337_PAD_PHYX_GMII_EN BIT(16) +#define QCA8337_PAD_PHYX_RGMII_EN BIT(17) +#define QCA8337_PAD_PHYX_MII_EN BIT(18) +#define QCA8337_PAD_RGMII_RXCLK_DELAY_SEL BITS(20, 2) +#define QCA8337_PAD_RGMII_RXCLK_DELAY_SEL_S 20 +#define QCA8337_PAD_RGMII_TXCLK_DELAY_SEL BITS(22, 2) +#define QCA8337_PAD_RGMII_TXCLK_DELAY_SEL_S 22 +#define QCA8337_PAD_RGMII_RXCLK_DELAY_EN BIT(24) +#define QCA8337_PAD_RGMII_TXCLK_DELAY_EN BIT(25) +#define QCA8337_PAD_RGMII_EN BIT(26) + +/* PORT_STATUS */ +#define QCA8337_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) +#define QCA8337_PORT_STATUS_SPEED BITS(0, 2) +#define QCA8337_PORT_STATUS_SPEED_S 0 +#define QCA8337_PORT_STATUS_TXMAC BIT(2) +#define QCA8337_PORT_STATUS_RXMAC BIT(3) +#define QCA8337_PORT_STATUS_TXFLOW BIT(4) +#define 
QCA8337_PORT_STATUS_RXFLOW BIT(5) +#define QCA8337_PORT_STATUS_DUPLEX BIT(6) +#define QCA8337_PORT_STATUS_LINK_UP BIT(8) +#define QCA8337_PORT_STATUS_LINK_AUTO BIT(9) +#define QCA8337_PORT_STATUS_LINK_PAUSE BIT(10) + +/* GLOBAL_FW_CTRL0 */ +#define QCA8337_REG_GLOBAL_FW_CTRL0 0x620 +#define QCA8337_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) + +/* GLOBAL_FW_CTRL1 */ +#define QCA8337_REG_GLOBAL_FW_CTRL1 0x624 +#define QCA8337_IGMP_JN_L_DP_SH 24 +#define QCA8337_BROAD_DP_SHIFT 16 +#define QCA8337_MULTI_FLOOD_DP_SH 8 +#define QCA8337_UNI_FLOOD_DP_SHIFT 0 +#define QCA8337_IGMP_JOIN_LEAVE_DPALL (0x7f << QCA8337_IGMP_JN_L_DP_SH) +#define QCA8337_BROAD_DPALL (0x7f << QCA8337_BROAD_DP_SHIFT) +#define QCA8337_MULTI_FLOOD_DPALL (0x7f << QCA8337_MULTI_FLOOD_DP_SH) +#define QCA8337_UNI_FLOOD_DPALL (0x7f << QCA8337_UNI_FLOOD_DP_SHIFT) + +/* PWS_REG (POWER_ON_STRIP) */ +#define QCA8337_REG_POWER_ON_STRIP 0x010 +#define QCA8337_REG_POS_VAL 0x261320 +#define QCA8337_PWS_POWER_ON_SEL BIT(31) +#define QCA8337_PWS_LED_OPEN_EN BIT(24) +#define QCA8337_PWS_SERDES_AEN BIT(7) + +/* MAC_PWR_SEL*/ +#define QCA8337_MAC_PWR_SEL 0x0e4 +#define QCA8337_MAC_PWR_SEL_VAL 0xaa545 + +/* SGMII_CTRL */ +#define QCA8337_SGMII_CTRL_REG 0x0e0 +#define QCA8337_SGMII_CTRL_VAL 0xc74164de +#define QCA8337_SGMII_CTRL_MODE_CTRL BITS(22, 2) +#define QCA8337_SGMII_CTRL_MODE_CTRL_S 22 +#define QCA8337_SGMII_EN_LCKDT BIT(0) +#define QCA8337_SGMII_EN_PLL BIT(1) +#define QCA8337_SGMII_EN_RX BIT(2) +#define QCA8337_SGMII_EN_TX BIT(3) +#define QCA8337_SGMII_EN_SD BIT(4) +#define QCA8337_SGMII_BW_HIGH BIT(6) +#define QCA8337_SGMII_SEL_CLK125M BIT(7) +#define QCA8337_SGMII_TXDR_CTRL_600mV BIT(10) +#define QCA8337_SGMII_CDR_BW_8 BIT(13) +#define QCA8337_SGMII_DIS_AUTO_LPI_25M BIT(16) +#define QCA8337_SGMII_MODE_CTRL_SGMII_PHY BIT(22) +#define QCA8337_SGMII_PAUSE_SG_TX_EN_25M BIT(24) +#define QCA8337_SGMII_ASYM_PAUSE_25M BIT(25) +#define QCA8337_SGMII_PAUSE_25M BIT(26) +#define QCA8337_SGMII_HALF_DUPLEX_25M BIT(30) +#define 
QCA8337_SGMII_FULL_DUPLEX_25M BIT(31) + +/* PORT_LOOKUP_CTRL */ +#define QCA8337_REG_PORT_LOOKUP(_i) (0x660 + (_i) * 0xc) +#define QCA8337_PORT_LOOKUP_MEMBER BITS(0, 7) +#define QCA8337_PORT_LOOKUP_IN_MODE BITS(8, 2) +#define QCA8337_PORT_LOOKUP_IN_MODE_S 8 +#define QCA8337_PORT_LOOKUP_STATE BITS(16, 3) +#define QCA8337_PORT_LOOKUP_STATE_S 16 +#define QCA8337_PORT_LOOKUP_LEARN BIT(20) + +/* PORT_VLAN_CTRL0 */ +#define QCA8337_REG_PORT_VLAN0(_i) (0x420 + (_i) * 0x8) +#define QCA8337_PORT_VLAN0_DEF_SVID BITS(0, 12) +#define QCA8337_PORT_VLAN0_DEF_SVID_S 0 +#define QCA8337_PORT_VLAN0_DEF_CVID BITS(16, 12) +#define QCA8337_PORT_VLAN0_DEF_CVID_S 16 + +/* PORT_VLAN_CTRL1 */ +#define QCA8337_REG_PORT_VLAN1(_i) (0x424 + (_i) * 0x8) +#define QCA8337_PORT_VLAN1_PORT_VLAN_PROP BIT(6) +#define QCA8337_PORT_VLAN1_OUT_MODE BITS(12, 2) +#define QCA8337_PORT_VLAN1_OUT_MODE_S 12 +#define QCA8337_PORT_VLAN1_OUT_MODE_UNMOD 0 +#define QCA8337_PORT_VLAN1_OUT_MODE_UNTAG 1 +#define QCA8337_PORT_VLAN1_OUT_MODE_TAG 2 +#define QCA8337_PORT_VLAN1_OUT_MODE_UNTOUCH 3 + +/* MODULE_EN */ +#define QCA8337_REG_MODULE_EN 0x030 +#define QCA8337_MODULE_EN_MIB BIT(0) + +/* MIB */ +#define QCA8337_REG_MIB 0x034 +#define QCA8337_MIB_FLUSH BIT(24) +#define QCA8337_MIB_CPU_KEEP BIT(20) +#define QCA8337_MIB_BUSY BIT(17) + +/* PORT_HEADER_CTRL */ +#define QCA8337_REG_PORT_HEADER(_i) (0x09c + (_i) * 4) +#define QCA8337_PORT_HDR_CTRL_RX_S 2 +#define QCA8337_PORT_HDR_CTRL_TX_S 0 +#define QCA8337_PORT_HDR_CTRL_ALL 2 + +/* EEE_CTRL */ +#define QCA8337_REG_EEE_CTRL 0x100 +#define QCA8337_EEE_CTRL_DISABLE 0x0 /*EEE disable*/ + +/* VTU_FUNC_REG0 */ +#define QCA8337_REG_VTU_FUNC0 0x0610 +#define QCA8337_VTU_FUNC0_EG_MODE BITS(4, 14) +#define QCA8337_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2) +#define QCA8337_VTU_FUNC0_EG_MODE_KEEP 0 +#define QCA8337_VTU_FUNC0_EG_MODE_UNTAG 1 +#define QCA8337_VTU_FUNC0_EG_MODE_TAG 2 +#define QCA8337_VTU_FUNC0_EG_MODE_NOT 3 +#define QCA8337_VTU_FUNC0_IVL BIT(19) +#define 
QCA8337_VTU_FUNC0_VALID BIT(20) + +/* VTU_FUNC_REG1 */ +#define QCA8337_REG_VTU_FUNC1 0x0614 +#define QCA8337_VTU_FUNC1_OP BITS(0, 3) +#define QCA8337_VTU_FUNC1_OP_NOOP 0 +#define QCA8337_VTU_FUNC1_OP_FLUSH 1 +#define QCA8337_VTU_FUNC1_OP_LOAD 2 +#define QCA8337_VTU_FUNC1_OP_PURGE 3 +#define QCA8337_VTU_FUNC1_OP_REMOVE_PORT 4 +#define QCA8337_VTU_FUNC1_OP_GET_NEXT 5 +#define QCA8337_VTU_FUNC1_OP_GET_ONE 6 +#define QCA8337_VTU_FUNC1_FULL BIT(4) +#define QCA8337_VTU_FUNC1_PORT BITS(8, 4) +#define QCA8337_VTU_FUNC1_PORT_S 8 +#define QCA8337_VTU_FUNC1_VID BITS(16, 12) +#define QCA8337_VTU_FUNC1_VID_S 16 +#define QCA8337_VTU_FUNC1_BUSY BIT(31) + +#define QCA8337_REG_ATU_FUNC 0x60c +#define QCA8337_ATU_FUNC_BUSY BIT(31) +#define QCA8337_ATU_FUNC_OP_GET_NEXT 0x6 +#define QCA8337_REG_ATU_DATA0 0x600 +#define QCA8337_REG_ATU_DATA1 0x604 +#define QCA8337_REG_ATU_DATA2 0x608 + +#define QCA8337_GLOBAL_INT1 0x0024 +#define QCA8337_GLOBAL_INT1_MASK 0x002c + +/* port speed */ +enum { + QCA8337_PORT_SPEED_10M = 0, + QCA8337_PORT_SPEED_100M = 1, + QCA8337_PORT_SPEED_1000M = 2, + QCA8337_PORT_SPEED_ERR = 3, +}; + +/* ingress 802.1q mode */ +enum { + QCA8337_IN_PORT_ONLY = 0, + QCA8337_IN_PORT_FALLBACK = 1, + QCA8337_IN_VLAN_ONLY = 2, + QCA8337_IN_SECURE = 3 +}; + +/* egress 802.1q mode */ +enum { + QCA8337_OUT_KEEP = 0, + QCA8337_OUT_STRIP_VLAN = 1, + QCA8337_OUT_ADD_VLAN = 2 +}; + +/* port forwarding state */ +enum { + QCA8337_PORT_STATE_DISABLED = 0, + QCA8337_PORT_STATE_BLOCK = 1, + QCA8337_PORT_STATE_LISTEN = 2, + QCA8337_PORT_STATE_LEARN = 3, + QCA8337_PORT_STATE_FORWARD = 4 +}; + +struct qca8337_priv; + +struct qca8337_switch_ops { + int (*hw_init)(struct qca8337_priv *priv); + void (*reset_switch)(struct qca8337_priv *priv); + + /* Switch internal register read/write function */ + u32 (*read)(struct qca8337_priv *priv, u32 reg); + void (*write)(struct qca8337_priv *priv, u32 reg, u32 val); +}; + +struct port_link_info { + bool link; + int speed; + int duplex; + int aneg; + int 
rx_flow; + int tx_flow; +}; + +struct qca8337_priv { + struct device *dev; + struct phy_device *phy; + u8 chip_ver; + u8 chip_rev; + u8 cpu_port; + u8 ports; + u16 vlans; + u8 num_phy; + u32 old_port_status; + char buf[2048]; + + struct qca8337_switch_ops *ops; + struct regmap *regmap; +}; + +struct qca8337_mib_desc { + unsigned int size; + unsigned int offset; + const char *name; +}; + +void qca8337_check(void); + +u32 qca8337_read(struct qca8337_priv *priv, u32 reg); +void qca8337_write(struct qca8337_priv *priv, u32 reg, u32 val); +#endif /*__QCA8337_H__*/ diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 0726de4df0f0..ed40c565a8a3 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -308,5 +308,7 @@ struct plat_stmmacenet_data { bool mdio_op_busy; atomic_t phy_clks_suspended; struct completion mdio_op; + int board_type; + int phy_type; }; #endif diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h new file mode 100644 index 000000000000..c38d1a841aa7 --- /dev/null +++ b/include/linux/usb/msm_hsusb.h @@ -0,0 +1,390 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __ASM_ARCH_MSM_HSUSB_H +#define __ASM_ARCH_MSM_HSUSB_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * Used different VDDCX voltage values + */ +enum usb_vdd_value { + VDD_NONE = 0, + VDD_MIN, + VDD_MAX, + VDD_VAL_MAX, +}; + +/** + * Requested USB votes for NOC frequency + * + * USB_NOC_NOM_VOTE Vote for NOM set of NOC frequencies + * USB_NOC_SVS_VOTE Vote for SVS set of NOC frequencies + * + */ +enum usb_noc_mode { + USB_NOC_NOM_VOTE = 0, + USB_NOC_SVS_VOTE, + USB_NOC_NUM_VOTE, +}; + +/** + * Different states involved in USB charger detection. + * + * USB_CHG_STATE_UNDEFINED USB charger is not connected or detection + * process is not yet started. 
+ * USB_CHG_STATE_IN_PROGRESS Charger detection in progress + * USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact. + * USB_CHG_STATE_DCD_DONE Data pin contact is detected. + * USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects + * between SDP and DCP/CDP). + * USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects + * between DCP and CDP). + * USB_CHG_STATE_DETECTED USB charger type is determined. + * USB_CHG_STATE_QUEUE_SM_WORK SM work to start/stop gadget is queued. + * + */ +enum usb_chg_state { + USB_CHG_STATE_UNDEFINED = 0, + USB_CHG_STATE_IN_PROGRESS, + USB_CHG_STATE_WAIT_FOR_DCD, + USB_CHG_STATE_DCD_DONE, + USB_CHG_STATE_PRIMARY_DONE, + USB_CHG_STATE_SECONDARY_DONE, + USB_CHG_STATE_DETECTED, + USB_CHG_STATE_QUEUE_SM_WORK, +}; + +/** + * USB charger types + * + * USB_INVALID_CHARGER Invalid USB charger. + * USB_SDP_CHARGER Standard downstream port. Refers to a downstream port + * on USB2.0 compliant host/hub. + * USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger). + * USB_CDP_CHARGER Charging downstream port. Enumeration can happen and + * IDEV_CHG_MAX can be drawn irrespective of USB state. + * USB_NONCOMPLIANT_CHARGER A non-compliant charger pull DP and DM to specific + * voltages between 2.0-3.3v for identification. + * + */ +enum usb_chg_type { + USB_INVALID_CHARGER = 0, + USB_SDP_CHARGER, + USB_DCP_CHARGER, + USB_CDP_CHARGER, + USB_NONCOMPLIANT_CHARGER, + USB_FLOATED_CHARGER, +}; + +/** + * Maintain state for hvdcp external charger status + * DEFAULT This is used when DCP is detected + * ACTIVE This is used when ioctl is called to block LPM + * INACTIVE This is used when ioctl is called to unblock LPM + */ + +enum usb_ext_chg_status { + DEFAULT = 1, + ACTIVE, + INACTIVE, +}; + +/** + * USB ID state + */ +enum usb_id_state { + USB_ID_GROUND = 0, + USB_ID_FLOAT, +}; + +#define USB_NUM_BUS_CLOCKS 3 + +/** + * struct msm_otg: OTG driver data. Shared by HCD and DCD. + * @otg: USB OTG Transceiver structure. 
+ * @pdata: otg device platform data. + * @irq: IRQ number assigned for HSUSB controller. + * @async_irq: IRQ number used by some controllers during low power state + * @phy_irq: IRQ number assigned for PHY to notify events like id and line + state changes. + * @pclk: clock struct of iface_clk. + * @core_clk: clock struct of core_bus_clk. + * @sleep_clk: clock struct of sleep_clk for USB PHY. + * @phy_reset_clk: clock struct of phy_reset_clk for USB PHY. This clock is + a reset only clock and resets the PHY, ULPI bridge and + CSR wrapper. + * @phy_por_clk: clock struct of phy_por_clk for USB PHY. This clock is + a reset only clock and resets only the PHY (POR). + * @phy_csr_clk: clock struct of phy_csr_clk for USB PHY. This clock is + required to access PHY CSR registers via AHB2PHY interface. + * @bus_clks: bimc/snoc/pcnoc clock struct. + * @core_reset: Reset control for core_clk + * @phy_reset: Reset control for phy_reset_clk + * @phy_por_reset: Reset control for phy_por_clk + * @default_noc_mode: default frequency for NOC clocks - SVS or NOM + * @core_clk_rate: core clk max frequency + * @regs: ioremapped register base address. + * @usb_phy_ctrl_reg: relevant PHY_CTRL_REG register base address. + * @inputs: OTG state machine inputs(Id, SessValid etc). + * @sm_work: OTG state machine work. + * @sm_work_pending: OTG state machine work is pending, queued post pm_resume + * @resume_pending: USB h/w lpm_exit pending. Done on next sm_work run + * @pm_suspended: OTG device is system(PM) suspended. + * @pm_notify: Notifier to receive system wide PM transition events. + It is used to defer wakeup events processing until + system is RESUMED. + * @in_lpm: indicates low power mode (LPM) state. + * @async_int: IRQ line on which ASYNC interrupt arrived in LPM. + * @cur_power: The amount of mA available from downstream port. + * @otg_wq: Strict order otg workqueue for OTG works (SM/ID/SUSPEND). + * @chg_work: Charger detection work. 
+ * @chg_state: The state of charger detection process. + * @chg_type: The type of charger attached. + * @chg_detection: True if PHY is doing charger type detection. + * @bus_perf_client: Bus performance client handle to request BUS bandwidth + * @host_bus_suspend: indicates host bus suspend or not. + * @device_bus_suspend: indicates device bus suspend or not. + * @bus_clks_enabled: indicates pcnoc/snoc/bimc clocks are on or not. + * @is_ext_chg_dcp: To indicate whether charger detected by external entity + SMB hardware is DCP charger or not. + * @ext_id_irq: IRQ for ID interrupt. + * @phy_irq_pending: Gets set when PHY IRQ arrives in LPM. + * @id_state: Indicates USBID line status. + * @rm_pulldown: Indicates pulldown status on D+ and D- data lines. + * @dpdm_desc: Regulator descriptor for D+ and D- voting. + * @dpdm_rdev: Regulator class device for dpdm regulator. + * @dbg_idx: Dynamic debug buffer Index. + * @dbg_lock: Dynamic debug buffer Lock. + * @buf: Dynamic Debug Buffer. + * @max_nominal_system_clk_rate: max freq at which system clock can run in + nominal mode. + * @sdp_check: SDP detection work in case of USB_FLOAT power supply + * @notify_charger_work: Charger notification work. 
+ */ +struct msm_otg { + struct usb_phy phy; + struct msm_otg_platform_data *pdata; + struct platform_device *pdev; + struct mutex lock; + int irq; + int async_irq; + int phy_irq; + struct clk *xo_clk; + struct clk *pclk; + struct clk *core_clk; + struct clk *sleep_clk; + struct clk *phy_reset_clk; + struct clk *phy_por_clk; + struct clk *phy_csr_clk; + struct clk *bus_clks[USB_NUM_BUS_CLOCKS]; + struct clk *phy_ref_clk; + struct reset_control *core_reset; + struct reset_control *phy_reset; + struct reset_control *phy_por_reset; + long core_clk_rate; + long core_clk_svs_rate; + long core_clk_nominal_rate; + enum usb_noc_mode default_noc_mode; + struct resource *io_res; + void __iomem *regs; + void __iomem *phy_csr_regs; + void __iomem *usb_phy_ctrl_reg; +#define ID 0 +#define B_SESS_VLD 1 +#define A_BUS_SUSPEND 14 + unsigned long inputs; + struct work_struct sm_work; + bool sm_work_pending; + bool resume_pending; + atomic_t pm_suspended; + struct notifier_block pm_notify; + atomic_t in_lpm; + bool err_event_seen; + int async_int; + unsigned int cur_power; + struct workqueue_struct *otg_wq; + struct delayed_work chg_work; + struct delayed_work id_status_work; + enum usb_chg_state chg_state; + enum usb_chg_type chg_type; + bool chg_detection; + unsigned int dcd_time; + unsigned long caps; + uint32_t bus_perf_client; + bool host_bus_suspend; + bool device_bus_suspend; + bool bus_clks_enabled; + /* + * Allowing PHY power collpase turns off the HSUSB 3.3v and 1.8v + * analog regulators while going to low power mode. + * Currently only 28nm PHY has the support to allowing PHY + * power collapse since it doesn't have leakage currents while + * turning off the power rails. + */ +#define ALLOW_PHY_POWER_COLLAPSE BIT(0) + /* + * Allow PHY RETENTION mode before turning off the digital + * voltage regulator(VDDCX). + */ +#define ALLOW_PHY_RETENTION BIT(1) + /* + * Allow putting the core in Low Power mode, when + * USB bus is suspended but cable is connected. 
+ */ +#define ALLOW_LPM_ON_DEV_SUSPEND BIT(2) + /* + * Allowing PHY regulators LPM puts the HSUSB 3.3v and 1.8v + * analog regulators into LPM while going to USB low power mode. + */ +#define ALLOW_PHY_REGULATORS_LPM BIT(3) + /* + * Allow PHY RETENTION mode before turning off the digital + * voltage regulator(VDDCX) during host mode. + */ +#define ALLOW_HOST_PHY_RETENTION BIT(4) + /* + * Allow VDD minimization without putting PHY into retention + * for fixing PHY current leakage issue when LDOs ar turned off. + */ +#define ALLOW_VDD_MIN_WITH_RETENTION_DISABLED BIT(5) + + /* + * PHY can keep D+ pull-up during peripheral bus suspend and + * D+/D- pull-down during host bus suspend without any + * re-work. This is possible only when PHY DVDD is supplied + * by a PMIC LDO (unlike VDDCX/VDDMX). + */ +#define ALLOW_BUS_SUSPEND_WITHOUT_REWORK BIT(6) + unsigned long lpm_flags; +#define PHY_PWR_COLLAPSED BIT(0) +#define PHY_RETENTIONED BIT(1) +#define XO_SHUTDOWN BIT(2) +#define CLOCKS_DOWN BIT(3) +#define PHY_REGULATORS_LPM BIT(4) + int reset_counter; + unsigned int online; + + dev_t ext_chg_dev; + struct pinctrl *phy_pinctrl; + bool is_ext_chg_dcp; + struct qpnp_vadc_chip *vadc_dev; + int ext_id_irq; + bool phy_irq_pending; + enum usb_id_state id_state; + bool rm_pulldown; + struct regulator_desc dpdm_rdesc; + struct regulator_dev *dpdm_rdev; +/* Maximum debug message length */ +#define DEBUG_MSG_LEN 128UL +/* Maximum number of messages */ +#define DEBUG_MAX_MSG 256UL + unsigned int dbg_idx; + rwlock_t dbg_lock; + + char (buf[DEBUG_MAX_MSG])[DEBUG_MSG_LEN]; /* buffer */ + unsigned int vbus_state; + unsigned int usb_irq_count; + int pm_qos_latency; + unsigned int notify_current_mA; + struct pm_qos_request pm_qos_req_dma; + struct delayed_work perf_vote_work; + struct delayed_work sdp_check; + struct work_struct notify_charger_work; + bool enable_sdp_check_timer; + struct icc_path *icc_paths; +}; + +struct ci13xxx_platform_data { + u8 usb_core_id; + int *tlmm_init_seq; + int 
tlmm_seq_count; + /* + * value of 2^(log2_itc-1) will be used as the interrupt threshold + * (ITC), when log2_itc is between 1 to 7. + */ + int log2_itc; + bool l1_supported; + bool enable_ahb2ahb_bypass; + bool enable_streaming; + bool enable_axi_prefetch; +}; + +/** + * struct msm_hsic_host_platform_data - platform device data + * for msm_hsic_host driver. + * @phy_sof_workaround: Enable ALL PHY SOF bug related workarounds for + * SUSPEND, RESET and RESUME. + * @phy_susp_sof_workaround: Enable PHY SOF workaround for + * SUSPEND. + * @phy_reset_sof_workaround: Enable PHY SOF workaround for + * RESET. + * @dis_internal_clk_gating: If set, internal clock gating in controller + * is disabled. + * + */ +struct msm_hsic_host_platform_data { + unsigned int strobe; + unsigned int data; + bool ignore_cal_pad_config; + bool phy_sof_workaround; + bool dis_internal_clk_gating; + bool phy_susp_sof_workaround; + bool phy_reset_sof_workaround; + u32 reset_delay; + int strobe_pad_offset; + int data_pad_offset; + + struct msm_bus_scale_pdata *bus_scale_table; + unsigned int log2_irq_thresh; + + /* gpio used to resume peripheral */ + unsigned int resume_gpio; + int *tlmm_init_seq; + int tlmm_seq_count; + + /*swfi latency is required while driving resume on to the bus */ + u32 swfi_latency; + + /*standalone latency is required when HSCI is active*/ + u32 standalone_latency; + bool pool_64_bit_align; + bool enable_hbm; + bool disable_park_mode; + bool consider_ipa_handshake; + bool ahb_async_bridge_bypass; + bool disable_cerr; +}; + +#ifdef CONFIG_USB_BAM +void msm_bam_set_usb_host_dev(struct device *dev); +int msm_do_bam_disable_enable(enum usb_ctrl ctrl); +#else +static inline void msm_bam_set_usb_host_dev(struct device *dev) {} +static inline int msm_do_bam_disable_enable(enum usb_ctrl ctrl) +{ return true; } +#endif +#ifdef CONFIG_USB_CI13XXX_MSM +void msm_hw_soft_reset(void); +#else +static inline void msm_hw_soft_reset(void) +{ +} +#endif + +#endif diff --git 
a/include/linux/usb/msm_hsusb_hw.h b/include/linux/usb/msm_hsusb_hw.h new file mode 100644 index 000000000000..e726626b6e94 --- /dev/null +++ b/include/linux/usb/msm_hsusb_hw.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__ +#define __LINUX_USB_GADGET_MSM72K_UDC_H__ + +/* USB phy selector - in TCSR address range */ +#define USB2_PHY_SEL 0xfd4ab000 + +#define USB_AHBBURST (MSM_USB_BASE + 0x0090) +#define USB_AHBMODE (MSM_USB_BASE + 0x0098) +#define USB_GENCONFIG (MSM_USB_BASE + 0x009C) +#define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0) +#define USB_HS_GPTIMER_BASE (MSM_USB_BASE + 0x80) +#define ULPI_TX_PKT_EN_CLR_FIX BIT(19) + +#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */ +#define USB_HS_APF_CTRL (MSM_USB_BASE + 0x0380) + +#define APF_CTRL_EN BIT(0) + +#define USB_USBCMD (MSM_USB_BASE + 0x0140) +#define USB_USBSTS (MSM_USB_BASE + 0x0144) +#define USB_PORTSC (MSM_USB_BASE + 0x0184) +#define USB_OTGSC (MSM_USB_BASE + 0x01A4) +#define USB_USBMODE (MSM_USB_BASE + 0x01A8) +#define USB_PHY_CTRL (MSM_USB_BASE + 0x0240) +#define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278) + +#define GENCONFIG_2_SESS_VLD_CTRL_EN BIT(7) +#define GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN BIT(12) +#define GENCONFIG_2_SYS_CLK_HOST_DEV_GATE_EN BIT(13) +#define GENCONFIG_2_DPSE_DMSE_HV_INTR_EN BIT(15) +#define USBCMD_SESS_VLD_CTRL BIT(25) + +#define USBCMD_RESET 2 +#define USB_USBINTR (MSM_USB_BASE + 0x0148) +#define USB_FRINDEX (MSM_USB_BASE + 0x014C) + +#define AHB2AHB_BYPASS BIT(31) +#define AHB2AHB_BYPASS_BIT_MASK BIT(31) +#define AHB2AHB_BYPASS_CLEAR (0 << 31) +#define USB_L1_EP_CTRL (MSM_USB_BASE + 0x0250) +#define USB_L1_CONFIG (MSM_USB_BASE + 0x0254) + +#define L1_CONFIG_LPM_EN BIT(4) +#define L1_CONFIG_REMOTE_WAKEUP BIT(5) +#define L1_CONFIG_GATE_SYS_CLK BIT(7) +#define L1_CONFIG_PHY_LPM BIT(10) +#define L1_CONFIG_PLL BIT(11) + +#define 
PORTSC_PHCD (1 << 23) /* phy suspend mode */ +#define PORTSC_PTS_MASK (3 << 30) +#define PORTSC_PTS_ULPI (2 << 30) +#define PORTSC_PTS_SERIAL (3 << 30) +#define PORTSC_LS (3 << 10) +#define PORTSC_LS_DM (1 << 10) +#define PORTSC_CCS (1 << 0) + +#define USB_ULPI_VIEWPORT (MSM_USB_BASE + 0x0170) +#define ULPI_RUN (1 << 30) +#define ULPI_WRITE (1 << 29) +#define ULPI_READ (0 << 29) +#define ULPI_SYNC_STATE (1 << 27) +#define ULPI_ADDR(n) (((n) & 255) << 16) +#define ULPI_DATA(n) ((n) & 255) +#define ULPI_DATA_READ(n) (((n) >> 8) & 255) + +#define GENCONFIG_BAM_DISABLE (1 << 13) +#define GENCONFIG_TXFIFO_IDLE_FORCE_DISABLE (1 << 4) +#define GENCONFIG_ULPI_SERIAL_EN (1 << 5) + +/* synopsys 28nm phy registers */ +#define ULPI_PWR_CLK_MNG_REG 0x88 +#define OTG_COMP_DISABLE BIT(0) + +#define ULPI_MISC_A 0x96 +#define ULPI_MISC_A_VBUSVLDEXTSEL BIT(1) +#define ULPI_MISC_A_VBUSVLDEXT BIT(0) + +#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */ +#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */ +#define PHY_RETEN (1 << 1) /* PHY retention enable/disable */ +#define PHY_IDHV_INTEN (1 << 8) /* PHY ID HV interrupt */ +#define PHY_OTGSESSVLDHV_INTEN (1 << 9) /* PHY Session Valid HV int. 
*/ +#define PHY_CLAMP_DPDMSE_EN (1 << 21) /* PHY mpm DP DM clamp enable */ +#define PHY_POR_BIT_MASK BIT(0) +#define PHY_POR_ASSERT (1 << 0) /* USB2 28nm PHY POR ASSERT */ +#define PHY_POR_DEASSERT (0 << 0) /* USB2 28nm PHY POR DEASSERT */ + +/* OTG definitions */ +#define OTGSC_INTSTS_MASK (0x7f << 16) +#define OTGSC_IDPU (1 << 5) +#define OTGSC_ID (1 << 8) +#define OTGSC_BSV (1 << 11) +#define OTGSC_IDIS (1 << 16) +#define OTGSC_BSVIS (1 << 19) +#define OTGSC_IDIE (1 << 24) +#define OTGSC_BSVIE (1 << 27) + +/* USB PHY CSR registers and bit definitions */ + +#define USB_PHY_CSR_PHY_UTMI_CTRL0 (MSM_USB_PHY_CSR_BASE + 0x060) +#define TERM_SEL BIT(6) +#define SLEEP_M BIT(1) +#define PORT_SELECT BIT(2) +#define OP_MODE_MASK 0x30 + +#define USB_PHY_CSR_PHY_UTMI_CTRL1 (MSM_USB_PHY_CSR_BASE + 0x064) +#define DM_PULLDOWN BIT(3) +#define DP_PULLDOWN BIT(2) +#define XCVR_SEL_MASK 0x3 + +#define USB_PHY_CSR_PHY_UTMI_CTRL2 (MSM_USB_PHY_CSR_BASE + 0x068) + +#define USB_PHY_CSR_PHY_UTMI_CTRL3 (MSM_USB_PHY_CSR_BASE + 0x06c) + +#define USB_PHY_CSR_PHY_UTMI_CTRL4 (MSM_USB_PHY_CSR_BASE + 0x070) +#define TX_VALID BIT(0) + +#define USB_PHY_CSR_PHY_CTRL_COMMON0 (MSM_USB_PHY_CSR_BASE + 0x078) +#define SIDDQ BIT(2) + +#define USB_PHY_CSR_PHY_CTRL1 (MSM_USB_PHY_CSR_BASE + 0x08C) +#define ID_HV_CLAMP_EN_N BIT(1) + +#define USB_PHY_CSR_PHY_CTRL2 (MSM_USB_PHY_CSR_BASE + 0x090) +#define USB2_SUSPEND_N BIT(6) + +#define USB_PHY_CSR_PHY_CTRL3 (MSM_USB_PHY_CSR_BASE + 0x094) +#define CLAMP_MPM_DPSE_DMSE_EN_N BIT(2) + +#define USB_PHY_CSR_PHY_CFG0 (MSM_USB_PHY_CSR_BASE + 0x0c4) + +#define USB2_PHY_USB_PHY_IRQ_CMD (MSM_USB_PHY_CSR_BASE + 0x0D0) +#define USB2_PHY_USB_PHY_INTERRUPT_SRC_STATUS (MSM_USB_PHY_CSR_BASE + 0x05C) + +#define USB2_PHY_USB_PHY_INTERRUPT_CLEAR0 (MSM_USB_PHY_CSR_BASE + 0x0DC) +#define USB2_PHY_USB_PHY_DPDM_CLEAR_MASK 0x1E + +#define USB2_PHY_USB_PHY_INTERRUPT_CLEAR1 (MSM_USB_PHY_CSR_BASE + 0x0E0) + +#define USB2_PHY_USB_PHY_INTERRUPT_MASK0 (MSM_USB_PHY_CSR_BASE + 0x0D4) 
+#define USB2_PHY_USB_PHY_DP_1_0_MASK BIT(4) +#define USB2_PHY_USB_PHY_DP_0_1_MASK BIT(3) +#define USB2_PHY_USB_PHY_DM_1_0_MASK BIT(2) +#define USB2_PHY_USB_PHY_DM_0_1_MASK BIT(1) + +#define USB2_PHY_USB_PHY_INTERRUPT_MASK1 (MSM_USB_PHY_CSR_BASE + 0x0D8) + +#define USB_PHY_IDDIG_1_0 BIT(7) + +#define USB_PHY_IDDIG_RISE_MASK BIT(0) +#define USB_PHY_IDDIG_FALL_MASK BIT(1) +#define USB_PHY_ID_MASK (USB_PHY_IDDIG_RISE_MASK | USB_PHY_IDDIG_FALL_MASK) + +#define ENABLE_DP_MANUAL_PULLUP BIT(0) +#define ENABLE_SECONDARY_PHY BIT(1) +#define PHY_SOFT_CONNECT BIT(12) + +/* + * The following are bit fields describing the usb_request.udc_priv word. + * These bit fields are set by function drivers that wish to queue + * usb_requests with sps/bam parameters. + */ +#define MSM_TX_PIPE_ID_OFS (16) +#define MSM_SPS_MODE BIT(5) +#define MSM_IS_FINITE_TRANSFER BIT(6) +#define MSM_PRODUCER BIT(7) +#define MSM_DISABLE_WB BIT(8) +#define MSM_ETD_IOC BIT(9) +#define MSM_INTERNAL_MEM BIT(10) +#define MSM_VENDOR_ID BIT(16) + +#endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */ diff --git a/include/linux/usb_bam.h b/include/linux/usb_bam.h index 2030bed70fdb..5f60e9f99f94 100644 --- a/include/linux/usb_bam.h +++ b/include/linux/usb_bam.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2011-2017, 2019, 2021, The Linux Foundation. All rights reserved. - * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
 */ #ifndef _USB_BAM_H_ @@ -17,6 +17,7 @@ /* Supported USB controllers*/ enum usb_ctrl { USB_CTRL_UNUSED = 0, + CI_CTRL, /* ChipIdea controller */ NUM_CTRL, }; @@ -191,6 +192,7 @@ int usb_bam_alloc_fifos(enum usb_ctrl cur_bam, u8 idx); int usb_bam_free_fifos(enum usb_ctrl cur_bam, u8 idx); int get_qdss_bam_info(enum usb_ctrl cur_bam, u8 idx, phys_addr_t *p_addr, u32 *bam_size); +bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable); #else static inline int usb_bam_connect(enum usb_ctrl bam, u8 idx, u32 *bam_pipe_idx, unsigned long iova) @@ -264,6 +266,11 @@ static inline int get_qdss_bam_info(enum usb_ctrl cur_bam, u8 idx, { return false; } + +static inline bool msm_usb_bam_enable(enum usb_ctrl ctrl, bool bam_enable) +{ + return true; +} #endif /* CONFIG_PM */ diff --git a/include/net/cnss.h b/include/net/cnss.h new file mode 100644 index 000000000000..34dc62a28c98 --- /dev/null +++ b/include/net/cnss.h @@ -0,0 +1,255 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _NET_CNSS_H_ +#define _NET_CNSS_H_ + +#include +#include +#include +#include +#include + +#ifdef CONFIG_CNSS +#define MAX_FIRMWARE_SIZE (1 * 1024 * 1024) +#define CNSS_MAX_FILE_NAME 20 +#define PINCTRL_SLEEP 0 +#define PINCTRL_ACTIVE 1 + +enum cnss_bus_width_type { + CNSS_BUS_WIDTH_NONE, + CNSS_BUS_WIDTH_LOW, + CNSS_BUS_WIDTH_MEDIUM, + CNSS_BUS_WIDTH_HIGH +}; + +enum cnss_cc_src { + CNSS_SOURCE_CORE, + CNSS_SOURCE_11D, + CNSS_SOURCE_USER +}; + +/* FW image files */ +struct cnss_fw_files { + char image_file[CNSS_MAX_FILE_NAME]; + char board_data[CNSS_MAX_FILE_NAME]; + char otp_data[CNSS_MAX_FILE_NAME]; + char utf_file[CNSS_MAX_FILE_NAME]; + char utf_board_data[CNSS_MAX_FILE_NAME]; + char epping_file[CNSS_MAX_FILE_NAME]; + char evicted_data[CNSS_MAX_FILE_NAME]; +}; + +struct cnss_wlan_runtime_ops { + int (*runtime_suspend)(struct pci_dev *pdev); + int (*runtime_resume)(struct pci_dev *pdev); +}; + +struct cnss_wlan_driver { + char *name; + int (*probe)(struct pci_dev *pdev, const struct pci_device_id *id); + void (*remove)(struct pci_dev *pdev); + int (*reinit)(struct pci_dev *pdev, const struct pci_device_id *id); + void (*shutdown)(struct pci_dev *pdev); + void (*crash_shutdown)(struct pci_dev *pdev); + int (*suspend)(struct pci_dev *pdev, pm_message_t state); + int (*resume)(struct pci_dev *pdev); + void (*modem_status)(struct pci_dev *pdev, int state); + void (*update_status)(struct pci_dev *pdev, uint32_t status); + struct cnss_wlan_runtime_ops *runtime_ops; + const struct pci_device_id *id_table; +}; + +/* + * codeseg_total_bytes: Total bytes across all the codesegment blocks + * num_codesegs: No of Pages used + * codeseg_size: Size of each segment. 
Should be power of 2 and multiple of 4K + * codeseg_size_log2: log2(codeseg_size) + * codeseg_busaddr: Physical address of the DMAble memory;4K aligned + */ + +#define CODESWAP_MAX_CODESEGS 16 +struct codeswap_codeseg_info { + u32 codeseg_total_bytes; + u32 num_codesegs; + u32 codeseg_size; + u32 codeseg_size_log2; + void *codeseg_busaddr[CODESWAP_MAX_CODESEGS]; +}; + +struct image_desc_info { + dma_addr_t fw_addr; + u32 fw_size; + dma_addr_t bdata_addr; + u32 bdata_size; +}; + +/* platform capabilities */ +enum cnss_platform_cap_flag { + CNSS_HAS_EXTERNAL_SWREG = 0x01, + CNSS_HAS_UART_ACCESS = 0x02, +}; + +struct cnss_platform_cap { + u32 cap_flag; +}; + +/* WLAN driver status, keep it aligned with cnss2 */ +enum cnss_driver_status { + CNSS_UNINITIALIZED, + CNSS_INITIALIZED, + CNSS_LOAD_UNLOAD, + CNSS_RECOVERY, + CNSS_FW_DOWN, + CNSS_SSR_FAIL, +}; + +enum cnss_runtime_request { + CNSS_PM_RUNTIME_GET, + CNSS_PM_RUNTIME_PUT, + CNSS_PM_RUNTIME_MARK_LAST_BUSY, + CNSS_PM_RUNTIME_RESUME, + CNSS_PM_RUNTIME_PUT_NOIDLE, + CNSS_PM_REQUEST_RESUME, + CNSS_PM_RUNTIME_PUT_AUTO, + CNSS_PM_GET_NORESUME, +}; + +struct dma_iommu_mapping *cnss_smmu_get_mapping(void); +int cnss_smmu_map(phys_addr_t paddr, uint32_t *iova_addr, size_t size); +int cnss_get_fw_image(struct image_desc_info *image_desc_info); +void cnss_runtime_init(struct device *dev, int auto_delay); +void cnss_runtime_exit(struct device *dev); +void cnss_wlan_pci_link_down(void); +int cnss_pcie_shadow_control(struct pci_dev *dev, bool enable); +int cnss_wlan_register_driver(struct cnss_wlan_driver *driver); +void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver); +int cnss_get_fw_files(struct cnss_fw_files *pfw_files); +int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files, + u32 target_type, u32 target_version); +void cnss_get_qca9377_fw_files(struct cnss_fw_files *pfw_files, + u32 size, u32 tufello_dual_fw); + +int cnss_request_bus_bandwidth(int bandwidth); + +#ifdef CONFIG_CNSS_SECURE_FW +int 
cnss_get_sha_hash(const u8 *data, u32 data_len, + u8 *hash_idx, u8 *out); +void *cnss_get_fw_ptr(void); +#endif + +int cnss_get_codeswap_struct(struct codeswap_codeseg_info *swap_seg); +int cnss_get_bmi_setup(void); + +#ifdef CONFIG_PCI_MSM +int cnss_wlan_pm_control(bool vote); +#endif +void cnss_lock_pm_sem(void); +void cnss_release_pm_sem(void); + +void cnss_request_pm_qos_type(int latency_type, u32 qos_val); +void cnss_request_pm_qos(u32 qos_val); +void cnss_remove_pm_qos(void); + +void cnss_pci_request_pm_qos_type(int latency_type, u32 qos_val); +void cnss_pci_request_pm_qos(u32 qos_val); +void cnss_pci_remove_pm_qos(void); + +void cnss_sdio_request_pm_qos_type(int latency_type, u32 qos_val); +void cnss_sdio_request_pm_qos(u32 qos_val); +void cnss_sdio_remove_pm_qos(void); + +int cnss_get_platform_cap(struct cnss_platform_cap *cap); +void cnss_set_driver_status(enum cnss_driver_status driver_status); + +#ifndef CONFIG_WCNSS_MEM_PRE_ALLOC +static inline int wcnss_pre_alloc_reset(void) { return 0; } +#endif + +int msm_pcie_enumerate(u32 rc_idx); +int cnss_auto_suspend(void); +int cnss_auto_resume(void); +int cnss_prevent_auto_suspend(const char *caller_func); +int cnss_allow_auto_suspend(const char *caller_func); +int cnss_is_auto_suspend_allowed(const char *caller_func); + +int cnss_pm_runtime_request(struct device *dev, enum + cnss_runtime_request request); +void cnss_set_cc_source(enum cnss_cc_src cc_source); +enum cnss_cc_src cnss_get_cc_source(void); +#endif + +void cnss_pm_wake_lock_init(struct wakeup_source **ws, const char *name); +void cnss_pm_wake_lock(struct wakeup_source *ws); + +void cnss_device_crashed(void); +void cnss_device_self_recovery(void); +void *cnss_get_virt_ramdump_mem(unsigned long *size); + +void cnss_schedule_recovery_work(void); +int cnss_pcie_set_wlan_mac_address(const u8 *in, uint32_t len); +u8 *cnss_get_wlan_mac_address(struct device *dev, uint32_t *num); +int cnss_sdio_set_wlan_mac_address(const u8 *in, uint32_t len); + +enum { + 
CNSS_RESET_SOC = 0, + CNSS_RESET_SUBSYS_COUPLED, + CNSS_RESET_LEVEL_MAX +}; + +int cnss_get_restart_level(void); + +struct cnss_sdio_wlan_driver { + const char *name; + const struct sdio_device_id *id_table; + int (*probe)(struct sdio_func *func, const struct sdio_device_id *id); + void (*remove)(struct sdio_func *func); + int (*reinit)(struct sdio_func *func, const struct sdio_device_id *id); + void (*shutdown)(struct sdio_func *func); + void (*crash_shutdown)(struct sdio_func *func); + int (*suspend)(struct device *dev); + int (*resume)(struct device *dev); +}; + +int cnss_sdio_wlan_register_driver(struct cnss_sdio_wlan_driver *driver); +void cnss_sdio_wlan_unregister_driver(struct cnss_sdio_wlan_driver *driver); + +typedef void (*oob_irq_handler_t)(void *dev_para); +int cnss_wlan_query_oob_status(void); +int cnss_wlan_register_oob_irq_handler(oob_irq_handler_t handler, void *pm_oob); +int cnss_wlan_unregister_oob_irq_handler(void *pm_oob); + +void cnss_dump_stack(struct task_struct *task); +u8 *cnss_common_get_wlan_mac_address(struct device *dev, uint32_t *num); +void cnss_init_work(struct work_struct *work, work_func_t func); +void cnss_flush_delayed_work(void *dwork); +void cnss_flush_work(void *work); +void cnss_pm_wake_lock_timeout(struct wakeup_source *ws, ulong msec); +void cnss_pm_wake_lock_release(struct wakeup_source *ws); +void cnss_pm_wake_lock_destroy(struct wakeup_source *ws); +void cnss_get_monotonic_boottime(struct timespec64 *ts); +void cnss_get_boottime(struct timespec64 *ts); +void cnss_init_delayed_work(struct delayed_work *work, work_func_t func); +int cnss_vendor_cmd_reply(struct sk_buff *skb); +int cnss_set_cpus_allowed_ptr(struct task_struct *task, ulong cpu); +int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count); +int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count, u16 buf_len); +int cnss_wlan_set_dfs_nol(const void *info, u16 info_len); +int cnss_wlan_get_dfs_nol(void *info, u16 info_len); +int 
cnss_common_request_bus_bandwidth(struct device *dev, int bandwidth); +void cnss_common_device_crashed(struct device *dev); +void cnss_common_device_self_recovery(struct device *dev); +void *cnss_common_get_virt_ramdump_mem(struct device *dev, unsigned long *size); +void cnss_common_schedule_recovery_work(struct device *dev); +int cnss_common_set_wlan_mac_address(struct device *dev, const u8 *in, uint32_t len); +u8 *cnss_common_get_wlan_mac_address(struct device *dev, uint32_t *num); +int cnss_power_up(struct device *dev); +int cnss_power_down(struct device *dev); +int cnss_sdio_configure_spdt(bool state); + +int cnss_common_register_tsf_captured_handler(struct device *dev, irq_handler_t handler, + void *ctx); +int cnss_common_unregister_tsf_captured_handler(struct device *dev, void *ctx); +#endif /* _NET_CNSS_H_ */ diff --git a/include/soc/qcom/devfreq_icc.h b/include/soc/qcom/devfreq_icc.h new file mode 100644 index 000000000000..231f1b6e9980 --- /dev/null +++ b/include/soc/qcom/devfreq_icc.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2013-2014, 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DEVFREQ_ICC_H +#define _DEVFREQ_ICC_H + +#include + +#ifdef CONFIG_QCOM_DEVFREQ_ICC +int devfreq_add_icc(struct device *dev); +int devfreq_remove_icc(struct device *dev); +int devfreq_suspend_icc(struct device *dev); +int devfreq_resume_icc(struct device *dev); +#else +static inline int devfreq_add_icc(struct device *dev) +{ + return 0; +} +static inline int devfreq_remove_icc(struct device *dev) +{ + return 0; +} +static inline int devfreq_suspend_icc(struct device *dev) +{ + return 0; +} +static inline int devfreq_resume_icc(struct device *dev) +{ + return 0; +} +#endif + +#endif /* _DEVFREQ_ICC_H */ diff --git a/include/soc/qcom/spm.h b/include/soc/qcom/spm.h index 4951f9d8b0bd..b356a89ed57c 100644 --- a/include/soc/qcom/spm.h +++ b/include/soc/qcom/spm.h @@ -2,6 +2,7 @@ /* * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. * Copyright (c) 2014,2015, Linaro Ltd. + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef __SPM_H__ @@ -37,7 +38,171 @@ struct spm_driver_data { const struct spm_reg_data *reg_data; }; -void spm_set_low_power_mode(struct spm_driver_data *drv, - enum pm_sleep_mode mode); +enum { + MSM_SPM_MODE_DISABLED, + MSM_SPM_MODE_CLOCK_GATING, + MSM_SPM_MODE_RETENTION, + MSM_SPM_MODE_GDHS, + MSM_SPM_MODE_POWER_COLLAPSE, + MSM_SPM_MODE_STANDALONE_POWER_COLLAPSE, + MSM_SPM_MODE_FASTPC, + MSM_SPM_MODE_NR +}; +enum msm_spm_avs_irq { + MSM_SPM_AVS_IRQ_MIN, + MSM_SPM_AVS_IRQ_MAX, +}; + +struct msm_spm_device; +struct device_node; + +void spm_set_low_power_mode(struct spm_driver_data *drv, + enum pm_sleep_mode mode); + +#if defined(CONFIG_MSM_SPM) + +int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm); +void msm_spm_set_rpm_hs(bool allow_rpm_hs); +int msm_spm_probe_done(void); +int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel); +int msm_spm_get_vdd(unsigned int cpu); +int msm_spm_turn_on_cpu_rail(struct device_node *l2ccc_node, + unsigned int val, int cpu, int 
vctl_offset); +struct msm_spm_device *msm_spm_get_device_by_name(const char *name); +int msm_spm_config_low_power_mode(struct msm_spm_device *dev, + unsigned int mode, bool notify_rpm); +int msm_spm_config_low_power_mode_addr(struct msm_spm_device *dev, + unsigned int mode, bool notify_rpm); +int msm_spm_device_init(void); +bool msm_spm_is_mode_avail(unsigned int mode); +void msm_spm_dump_regs(unsigned int cpu); +int msm_spm_is_avs_enabled(unsigned int cpu); +int msm_spm_avs_enable(unsigned int cpu); +int msm_spm_avs_disable(unsigned int cpu); +int msm_spm_avs_set_limit(unsigned int cpu, uint32_t min_lvl, + uint32_t max_lvl); +int msm_spm_avs_enable_irq(unsigned int cpu, enum msm_spm_avs_irq irq); +int msm_spm_avs_disable_irq(unsigned int cpu, enum msm_spm_avs_irq irq); +int msm_spm_avs_clear_irq(unsigned int cpu, enum msm_spm_avs_irq irq); + +#if defined(CONFIG_MSM_L2_SPM) + +/* Public functions */ + +int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt); +int msm_spm_enable_fts_lpm(int cpu, uint32_t mode); + +#else + +static inline int msm_spm_apcs_set_phase(int cpu, unsigned int phase_cnt) +{ + return -ENODEV; +} + +static inline int msm_spm_enable_fts_lpm(int cpu, uint32_t mode) +{ + return -ENODEV; +} +#endif /* defined(CONFIG_MSM_L2_SPM) */ +#else /* defined(CONFIG_MSM_SPM) */ +static inline int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm) +{ + return -ENODEV; +} + +static inline void msm_spm_set_rpm_hs(bool allow_rpm_hs) {} + +static inline int msm_spm_probe_done(void) +{ + return -ENODEV; +} + +static inline int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel) +{ + return -ENODEV; +} + +static inline int msm_spm_get_vdd(unsigned int cpu) +{ + return 0; +} + +static inline int msm_spm_turn_on_cpu_rail(struct device_node *l2ccc_node, + unsigned int val, int cpu, int vctl_offset) +{ + return -ENODEV; +} + +static inline int msm_spm_device_init(void) +{ + return -ENODEV; +} + +static inline void msm_spm_dump_regs(unsigned int cpu) 
+{ } + +static inline int msm_spm_config_low_power_mode(struct msm_spm_device *dev, + unsigned int mode, bool notify_rpm) +{ + return -ENODEV; +} + +static inline int msm_spm_config_low_power_mode_addr( + struct msm_spm_device *dev, unsigned int mode, bool notify_rpm) +{ + return -ENODEV; +} + +static inline struct msm_spm_device *msm_spm_get_device_by_name( + const char *name) +{ + return NULL; +} + +static inline bool msm_spm_is_mode_avail(unsigned int mode) +{ + return false; +} + +static inline int msm_spm_is_avs_enabled(unsigned int cpu) +{ + return -ENODEV; +} + +static inline int msm_spm_avs_enable(unsigned int cpu) +{ + return -ENODEV; +} + +static inline int msm_spm_avs_disable(unsigned int cpu) +{ + return -ENODEV; +} + +static inline int msm_spm_avs_set_limit(unsigned int cpu, uint32_t min_lvl, + uint32_t max_lvl) +{ + return -ENODEV; +} + +static inline int msm_spm_avs_enable_irq(unsigned int cpu, + enum msm_spm_avs_irq irq) +{ + return -ENODEV; +} + +static inline int msm_spm_avs_disable_irq(unsigned int cpu, + enum msm_spm_avs_irq irq) +{ + return -ENODEV; +} + +static inline int msm_spm_avs_clear_irq(unsigned int cpu, + enum msm_spm_avs_irq irq) +{ + return -ENODEV; +} + +#endif /* defined (CONFIG_MSM_SPM) */ #endif /* __SPM_H__ */ diff --git a/modules.list.msm.autogvm b/modules.list.msm.autogvm index 484b2d232a78..b224032e14da 100644 --- a/modules.list.msm.autogvm +++ b/modules.list.msm.autogvm @@ -14,6 +14,7 @@ rename_devices.ko gh_msgq.ko gh_rm_drv.ko boot_stats.ko +bootmarker_proxy.ko virtio_clk_lemans.ko virtio_clk_direwolf.ko virtio_clk_sa8195p.ko @@ -21,7 +22,6 @@ virtio_clk_sm6150.ko virtio_clk_sm8150.ko virtio_clk_monaco.ko virtio_clk.ko -debug-regulator.ko virtio_regulator.ko iommu-logger.ko qcom_iommu_util.ko diff --git a/modules.list.msm.gen3auto b/modules.list.msm.gen3auto index 53f86cca4dc6..2b6c708d6d46 100644 --- a/modules.list.msm.gen3auto +++ b/modules.list.msm.gen3auto @@ -70,6 +70,7 @@ qcom_logbuf_vh.ko mem-hooks.ko 
soc_sleep_stats.ko boot_stats.ko +bootmarker_proxy.ko pcs_xpcs.ko micrel.ko stmmac.ko diff --git a/modules.list.msm.niobe b/modules.list.msm.niobe index 3024ddbc4d79..22733c1bd966 100644 --- a/modules.list.msm.niobe +++ b/modules.list.msm.niobe @@ -45,7 +45,7 @@ qcom_hwspinlock.ko smem.ko socinfo.ko qcom-ipcc.ko -phy-qcom-ufs-qmp-v4-pineapple.ko +phy-qcom-ufs-qmp-v4-niobe.ko phy-qcom-ufs.ko phy-qcom-ufs-qrbtc-sdm845.ko spmi-pmic-arb.ko diff --git a/modules.list.msm.seraph b/modules.list.msm.seraph index 53fa5addf6c8..15b56103d94a 100644 --- a/modules.list.msm.seraph +++ b/modules.list.msm.seraph @@ -12,3 +12,14 @@ clk-qcom.ko gdsc-regulator.ko cmd-db.ko qcom_rpmh.ko +stub-regulator.ko +arm_smmu.ko +iommu-logger.ko +qcom_iommu_util.ko +mem-hooks.ko +mem_buf_dev.ko +mem_buf.ko +qcom_dma_heaps.ko +msm_dma_iommu_mapping.ko +mem-offline.ko +qcom_iommu_debug.ko diff --git a/msm_kernel_le.bzl b/msm_kernel_le.bzl index af50506d4c9c..3064d613f0dc 100644 --- a/msm_kernel_le.bzl +++ b/msm_kernel_le.bzl @@ -7,6 +7,7 @@ load( "kernel_compile_commands", "kernel_images", "kernel_modules_install", + "kernel_uapi_headers_cc_library", "merged_kernel_uapi_headers", ) load( @@ -191,6 +192,17 @@ def _define_kernel_dist(target, msm_target, variant): log = "info", ) +def _define_uapi_library(target): + """Define a cc_library for userspace programs to use + + Args: + target: kernel_build target name (e.g. 
"kalama_gki") + """ + kernel_uapi_headers_cc_library( + name = "{}_uapi_header_library".format(target), + kernel_build = ":{}".format(target), + ) + def define_msm_le( msm_target, variant, @@ -253,6 +265,8 @@ def define_msm_le( boot_image_outs = ["boot.img"], ) + _define_uapi_library(target) + _define_kernel_dist(target, msm_target, variant) define_abl_dist(target, msm_target, variant) diff --git a/neo_la.bzl b/neo_la.bzl index bdf1454816ae..8525296c821f 100644 --- a/neo_la.bzl +++ b/neo_la.bzl @@ -7,6 +7,7 @@ target_name = "neo-la" def define_neo_la(): _neo_in_tree_modules = [ # keep sorted + "drivers/base/regmap/qti-regmap-debugfs.ko", "drivers/bus/mhi/devices/mhi_dev_dtr.ko", "drivers/bus/mhi/devices/mhi_dev_uci.ko", "drivers/bus/mhi/host/mhi.ko", @@ -15,6 +16,7 @@ def define_neo_la(): "drivers/clk/qcom/clk-dummy.ko", "drivers/clk/qcom/clk-qcom.ko", "drivers/clk/qcom/clk-rpmh.ko", + "drivers/clk/qcom/clk-spmi-pmic-div.ko", "drivers/clk/qcom/debugcc-neo.ko", "drivers/clk/qcom/dispcc-neo.ko", "drivers/clk/qcom/gcc-neo.ko", @@ -52,6 +54,10 @@ def define_neo_la(): "drivers/hwtracing/stm/stm_p_ost.ko", "drivers/hwtracing/stm/stm_p_sys-t.ko", "drivers/i2c/busses/i2c-msm-geni.ko", + "drivers/iio/adc/qcom-spmi-adc5.ko", + "drivers/iio/adc/qcom-vadc-common.ko", + "drivers/input/misc/pm8941-pwrkey.ko", + "drivers/interconnect/icc-test.ko", "drivers/interconnect/qcom/icc-bcm-voter.ko", "drivers/interconnect/qcom/icc-debug.ko", "drivers/interconnect/qcom/icc-rpmh.ko", @@ -67,15 +73,21 @@ def define_neo_la(): "drivers/mailbox/msm_qmp.ko", "drivers/mailbox/qcom-ipcc.ko", "drivers/mfd/qcom-i2c-pmic.ko", + "drivers/mfd/qcom-spmi-pmic.ko", "drivers/mmc/host/cqhci.ko", "drivers/mmc/host/sdhci-msm.ko", + "drivers/nvmem/nvmem_qcom-spmi-sdam.ko", "drivers/nvmem/nvmem_qfprom.ko", "drivers/pci/controller/pci-msm-drv.ko", "drivers/perf/qcom_llcc_pmu.ko", "drivers/pinctrl/qcom/pinctrl-msm.ko", "drivers/pinctrl/qcom/pinctrl-neo.ko", + "drivers/pinctrl/qcom/pinctrl-spmi-gpio.ko", + 
"drivers/pinctrl/qcom/pinctrl-spmi-mpp.ko", "drivers/power/reset/qcom-dload-mode.ko", + "drivers/power/reset/qcom-pon.ko", "drivers/power/reset/qcom-reboot-reason.ko", + "drivers/power/reset/reboot-mode.ko", "drivers/regulator/debug-regulator.ko", "drivers/regulator/proxy-consumer.ko", "drivers/regulator/qcom_pm8008-regulator.ko", @@ -89,6 +101,7 @@ def define_neo_la(): "drivers/rpmsg/glink_pkt.ko", "drivers/rpmsg/qcom_glink.ko", "drivers/rpmsg/qcom_glink_smem.ko", + "drivers/rtc/rtc-pm8xxx.ko", "drivers/slimbus/slim-qcom-ngd-ctrl.ko", "drivers/slimbus/slimbus.ko", "drivers/soc/qcom/boot_stats.ko", @@ -137,8 +150,12 @@ def define_neo_la(): "drivers/soc/qcom/sys_pm_vx.ko", "drivers/soc/qcom/sysmon_subsystem_stats.ko", "drivers/spi/spi-msm-geni.ko", + "drivers/spmi/spmi-pmic-arb.ko", + "drivers/spmi/spmi-pmic-arb-debug.ko", "drivers/thermal/qcom/cpu_hotplug.ko", "drivers/thermal/qcom/ddr_cdev.ko", + "drivers/thermal/qcom/qcom-spmi-adc-tm5.ko", + "drivers/thermal/qcom/qcom-spmi-temp-alarm.ko", "drivers/thermal/qcom/qcom_tsens.ko", "drivers/thermal/qcom/qti_cpufreq_cdev.ko", "drivers/thermal/qcom/qti_devfreq_cdev.ko", diff --git a/niobe.bzl b/niobe.bzl index 24b83109d415..61fd487259da 100644 --- a/niobe.bzl +++ b/niobe.bzl @@ -87,7 +87,7 @@ def define_niobe(): "drivers/perf/qcom_llcc_pmu.ko", "drivers/phy/qualcomm/phy-qcom-ufs.ko", "drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4.ko", - "drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-pineapple.ko", + "drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-niobe.ko", "drivers/phy/qualcomm/phy-qcom-ufs-qrbtc-sdm845.ko", "drivers/pinctrl/qcom/pinctrl-msm.ko", "drivers/pinctrl/qcom/pinctrl-niobe.ko", @@ -98,7 +98,7 @@ def define_niobe(): "drivers/power/reset/qcom-reboot-reason.ko", "drivers/power/reset/reboot-mode.ko", "drivers/power/supply/qti_battery_charger.ko", - "drivers/powercap/qti_epm_hardware.ko", + "drivers/powercap/qcom_power_telemetry.ko", "drivers/pwm/pwm-qcom.ko", "drivers/pwm/pwm-qti-lpg.ko", "drivers/regulator/ap72200-regulator.ko", 
diff --git a/seraph.bzl b/seraph.bzl index 4298e7d782e0..5f4eb9876cad 100644 --- a/seraph.bzl +++ b/seraph.bzl @@ -11,12 +11,23 @@ def define_seraph(): "drivers/clk/qcom/clk-dummy.ko", "drivers/clk/qcom/clk-qcom.ko", "drivers/clk/qcom/gdsc-regulator.ko", + "drivers/dma-buf/heaps/qcom_dma_heaps.ko", "drivers/firmware/qcom-scm.ko", "drivers/hwspinlock/qcom_hwspinlock.ko", + "drivers/iommu/arm/arm-smmu/arm_smmu.ko", + "drivers/iommu/iommu-logger.ko", + "drivers/iommu/msm_dma_iommu_mapping.ko", + "drivers/iommu/qcom_iommu_debug.ko", + "drivers/iommu/qcom_iommu_util.ko", "drivers/irqchip/qcom-pdc.ko", "drivers/pinctrl/qcom/pinctrl-msm.ko", "drivers/pinctrl/qcom/pinctrl-seraph.ko", + "drivers/regulator/stub-regulator.ko", "drivers/soc/qcom/cmd-db.ko", + "drivers/soc/qcom/mem-hooks.ko", + "drivers/soc/qcom/mem-offline.ko", + "drivers/soc/qcom/mem_buf/mem_buf.ko", + "drivers/soc/qcom/mem_buf/mem_buf_dev.ko", "drivers/soc/qcom/qcom_rpmh.ko", "drivers/soc/qcom/smem.ko", "drivers/soc/qcom/socinfo.ko", @@ -56,7 +67,7 @@ def define_seraph(): variant = variant, in_tree_module_list = mod_list, boot_image_opts = boot_image_opts( - earlycon_addr = "qcom_geni,0x00884000", + earlycon_addr = "qcom_geni,0xa94000", kernel_vendor_cmdline_extras = kernel_vendor_cmdline_extras, board_kernel_cmdline_extras = board_kernel_cmdline_extras, board_bootconfig_extras = board_bootconfig_extras,