Merge 0aa0194e05 on remote branch

Change-Id: I0510e8f20f8feb5d128d7b8ae21401bb8e774aac
This commit is contained in:
Linux Build Service Account 2024-11-06 22:33:40 -08:00
commit 9b1afe0287
112 changed files with 25192 additions and 1666 deletions

View file

@ -227,3 +227,7 @@ eipv4= [KNL] Sets ipv4 address at boot up for early ethernet.
eipv6= [KNL] Sets ipv6 address at boot up for early ethernet.
ermac= [KNL] Sets mac address at boot up for early ethernet.
board= [KNL] Sets Board type of device at boot up for phy detection.
enet= [KNL] Sets the PHY type on device at boot up for phy detection.

View file

@ -7096,4 +7096,8 @@
eipv6= [KNL] Sets ipv6 address at boot up for early ethernet.
ermac= [KNL] Sets mac address at boot up for early ethernet.
ermac= [KNL] Sets mac address at boot up for early ethernet.
board= [KNL] Sets Board type of device at boot up for phy detection.
enet= [KNL] Sets the PHY type on device at boot up for phy detection.

View file

@ -1,2 +1,2 @@
0a5333c8b52768abdbba7bfcffbf36fc761b4cad
android14-6.1-2024-08_r2
6f645aac97064a41a0bdcb18f1646427fd7ad6b9
android14-6.1-2024-08_r5

View file

@ -46,4 +46,32 @@ config ARCH_MDM9615
bool "Enable support for MDM9615"
select CLKSRC_QCOM
config ARCH_MDM9607
bool "Enable support for MDM9607"
select ARM_GIC
select CPU_V7
select REGULATOR
select REGULATOR_RPM_SMD
select HAVE_ARM_ARCH_TIMER
select MSM_RPM_SMD
select MEMORY_HOLE_CARVEOUT
select MSM_CORTEX_A7
select PINCTRL
select QCOM_GDSC
select USE_PINCTRL_IRQ
select MSM_IRQ
select MSM_PM if PM
select PM_DEVFREQ
select MSM_DEVFREQ_DEVBW
select MSM_BIMC_BWMON
select DEVFREQ_GOV_MSM_BW_HWMON
select HWSPINLOCK
select MTD_UBI
select HAVE_CLK_PREPARE
help
Enable support for MDM9607.
This enables support for MDM9607 SoC devicetree based systems.
If you do not wish to build a kernel that runs on this
chipset or if you are unsure, say 'N' here.
endif

View file

@ -5,6 +5,7 @@ CONFIG_ARM_SMMU=m
CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y
CONFIG_ARM_SMMU_QCOM=m
# CONFIG_ARM_SMMU_SELFTEST is not set
CONFIG_BOOTMARKER_PROXY=m
CONFIG_CFG80211=m
CONFIG_COMMON_CLK_QCOM=m
CONFIG_CRYPTO_DEV_QCOM_RNG=m
@ -124,7 +125,6 @@ CONFIG_QTI_QUIN_GVM=y
# CONFIG_QTI_THERMAL_MINIDUMP is not set
# CONFIG_R8188EU is not set
# CONFIG_R8712U is not set
CONFIG_REGULATOR_DEBUG_CONTROL=m
CONFIG_REGULATOR_STUB=m
CONFIG_RENAME_DEVICES=m
CONFIG_RPMSG_QCOM_GLINK=m

View file

@ -10,6 +10,7 @@ CONFIG_ARM_SMMU_QCOM=m
CONFIG_ARM_SMMU_TESTBUS=y
CONFIG_ARM_SMMU_TESTBUS_DUMP=y
CONFIG_ARM_SMMU_TESTBUS_DUMP_GEN3AUTO=y
CONFIG_BOOTMARKER_PROXY=m
CONFIG_CFG80211=m
CONFIG_CHR_DEV_SG=m
CONFIG_COMMON_CLK_QCOM=m

View file

@ -40,17 +40,26 @@ CONFIG_HWSPINLOCK_QCOM=m
CONFIG_I2C_EUSB2_REPEATER=m
CONFIG_I2C_MSM_GENI=m
CONFIG_INIT_ON_FREE_DEFAULT_ON=y
CONFIG_INPUT_PM8941_PWRKEY=m
# CONFIG_INPUT_PM8XXX_VIBRATOR is not set
# CONFIG_INPUT_QCOM_HV_HAPTICS is not set
CONFIG_INTERCONNECT_QCOM_BCM_VOTER=m
CONFIG_INTERCONNECT_QCOM_DEBUG=m
CONFIG_INTERCONNECT_QCOM_NEO=m
CONFIG_INTERCONNECT_QCOM_QOS=m
CONFIG_INTERCONNECT_QCOM_RPMH=m
CONFIG_INTERCONNECT_TEST=m
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_IPC_LOGGING=m
CONFIG_IPC_LOG_MINIDUMP_BUFFERS=16
# CONFIG_LEDS_QPNP_FLASH_V2 is not set
# CONFIG_LEDS_QPNP_VIBRATOR_LDO is not set
# CONFIG_LEDS_QTI_FLASH is not set
# CONFIG_LEDS_QTI_TRI_LED is not set
CONFIG_LOCALVERSION="-gki"
CONFIG_MAC80211=m
CONFIG_MFD_I2C_PMIC=m
CONFIG_MFD_SPMI_PMIC=m
CONFIG_MHI_BUS=m
CONFIG_MHI_BUS_MISC=y
CONFIG_MHI_DTR=m
@ -68,13 +77,18 @@ CONFIG_MSM_RDBG=m
CONFIG_MSM_SYSSTATS=m
CONFIG_NL80211_TESTMODE=y
CONFIG_NVMEM_QCOM_QFPROM=m
CONFIG_NVMEM_SPMI_SDAM=m
CONFIG_PCI_MSM=m
CONFIG_PDR_INDICATION_NOTIF_TIMEOUT=9000
CONFIG_PINCTRL_MSM=m
CONFIG_PINCTRL_NEO=m
CONFIG_PINCTRL_QCOM_SPMI_PMIC=m
# CONFIG_PM8916_WATCHDOG is not set
CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE=m
CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE_DEFAULT=y
CONFIG_POWER_RESET_QCOM_PON=m
CONFIG_POWER_RESET_QCOM_REBOOT_REASON=m
# CONFIG_PWM_QTI_LPG is not set
CONFIG_QCOM_AOSS_QMP=m
CONFIG_QCOM_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_QCOM_BAM_DMA=m
@ -82,6 +96,7 @@ CONFIG_QCOM_BWMON=m
CONFIG_QCOM_BWPROF=m
CONFIG_QCOM_CDSP_RM=m
CONFIG_QCOM_CLK_RPMH=m
# CONFIG_QCOM_COINCELL is not set
CONFIG_QCOM_COMMAND_DB=m
CONFIG_QCOM_CPUSS_SLEEP_STATS=m
CONFIG_QCOM_CPU_VENDOR_HOOKS=m
@ -95,6 +110,7 @@ CONFIG_QCOM_DMABUF_HEAPS_CMA=y
CONFIG_QCOM_DMABUF_HEAPS_PAGE_POOL_REFILL=y
CONFIG_QCOM_DMABUF_HEAPS_SYSTEM=y
CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_SECURE=y
# CONFIG_QCOM_EPM is not set
CONFIG_QCOM_EUD=m
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QCOM_GDSC_REGULATOR=m
@ -140,10 +156,15 @@ CONFIG_QCOM_SMP2P=m
CONFIG_QCOM_SMP2P_SLEEPSTATE=m
CONFIG_QCOM_SOCINFO=m
CONFIG_QCOM_SOC_WATCHDOG=m
CONFIG_QCOM_SPMI_ADC5=m
CONFIG_QCOM_SPMI_ADC_TM5=m
# CONFIG_QCOM_SPMI_RRADC is not set
CONFIG_QCOM_SPMI_TEMP_ALARM=m
CONFIG_QCOM_STATS=m
CONFIG_QCOM_SYSMON=m
CONFIG_QCOM_SYSMON_SUBSYSTEM_STATS=m
CONFIG_QCOM_TSENS=m
CONFIG_QCOM_VADC_COMMON=m
CONFIG_QCOM_VA_MINIDUMP=m
CONFIG_QCOM_WATCHDOG_BARK_TIME=11000
CONFIG_QCOM_WATCHDOG_IPI_PING=y
@ -169,6 +190,8 @@ CONFIG_QTI_QMI_COOLING_DEVICE=m
# CONFIG_QTI_QMI_SENSOR is not set
CONFIG_QTI_SYS_PM_VX=m
CONFIG_QTI_USERSPACE_CDEV=m
CONFIG_REBOOT_MODE=m
CONFIG_REGMAP_QTI_DEBUGFS=m
CONFIG_REGULATOR_DEBUG_CONTROL=m
CONFIG_REGULATOR_PROXY_CONSUMER=m
CONFIG_REGULATOR_QCOM_PM8008=m
@ -178,12 +201,16 @@ CONFIG_RPMSG_QCOM_GLINK=m
CONFIG_RPMSG_QCOM_GLINK_SMEM=m
CONFIG_RPROC_SSR_NOTIF_TIMEOUT=20000
CONFIG_RPROC_SYSMON_NOTIF_TIMEOUT=20000
CONFIG_RTC_DRV_PM8XXX=m
CONFIG_SCHED_WALT=m
CONFIG_SERIAL_MSM_GENI=m
CONFIG_SLIMBUS=m
CONFIG_SLIM_QCOM_NGD_CTRL=m
# CONFIG_SND_USB_AUDIO_QMI is not set
CONFIG_SPI_MSM_GENI=m
CONFIG_SPMI_MSM_PMIC_ARB=m
CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=m
CONFIG_SPMI_PMIC_CLKDIV=m
CONFIG_SPS=m
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_STM=m

View file

@ -111,7 +111,7 @@ CONFIG_PHY_QCOM_UFS_QRBTC_SDM845=m
CONFIG_PHY_QCOM_UFS_V4=m
# CONFIG_PHY_QCOM_UFS_V4_BLAIR is not set
# CONFIG_PHY_QCOM_UFS_V4_KALAMA is not set
CONFIG_PHY_QCOM_UFS_V4_PINEAPPLE=m
CONFIG_PHY_QCOM_UFS_V4_NIOBE=m
# CONFIG_PHY_QCOM_UFS_V4_WAIPIO is not set
CONFIG_PINCTRL_MSM=m
CONFIG_PINCTRL_NIOBE=m
@ -147,7 +147,7 @@ CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_SECURE=y
# CONFIG_QCOM_DMABUF_HEAPS_TUI_CARVEOUT is not set
# CONFIG_QCOM_DMABUF_HEAPS_UBWCP is not set
# CONFIG_QCOM_DYN_MINIDUMP_STACK is not set
CONFIG_QCOM_EPM=m
# CONFIG_QCOM_EPM is not set
CONFIG_QCOM_EUD=m
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QCOM_FSA4480_I2C=m
@ -191,6 +191,7 @@ CONFIG_QCOM_PDC=m
CONFIG_QCOM_PDR_HELPERS=m
CONFIG_QCOM_PIL_INFO=m
CONFIG_QCOM_PMU_LIB=m
CONFIG_QCOM_POWER_TELEMETRY=m
CONFIG_QCOM_Q6V5_COMMON=m
CONFIG_QCOM_Q6V5_PAS=m
CONFIG_QCOM_QFPROM_SYS=m

View file

@ -1,15 +1,42 @@
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SERAPH=y
CONFIG_ARM_SMMU=m
# CONFIG_ARM_SMMU_CAPTUREBUS_DEBUGFS is not set
CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y
CONFIG_ARM_SMMU_QCOM=m
# CONFIG_ARM_SMMU_QCOM_DEBUG is not set
# CONFIG_ARM_SMMU_SELFTEST is not set
# CONFIG_ARM_SMMU_TESTBUS is not set
CONFIG_COMMON_CLK_QCOM=m
CONFIG_HWSPINLOCK_QCOM=m
# CONFIG_IOMMU_TLBSYNC_DEBUG is not set
CONFIG_LOCALVERSION="-gki"
# CONFIG_MODULE_SIG_ALL is not set
# CONFIG_MSM_HAB is not set
CONFIG_PINCTRL_MSM=m
CONFIG_PINCTRL_SERAPH=m
CONFIG_QCOM_COMMAND_DB=m
CONFIG_QCOM_DMABUF_HEAPS=m
CONFIG_QCOM_DMABUF_HEAPS_CARVEOUT=y
CONFIG_QCOM_DMABUF_HEAPS_CMA=y
CONFIG_QCOM_DMABUF_HEAPS_PAGE_POOL_REFILL=y
CONFIG_QCOM_DMABUF_HEAPS_SYSTEM=y
# CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_MOVABLE is not set
# CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_UNCACHED is not set
# CONFIG_QCOM_DMABUF_HEAPS_TUI_CARVEOUT is not set
# CONFIG_QCOM_DMABUF_HEAPS_UBWCP is not set
CONFIG_QCOM_GDSC_REGULATOR=m
CONFIG_QCOM_IOMMU_DEBUG=m
CONFIG_QCOM_IOMMU_UTIL=m
CONFIG_QCOM_LAZY_MAPPING=m
CONFIG_QCOM_MEM_BUF=m
CONFIG_QCOM_MEM_BUF_DEV=m
CONFIG_QCOM_MEM_HOOKS=m
CONFIG_QCOM_MEM_OFFLINE=m
CONFIG_QCOM_PDC=m
CONFIG_QCOM_RPMH=m
CONFIG_QCOM_SCM=m
CONFIG_QCOM_SMEM=m
CONFIG_QCOM_SOCINFO=m
CONFIG_QTI_IOMMU_SUPPORT=m
CONFIG_REGULATOR_STUB=m

View file

@ -40,6 +40,7 @@ def define_autogvm():
"drivers/md/dm-bow.ko",
"drivers/media/platform/msm/npu/virtio_npu.ko",
"drivers/mfd/qcom-spmi-pmic.ko",
"drivers/misc/bootmarker_proxy.ko",
"drivers/misc/qseecom_proxy.ko",
"drivers/mmc/host/cqhci.ko",
"drivers/mmc/host/sdhci-msm.ko",
@ -73,7 +74,6 @@ def define_autogvm():
"drivers/pinctrl/qcom/pinctrl-spmi-gpio.ko",
"drivers/pinctrl/qcom/pinctrl-spmi-mpp.ko",
"drivers/power/reset/msm-vm-poweroff.ko",
"drivers/regulator/debug-regulator.ko",
"drivers/regulator/stub-regulator.ko",
"drivers/regulator/virtio_regulator.ko",
"drivers/remoteproc/rproc_qcom_common.ko",

View file

@ -3773,6 +3773,7 @@ static struct clk_branch gcc_usb2_0_clkref_en = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb2_0_clkref_en",
.flags = CLK_DONT_HOLD_STATE,
.ops = &clk_branch2_ops,
},
},

View file

@ -3723,6 +3723,7 @@ static struct clk_branch gcc_usb2_clkref_en = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb2_clkref_en",
.flags = CLK_DONT_HOLD_STATE,
.ops = &clk_branch2_ops,
},
},

View file

@ -18,6 +18,23 @@ config CPU_FREQ
If in doubt, say N.
config CPU_FREQ_MSM
bool "CPU Frequency scaling"
select SRCU
help
CPU Frequency scaling allows you to change the clock speed of
CPUs on the fly. This is a nice method to save power, because
the lower the CPU clock speed, the less power the CPU consumes.
Note that this driver doesn't automatically change the CPU
clock speed, you need to either enable a dynamic cpufreq governor
(see below) after boot, or use a userspace tool.
For details, take a look at
<file:Documentation/admin-guide/pm/cpufreq.rst>.
If in doubt, say N.
if CPU_FREQ
config CPU_FREQ_GOV_ATTR_SET

View file

@ -97,6 +97,7 @@ obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
obj-$(CONFIG_ARM_TEGRA194_CPUFREQ) += tegra194-cpufreq.o
obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
obj-$(CONFIG_CPU_FREQ_MSM) += qcom-cpufreq.o
##################################################################################

View file

@ -0,0 +1,550 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
* Author: Mike A. Chan <mikechan@google.com>
*/
/* MSM architecture cpufreq driver */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/suspend.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/cpu_cooling.h>
#include <trace/events/power.h>
/* Module-wide driver state. */
static DEFINE_MUTEX(l2bw_lock);	/* NOTE(review): not referenced in this file's visible code — confirm it is still needed */
/* Per-CPU cooling device handles registered from msm_cpufreq_ready(). */
static struct thermal_cooling_device *cdev[NR_CPUS];
/* Per-CPU core clocks; CPUs sharing a clock alias the same struct clk. */
static struct clk *cpu_clk[NR_CPUS];
/* Optional L2 clock (NULL when "l2_clk" is absent from DT). */
static struct clk *l2_clk;
/* Per-CPU frequency table, parsed from DT in msm_cpufreq_probe(). */
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table);
/* Set true once probe has fetched all CPU clocks; hotplug callbacks
 * refuse to run (return -EINVAL) before that point. */
static bool hotplug_ready;
/* Per-CPU suspend gate: device_suspended blocks frequency changes while
 * the system is suspending; suspend_mutex serializes target() vs suspend. */
struct cpufreq_suspend_t {
	struct mutex suspend_mutex;
	int device_suspended;
};
static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data);
/* Cache of the last resolve_freq() result per first-CPU-of-policy, so
 * target() can skip a table lookup when asked for the same frequency. */
static DEFINE_PER_CPU(int, cached_resolve_idx);
static DEFINE_PER_CPU(unsigned int, cached_resolve_freq);
/* Dynamically allocated cpuhp states used by the early-register path. */
#define CPUHP_QCOM_CPUFREQ_PREPARE CPUHP_AP_ONLINE_DYN
#define CPUHP_AP_QCOM_CPUFREQ_STARTING (CPUHP_AP_ONLINE_DYN + 1)
/*
 * set_cpu_freq() - switch one policy's CPU clock to @new_freq (kHz).
 *
 * Wraps the clk_set_rate() in the cpufreq transition begin/end pair so
 * governors/notifiers observe the change, and updates the arch frequency
 * scale on success.  @index is accepted but not used here — callers pass
 * the table driver_data through.  Returns the clk_set_rate() result.
 */
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
			unsigned int index)
{
	int ret = 0;
	struct cpufreq_freqs freqs;
	unsigned long rate;

	freqs.old = policy->cur;
	freqs.new = new_freq;

	//trace_cpu_frequency_switch_start(freqs.old, freqs.new, policy->cpu);
	cpufreq_freq_transition_begin(policy, &freqs);

	/* Round through the clock framework before setting (kHz -> Hz). */
	rate = new_freq * 1000;
	rate = clk_round_rate(cpu_clk[policy->cpu], rate);
	ret = clk_set_rate(cpu_clk[policy->cpu], rate);
	cpufreq_freq_transition_end(policy, &freqs, ret);
	if (!ret) {
		arch_set_freq_scale(policy->related_cpus, new_freq,
				    policy->cpuinfo.max_freq);
		//trace_cpu_frequency_switch_end(policy->cpu);
	}

	return ret;
}
/*
 * msm_cpufreq_target() - cpufreq .target hook.
 *
 * Resolves @target_freq to a table entry (using the cached resolve result
 * when it matches) and applies it via set_cpu_freq().  Rejected while the
 * per-CPU suspend flag is set.
 *
 * NOTE(review): -EFAULT for "suspended" is unusual (-EBUSY would be the
 * conventional errno) — confirm callers rely on this value before changing.
 */
static int msm_cpufreq_target(struct cpufreq_policy *policy,
			      unsigned int target_freq,
			      unsigned int relation)
{
	int ret = 0;
	int index;
	struct cpufreq_frequency_table *table;
	int first_cpu = cpumask_first(policy->related_cpus);

	/* Serialize against msm_cpufreq_suspend()/resume(). */
	mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);

	if (target_freq == policy->cur)
		goto done;

	if (per_cpu(suspend_data, policy->cpu).device_suspended) {
		pr_debug("cpufreq: cpu%d scheduling frequency change in suspend\n",
			 policy->cpu);
		ret = -EFAULT;
		goto done;
	}

	table = policy->freq_table;
	/* Reuse the index cached by msm_cpufreq_resolve_freq() when valid. */
	if (per_cpu(cached_resolve_freq, first_cpu) == target_freq)
		index = per_cpu(cached_resolve_idx, first_cpu);
	else
		index = cpufreq_frequency_table_target(policy, target_freq,
						       relation);

	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
		 policy->cpu, target_freq, relation,
		 policy->min, policy->max, table[index].frequency);

	ret = set_cpu_freq(policy, table[index].frequency,
			   table[index].driver_data);
done:
	mutex_unlock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
	return ret;
}
/*
 * msm_cpufreq_resolve_freq() - map @target_freq to a supported table
 * frequency (CPUFREQ_RELATION_L: lowest frequency at or above target).
 *
 * Caches the resolved index/frequency per policy so a following
 * msm_cpufreq_target() call for the same frequency skips the lookup.
 */
static unsigned int msm_cpufreq_resolve_freq(struct cpufreq_policy *policy,
					     unsigned int target_freq)
{
	int index;
	int first_cpu = cpumask_first(policy->related_cpus);
	unsigned int freq;

	index = cpufreq_frequency_table_target(policy, target_freq,
					       CPUFREQ_RELATION_L);
	freq = policy->freq_table[index].frequency;

	per_cpu(cached_resolve_idx, first_cpu) = index;
	per_cpu(cached_resolve_freq, first_cpu) = freq;

	return freq;
}
/*
 * msm_cpufreq_verify() - cpufreq .verify hook.
 *
 * Clamp the requested policy limits to the hardware min/max; no other
 * constraints apply for this driver.
 */
static int msm_cpufreq_verify(struct cpufreq_policy_data *policy)
{
	unsigned int hw_min = policy->cpuinfo.min_freq;
	unsigned int hw_max = policy->cpuinfo.max_freq;

	cpufreq_verify_within_limits(policy, hw_min, hw_max);
	return 0;
}
/*
 * msm_cpufreq_get_freq() - cpufreq .get hook.
 *
 * Report the current clock rate of @cpu's core clock in kHz.
 */
static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
{
	unsigned long rate_hz = clk_get_rate(cpu_clk[cpu]);

	return rate_hz / 1000;
}
/*
 * msm_cpufreq_init() - cpufreq .init hook for a new policy.
 *
 * Groups CPUs that share this CPU's clock into one policy, validates the
 * DT-derived frequency table, then snaps the current clock rate to a table
 * entry (RELATION_H: highest frequency at or below current) so the policy
 * always starts on a supported frequency.
 */
static int msm_cpufreq_init(struct cpufreq_policy *policy)
{
	int cur_freq;
	int index;
	int ret = 0;
	struct cpufreq_frequency_table *table =
			per_cpu(freq_table, policy->cpu);
	int cpu;

	/*
	 * In some SoC, some cores are clocked by same source, and their
	 * frequencies can not be changed independently. Find all other
	 * CPUs that share same clock, and mark them as controlled by
	 * same policy.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_clk[cpu] == cpu_clk[policy->cpu])
			cpumask_set_cpu(cpu, policy->cpus);

	policy->freq_table = table;
	ret = cpufreq_table_validate_and_sort(policy);
	if (ret) {
		pr_err("cpufreq: failed to get policy min/max\n");
		return ret;
	}

	/* Current rate in kHz, mapped down to the nearest table entry. */
	cur_freq = clk_get_rate(cpu_clk[policy->cpu])/1000;
	index = cpufreq_frequency_table_target(policy, cur_freq,
					       CPUFREQ_RELATION_H);
	/*
	 * Call set_cpu_freq unconditionally so that when cpu is set to
	 * online, frequency limit will always be updated.
	 */
	ret = set_cpu_freq(policy, table[index].frequency,
			   table[index].driver_data);
	if (ret)
		return ret;
	pr_debug("cpufreq: cpu%d init at %d switching to %d\n",
		 policy->cpu, cur_freq, table[index].frequency);
	policy->cur = table[index].frequency;
	/* Any CPU may submit DVFS requests for this policy. */
	policy->dvfs_possible_from_any_cpu = true;

	return 0;
}
/*
 * qcom_cpufreq_dead_cpu() - cpuhp teardown (CPU fully offline).
 *
 * Drops the prepare-count taken by qcom_cpufreq_up_cpu().  Refuses (and
 * thereby blocks hotplug) until probe has populated the clock handles.
 */
static int qcom_cpufreq_dead_cpu(unsigned int cpu)
{
	/* Fail hotplug until this driver can get CPU clocks */
	if (!hotplug_ready)
		return -EINVAL;
	clk_unprepare(cpu_clk[cpu]);
	clk_unprepare(l2_clk);
	return 0;
}
/*
 * qcom_cpufreq_up_cpu() - cpuhp bringup (may sleep).
 *
 * Prepares the L2 clock then the CPU clock; unwinds the L2 prepare when
 * the CPU clock fails.  Refuses until probe has fetched the clocks.
 */
static int qcom_cpufreq_up_cpu(unsigned int cpu)
{
	int rc;

	/* Fail hotplug until this driver can get CPU clocks */
	if (!hotplug_ready)
		return -EINVAL;
	rc = clk_prepare(l2_clk);
	if (rc < 0)
		return rc;
	rc = clk_prepare(cpu_clk[cpu]);
	if (rc < 0)
		clk_unprepare(l2_clk);
	return rc;
}
/*
 * qcom_cpufreq_dying_cpu() - cpuhp teardown run on the dying CPU
 * (atomic context): disable the CPU clock then the L2 clock.
 */
static int qcom_cpufreq_dying_cpu(unsigned int cpu)
{
	/* Fail hotplug until this driver can get CPU clocks */
	if (!hotplug_ready)
		return -EINVAL;
	clk_disable(cpu_clk[cpu]);
	clk_disable(l2_clk);
	return 0;
}
/*
 * qcom_cpufreq_starting_cpu() - cpuhp startup run on the incoming CPU
 * (atomic context): enable L2 then the CPU clock, unwinding L2 on failure.
 */
static int qcom_cpufreq_starting_cpu(unsigned int cpu)
{
	int rc;

	/* Fail hotplug until this driver can get CPU clocks */
	if (!hotplug_ready)
		return -EINVAL;
	rc = clk_enable(l2_clk);
	if (rc < 0)
		return rc;
	rc = clk_enable(cpu_clk[cpu]);
	if (rc < 0)
		clk_disable(l2_clk);
	return rc;
}
/*
 * msm_cpufreq_suspend() - mark every CPU suspended so msm_cpufreq_target()
 * rejects frequency changes for the duration of system suspend.
 * Takes each CPU's suspend_mutex to let in-flight target() calls drain.
 */
static int msm_cpufreq_suspend(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		mutex_lock(&per_cpu(suspend_data, cpu).suspend_mutex);
		per_cpu(suspend_data, cpu).device_suspended = 1;
		mutex_unlock(&per_cpu(suspend_data, cpu).suspend_mutex);
	}

	return NOTIFY_DONE;
}
/*
 * msm_cpufreq_resume() - clear the per-CPU suspend flags and re-apply
 * policy limits on any online CPU whose current frequency drifted outside
 * min/max while requests were being rejected during suspend.
 */
static int msm_cpufreq_resume(void)
{
	int cpu, ret;
	struct cpufreq_policy policy;

	for_each_possible_cpu(cpu) {
		per_cpu(suspend_data, cpu).device_suspended = 0;
	}

	/*
	 * Freq request might be rejected during suspend, resulting
	 * in policy->cur violating min/max constraint.
	 * Correct the frequency as soon as possible.
	 */
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		ret = cpufreq_get_policy(&policy, cpu);
		if (ret)
			continue;
		if (policy.cur <= policy.max && policy.cur >= policy.min)
			continue;
		cpufreq_update_policy(cpu);
	}
	cpus_read_unlock();

	return NOTIFY_DONE;
}
/*
 * msm_cpufreq_pm_event() - PM notifier callback.
 *
 * Gate frequency changes across system suspend/hibernate: block them on
 * the *_PREPARE events and re-enable (and fix up limits) on the POST
 * events.  All other events are ignored.
 */
static int msm_cpufreq_pm_event(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	if (event == PM_POST_HIBERNATION || event == PM_POST_SUSPEND)
		return msm_cpufreq_resume();

	if (event == PM_HIBERNATION_PREPARE || event == PM_SUSPEND_PREPARE)
		return msm_cpufreq_suspend();

	return NOTIFY_DONE;
}
/* PM notifier registered from msm_cpufreq_probe(). */
static struct notifier_block msm_cpufreq_pm_notifier = {
	.notifier_call = msm_cpufreq_pm_event,
};

/* sysfs attributes exposed per policy: available frequencies only. */
static struct freq_attr *msm_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};
/*
 * msm_cpufreq_ready() - cpufreq .ready hook.
 *
 * Registers a cpufreq cooling device for the policy when the CPU's DT
 * node declares #cooling-cells.  Registration failure is logged and
 * tolerated (the CPU simply runs without a cooling device).
 */
static void msm_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct device_node *np;
	unsigned int cpu = policy->cpu;

	/* Already registered (e.g. policy re-created on hotplug). */
	if (cdev[cpu])
		return;

	np = of_cpu_device_node_get(cpu);
	if (WARN_ON(!np))
		return;

	/*
	 * For now, just loading the cooling device;
	 * thermal DT code takes care of matching them.
	 */
	if (of_find_property(np, "#cooling-cells", NULL)) {
		cdev[cpu] = of_cpufreq_cooling_register(policy);
		if (IS_ERR(cdev[cpu])) {
			pr_err("running cpufreq for CPU%d without cooling dev: %ld\n",
			       cpu, PTR_ERR(cdev[cpu]));
			cdev[cpu] = NULL;
		}
	}
	of_node_put(np);
}
/*
 * cpufreq driver ops.
 *
 * NOTE(review): .fast_switch is wired to msm_cpufreq_resolve_freq, which
 * only resolves/caches a table frequency and does not change the clock —
 * fast_switch is expected to actually switch.  This looks like it was
 * meant to be .resolve_freq; confirm against the cpufreq core contract.
 */
static struct cpufreq_driver msm_cpufreq_driver = {
	/* lps calculations are handled here. */
	.flags		= CPUFREQ_NEED_UPDATE_LIMITS | CPUFREQ_CONST_LOOPS |
			  CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.init		= msm_cpufreq_init,
	.verify		= msm_cpufreq_verify,
	.target		= msm_cpufreq_target,
	.fast_switch	= msm_cpufreq_resolve_freq,
	.get		= msm_cpufreq_get_freq,
	.name		= "msm",
	.attr		= msm_freq_attr,
	.ready		= msm_cpufreq_ready,
};
/*
 * cpufreq_parse_dt() - build a cpufreq frequency table from the DT
 * property @tbl_name, rounding each requested frequency through @cpu's
 * clock and dropping duplicates that round to the same rate.
 *
 * Returns a kcalloc'd, CPUFREQ_TABLE_END-terminated table, or ERR_PTR on
 * missing/empty property, allocation failure, or property read failure.
 *
 * Fixes: the temporary 'data' array was leaked on the
 * of_property_read_u32_array() error path and on ftbl allocation failure.
 */
static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
						char *tbl_name, int cpu)
{
	int ret, nf, i, j;
	u32 *data;
	struct cpufreq_frequency_table *ftbl;

	/* Parse list of usable CPU frequencies. */
	if (!of_find_property(dev->of_node, tbl_name, &nf))
		return ERR_PTR(-EINVAL);
	nf /= sizeof(*data);

	if (nf == 0)
		return ERR_PTR(-EINVAL);

	data = kcalloc(nf, sizeof(*data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	ret = of_property_read_u32_array(dev->of_node, tbl_name, data, nf);
	if (ret) {
		kfree(data);	/* was leaked on this path */
		return ERR_PTR(ret);
	}

	ftbl = kcalloc((nf + 1), sizeof(*ftbl), GFP_KERNEL);
	if (!ftbl) {
		kfree(data);	/* was leaked on this path */
		return ERR_PTR(-ENOMEM);
	}

	j = 0;
	for (i = 0; i < nf; i++) {
		unsigned long f;

		f = clk_round_rate(cpu_clk[cpu], data[i] * 1000);
		if (IS_ERR_VALUE(f))
			break;
		f /= 1000;

		/*
		 * Don't repeat frequencies if they round up to the same clock
		 * frequency.
		 *
		 */
		if (j > 0 && f <= ftbl[j - 1].frequency)
			continue;

		ftbl[j].driver_data = j;
		ftbl[j].frequency = f;
		j++;
	}

	/* Terminate the table. */
	ftbl[j].driver_data = j;
	ftbl[j].frequency = CPUFREQ_TABLE_END;

	kfree(data);

	return ftbl;
}
/*
 * msm_cpufreq_probe() - platform probe.
 *
 * Fetches the optional L2 clock and a per-CPU clock for every possible
 * CPU (CPUs missing a clock inherit the previous CPU's handle; CPU0's
 * clock is mandatory), then parses either one common frequency table
 * ("qcom,cpufreq-table") or per-CPU tables ("qcom,cpufreq-table-<n>"),
 * sharing tables between CPUs on the same clock.  Finally registers the
 * PM notifier and the cpufreq driver, unwinding the notifier if driver
 * registration fails.
 *
 * Fixes: the "Probe successful" message was emitted at error severity
 * via dev_err(); it is now dev_dbg().  Also fixes the "commong" typo.
 */
static int msm_cpufreq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	char clk_name[] = "cpu??_clk";
	char tbl_name[] = "qcom,cpufreq-table-??";
	struct clk *c;
	int cpu, ret;
	struct cpufreq_frequency_table *ftbl;

	/* L2 clock is optional. */
	l2_clk = devm_clk_get(dev, "l2_clk");
	if (IS_ERR(l2_clk))
		l2_clk = NULL;

	for_each_possible_cpu(cpu) {
		snprintf(clk_name, sizeof(clk_name), "cpu%d_clk", cpu);
		c = devm_clk_get(dev, clk_name);
		if (cpu == 0 && IS_ERR(c))
			return PTR_ERR(c);
		else if (IS_ERR(c))
			c = cpu_clk[cpu-1];	/* share the previous CPU's clock */
		cpu_clk[cpu] = c;
	}
	/* Clocks are available; allow the cpuhp callbacks to run. */
	hotplug_ready = true;

	/* Use per-policy governor tunable for some targets */
	if (of_property_read_bool(dev->of_node, "qcom,governor-per-policy"))
		msm_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;

	/* Parse common cpufreq table for all CPUs */
	ftbl = cpufreq_parse_dt(dev, "qcom,cpufreq-table", 0);
	if (!IS_ERR(ftbl)) {
		for_each_possible_cpu(cpu)
			per_cpu(freq_table, cpu) = ftbl;
		goto out_register;
	}

	/*
	 * No common table. Parse individual tables for each unique
	 * CPU clock.
	 */
	for_each_possible_cpu(cpu) {
		snprintf(tbl_name, sizeof(tbl_name),
			 "qcom,cpufreq-table-%d", cpu);
		ftbl = cpufreq_parse_dt(dev, tbl_name, cpu);

		/* CPU0 must contain freq table */
		if (cpu == 0 && IS_ERR(ftbl)) {
			dev_err(dev, "Failed to parse CPU0's freq table\n");
			return PTR_ERR(ftbl);
		}
		if (cpu == 0) {
			per_cpu(freq_table, cpu) = ftbl;
			continue;
		}

		if (cpu_clk[cpu] != cpu_clk[cpu - 1] && IS_ERR(ftbl)) {
			dev_err(dev, "Failed to parse CPU%d's freq table\n",
				cpu);
			return PTR_ERR(ftbl);
		}

		/* Use previous CPU's table if it shares same clock */
		if (cpu_clk[cpu] == cpu_clk[cpu - 1]) {
			if (!IS_ERR(ftbl)) {
				dev_warn(dev, "Conflicting tables for CPU%d\n",
					 cpu);
				kfree(ftbl);
			}
			ftbl = per_cpu(freq_table, cpu - 1);
		}
		per_cpu(freq_table, cpu) = ftbl;
	}

out_register:
	ret = register_pm_notifier(&msm_cpufreq_pm_notifier);
	if (ret)
		return ret;

	ret = cpufreq_register_driver(&msm_cpufreq_driver);
	if (ret)
		unregister_pm_notifier(&msm_cpufreq_pm_notifier);
	else
		dev_dbg(dev, "Probe successful\n");

	return ret;
}
/* DT match: binds to "qcom,msm-cpufreq" nodes. */
static const struct of_device_id msm_cpufreq_match_table[] = {
	{ .compatible = "qcom,msm-cpufreq" },
	{}
};

static struct platform_driver msm_cpufreq_plat_driver = {
	.probe = msm_cpufreq_probe,
	.driver = {
		.name = "msm-cpufreq",
		.of_match_table = msm_cpufreq_match_table,
	},
};
/*
 * msm_cpufreq_register() - subsys_initcall: initialize per-CPU suspend
 * state and register the platform driver.  On registration failure the
 * cpuhp states installed by msm_cpufreq_early_register() are removed so
 * hotplug is not left permanently blocked, and the mutexes are destroyed.
 */
static int __init msm_cpufreq_register(void)
{
	int cpu, rc;

	for_each_possible_cpu(cpu) {
		mutex_init(&(per_cpu(suspend_data, cpu).suspend_mutex));
		per_cpu(suspend_data, cpu).device_suspended = 0;
		/* UINT_MAX = "no cached resolve result yet". */
		per_cpu(cached_resolve_freq, cpu) = UINT_MAX;
	}

	rc = platform_driver_register(&msm_cpufreq_plat_driver);
	if (rc < 0) {
		/* Unblock hotplug if msm-cpufreq probe fails */
		cpuhp_remove_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE);
		cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
		for_each_possible_cpu(cpu)
			mutex_destroy(&(per_cpu(suspend_data, cpu).
					suspend_mutex));
		return rc;
	}

	return 0;
}
subsys_initcall(msm_cpufreq_register);
/*
 * msm_cpufreq_early_register() - core_initcall: install the cpuhp
 * startup/teardown pairs before CPUs can be hotplugged.  The callbacks
 * themselves refuse to run (-EINVAL) until probe sets hotplug_ready,
 * which effectively blocks hotplug until the driver has its clocks.
 * The STARTING state is rolled back if the PREPARE state fails.
 */
static int __init msm_cpufreq_early_register(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING,
					"AP_QCOM_CPUFREQ_STARTING",
					qcom_cpufreq_starting_cpu,
					qcom_cpufreq_dying_cpu);
	if (ret)
		return ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_QCOM_CPUFREQ_PREPARE,
					"QCOM_CPUFREQ_PREPARE",
					qcom_cpufreq_up_cpu,
					qcom_cpufreq_dead_cpu);
	if (!ret)
		return ret;
	cpuhp_remove_state_nocalls(CPUHP_AP_QCOM_CPUFREQ_STARTING);
	return ret;
}
core_initcall(msm_cpufreq_early_register);

View file

@ -84,6 +84,14 @@ config DEVFREQ_GOV_QCOM_BW_HWMON
can conflict with existing profiling tools. This governor is unlikely
to be useful for non-QCOM devices.
config DEVFREQ_GOV_CPUFREQ
tristate "CPUfreq"
depends on CPU_FREQ
help
Chooses frequency based on the online CPUs' current frequency and a
CPU frequency to device frequency mapping table(s). This governor
can be useful for controlling devices such as DDR, cache, CCI, etc.
comment "DEVFREQ Drivers"
config ARM_EXYNOS_BUS_DEVFREQ

View file

@ -7,6 +7,7 @@ obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
obj-$(CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON) += governor_bw_hwmon.o
obj-$(CONFIG_DEVFREQ_GOV_CPUFREQ) += governor_cpufreq.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o

View file

@ -0,0 +1,359 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2014, 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "devfreq-icc: " fmt
#include <linux/delay.h>
#include <linux/devfreq.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_fdt.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <soc/qcom/devfreq_icc.h>
#include <soc/qcom/of_common.h>
#include <trace/events/power.h>
/* Has to be UL to avoid errors in 32 bit. Use cautiously to avoid overflows.*/
#define MBYTE (1UL << 20)
#define HZ_TO_MBPS(hz, w) (mult_frac(w, hz, MBYTE))
#define MBPS_TO_HZ(mbps, w) (mult_frac(mbps, MBYTE, w))
#define MBPS_TO_ICC(mbps) (mult_frac(mbps, MBYTE, 1000))
/* Device flavours: how target frequencies/bandwidths are expressed. */
enum dev_type {
	STD_MBPS_DEV,	/* standard bandwidth device, MBps */
	L3_HZ_DEV,	/* L3 cache, OPPs in Hz */
	L3_MBPS_DEV,	/* L3 cache, OPPs in MBps (needs qcom,bus-width) */
	NUM_DEV_TYPES
};

/* Per-compatible match data: just the device flavour. */
struct devfreq_icc_spec {
	enum dev_type type;
};

/* Per-device driver state. */
struct dev_data {
	struct icc_path *icc_path;	/* interconnect vote path */
	u32 cur_ab;			/* last applied average bandwidth */
	u32 cur_ib;			/* last applied instantaneous bandwidth */
	unsigned long gov_ab;		/* AB requested by the governor */
	const struct devfreq_icc_spec *spec;
	unsigned int width;		/* bus width, L3_MBPS_DEV only */
	struct devfreq *df;
	struct devfreq_dev_profile dp;
};

#define MAX_L3_ENTRIES 40U
/* L3 frequency table read once from hardware and shared by later
 * devices via copy_l3_opp_table(); guarded by l3_freqs_lock. */
static unsigned long l3_freqs[MAX_L3_ENTRIES];
static DEFINE_MUTEX(l3_freqs_lock);
static bool use_cached_l3_freqs;
/*
 * mbps_to_hz_icc() - scale @in by MBYTE/@width without 32-bit overflow.
 *
 * Splitting @in into quotient and remainder of @width keeps each partial
 * product small before the terms are combined into a 64-bit result.
 */
static u64 mbps_to_hz_icc(u32 in, uint width)
{
	u32 whole = in / width;
	u32 leftover = in % width;
	u64 hz;

	hz = whole * MBYTE;
	hz += div_u64(leftover * MBYTE, width);

	return hz;
}
/*
 * set_bw() - apply new instantaneous/average bandwidth votes on the
 * device's interconnect path, converting MBps-style values to the units
 * icc expects per device flavour.  No-ops when the vote is unchanged;
 * cur_ib/cur_ab are only updated on success so a failed vote is retried.
 */
static int set_bw(struct device *dev, u32 new_ib, u32 new_ab)
{
	struct dev_data *d = dev_get_drvdata(dev);
	int ret;
	u64 icc_ib = new_ib, icc_ab = new_ab;

	/* Skip redundant votes. */
	if (d->cur_ib == new_ib && d->cur_ab == new_ab)
		return 0;

	if (d->spec->type == L3_MBPS_DEV) {
		icc_ib = mbps_to_hz_icc(new_ib, d->width);
		icc_ab = mbps_to_hz_icc(new_ab, d->width);
	} else if (d->spec->type == STD_MBPS_DEV) {
		icc_ib = mbps_to_hz_icc(new_ib, 1000);
		icc_ab = mbps_to_hz_icc(new_ab, 1000);
	}

	dev_dbg(dev, "ICC BW: AB: %llu IB: %llu\n", icc_ab, icc_ib);
	ret = icc_set_bw(d->icc_path, icc_ab, icc_ib);
	if (ret < 0) {
		dev_err(dev, "icc set bandwidth request failed (%d)\n", ret);
	} else {
		d->cur_ib = new_ib;
		d->cur_ab = new_ab;
	}

	return ret;
}
/*
 * icc_target() - devfreq .target hook: clamp @freq to a recommended OPP,
 * then vote it as IB with the governor-provided AB.
 */
static int icc_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct dev_data *d = dev_get_drvdata(dev);
	struct dev_pm_opp *opp;

	/* devfreq_recommended_opp() adjusts *freq in place on success. */
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (!IS_ERR(opp))
		dev_pm_opp_put(opp);

	return set_bw(dev, *freq, d->gov_ab);
}
/*
 * icc_get_dev_status() - devfreq .get_dev_status hook.
 *
 * Hands the governor a pointer to gov_ab so it can deposit an AB vote
 * for the next icc_target() call.
 */
static int icc_get_dev_status(struct device *dev,
				struct devfreq_dev_status *stat)
{
	struct dev_data *drvdata = dev_get_drvdata(dev);

	stat->private_data = &drvdata->gov_ab;
	return 0;
}
#define INIT_HZ 300000000UL
#define XO_HZ 19200000UL
#define FTBL_ROW_SIZE 4
#define SRC_MASK GENMASK(31, 30)
#define SRC_SHIFT 30
#define MULT_MASK GENMASK(7, 0)
/*
 * populate_l3_opp_table() - read the L3 frequency plan from the hardware
 * frequency table ("ftbl-base" reg region) and register each entry as an
 * OPP (converted to MBps for L3_MBPS_DEV).  Frequencies are also cached
 * in l3_freqs[] so later devices can use copy_l3_opp_table().
 *
 * Each ftbl row encodes source (bits 31:30) and multiplier (bits 7:0):
 * freq = XO * mult when a source is selected, else the fixed INIT_HZ.
 * A repeated frequency terminates the table.
 *
 * Caller holds l3_freqs_lock (see devfreq_add_icc()).
 */
static int populate_l3_opp_table(struct device *dev)
{
	struct dev_data *d = dev_get_drvdata(dev);
	int idx, ret;
	u32 data, src, mult, i;
	unsigned long freq, prev_freq = 0;
	struct resource res;
	void __iomem *ftbl_base;
	unsigned int ftbl_row_size = FTBL_ROW_SIZE;

	idx = of_property_match_string(dev->of_node, "reg-names", "ftbl-base");
	if (idx < 0) {
		dev_err(dev, "Unable to find ftbl-base: %d\n", idx);
		return -EINVAL;
	}

	ret = of_address_to_resource(dev->of_node, idx, &res);
	if (ret < 0) {
		dev_err(dev, "Unable to get resource from address: %d\n", ret);
		return -EINVAL;
	}

	ftbl_base = ioremap(res.start, resource_size(&res));
	if (!ftbl_base) {
		dev_err(dev, "Unable to map ftbl-base!\n");
		return -ENOMEM;
	}

	/* Optional override of the default row stride. */
	of_property_read_u32(dev->of_node, "qcom,ftbl-row-size",
			     &ftbl_row_size);

	for (i = 0; i < MAX_L3_ENTRIES; i++) {
		data = readl_relaxed(ftbl_base + i * ftbl_row_size);
		src = ((data & SRC_MASK) >> SRC_SHIFT);
		mult = (data & MULT_MASK);
		freq = src ? XO_HZ * mult : INIT_HZ;

		/* Two of the same frequencies means end of table */
		if (i > 0 && prev_freq == freq)
			break;

		if (d->spec->type == L3_MBPS_DEV)
			dev_pm_opp_add(dev, HZ_TO_MBPS(freq, d->width), 0);
		else
			dev_pm_opp_add(dev, freq, 0);
		l3_freqs[i] = freq;
		prev_freq = freq;
	}

	iounmap(ftbl_base);
	use_cached_l3_freqs = true;

	return 0;
}
/*
 * copy_l3_opp_table() - register OPPs for this device from the l3_freqs[]
 * cache filled by a previous populate_l3_opp_table() call.  The cache is
 * zero-terminated (static storage); an empty cache is an error.
 */
static int copy_l3_opp_table(struct device *dev)
{
	struct dev_data *d = dev_get_drvdata(dev);
	int idx;

	for (idx = 0; idx < MAX_L3_ENTRIES; idx++) {
		if (!l3_freqs[idx])
			break;
		if (d->spec->type == L3_MBPS_DEV)
			dev_pm_opp_add(dev,
				       HZ_TO_MBPS(l3_freqs[idx], d->width), 0);
		else
			dev_pm_opp_add(dev, l3_freqs[idx], 0);
	}

	if (!idx) {
		dev_err(dev, "No L3 frequencies copied for device!\n");
		return -EINVAL;
	}

	return 0;
}
#define PROP_ACTIVE "qcom,active-only"
#define ACTIVE_ONLY_TAG 0x3
/*
 * devfreq_add_icc() - set up a devfreq-over-interconnect device.
 *
 * Builds the devfreq profile, populates the OPP table (from the L3
 * hardware table, the shared L3 cache, or the DT OPP table depending on
 * device flavour), acquires the icc path, and registers the devfreq
 * device with the DT-selected governor (default "performance").
 *
 * NOTE(review): 'version' is computed from the DDR type but never used,
 * and the opp_table reference taken by dev_pm_opp_get_opp_table() is
 * never released in this function — both look like remnants of a
 * dev_pm_opp_set_supported_hw() call; confirm against the original.
 * Also note an OPP-parse failure is only logged, not returned.
 */
int devfreq_add_icc(struct device *dev)
{
	struct dev_data *d;
	struct devfreq_dev_profile *p;
	const char *gov_name;
	int ret;
	struct opp_table *opp_table;
	u32 version;

	d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;
	dev_set_drvdata(dev, d);

	d->spec = of_device_get_match_data(dev);
	if (!d->spec) {
		dev_err(dev, "Unknown device type!\n");
		return -ENODEV;
	}

	p = &d->dp;
	p->polling_ms = 500;
	p->target = icc_target;
	p->get_dev_status = icc_get_dev_status;

	if (of_device_is_compatible(dev->of_node, "qcom,devfreq-icc-ddr")) {
		version = (1 << of_fdt_get_ddrtype());
		opp_table = dev_pm_opp_get_opp_table(dev);
		if (IS_ERR(opp_table)) {
			dev_err(dev, "Failed to set supported hardware\n");
			return PTR_ERR(opp_table);
		}
	}

	/* L3-in-MBps devices need the bus width to convert units. */
	if (d->spec->type == L3_MBPS_DEV) {
		ret = of_property_read_u32(dev->of_node, "qcom,bus-width",
					   &d->width);
		if (ret < 0 || !d->width) {
			dev_err(dev, "Missing or invalid bus-width: %d\n", ret);
			return -EINVAL;
		}
	}

	if (d->spec->type == L3_HZ_DEV || d->spec->type == L3_MBPS_DEV) {
		/* First L3 device reads hardware; later ones reuse the cache. */
		mutex_lock(&l3_freqs_lock);
		if (use_cached_l3_freqs) {
			mutex_unlock(&l3_freqs_lock);
			ret = copy_l3_opp_table(dev);
		} else {
			ret = populate_l3_opp_table(dev);
			mutex_unlock(&l3_freqs_lock);
		}
	} else {
		ret = dev_pm_opp_of_add_table(dev);
	}
	if (ret < 0)
		dev_err(dev, "Couldn't parse OPP table:%d\n", ret);

	d->icc_path = of_icc_get(dev, NULL);
	if (IS_ERR(d->icc_path)) {
		ret = PTR_ERR(d->icc_path);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Unable to register icc path: %d\n", ret);
		return ret;
	}

	/* Tag votes as active-only when requested by DT. */
	if (of_property_read_bool(dev->of_node, PROP_ACTIVE))
		icc_set_tag(d->icc_path, ACTIVE_ONLY_TAG);

	if (of_property_read_string(dev->of_node, "governor", &gov_name))
		gov_name = "performance";

	d->df = devfreq_add_device(dev, p, gov_name, NULL);
	if (IS_ERR(d->df)) {
		icc_put(d->icc_path);
		return PTR_ERR(d->df);
	}

	return 0;
}
int devfreq_remove_icc(struct device *dev)
{
struct dev_data *d = dev_get_drvdata(dev);
icc_put(d->icc_path);
devfreq_remove_device(d->df);
return 0;
}
/* Suspend this device's devfreq instance (pauses polling/voting). */
int devfreq_suspend_icc(struct device *dev)
{
	struct dev_data *drvdata = dev_get_drvdata(dev);

	return devfreq_suspend_device(drvdata->df);
}
/* Resume this device's devfreq instance after devfreq_suspend_icc(). */
int devfreq_resume_icc(struct device *dev)
{
	struct dev_data *drvdata = dev_get_drvdata(dev);

	return devfreq_resume_device(drvdata->df);
}
/* Platform-driver probe/remove: thin wrappers over the add/remove API. */
static int devfreq_icc_probe(struct platform_device *pdev)
{
	return devfreq_add_icc(&pdev->dev);
}

static int devfreq_icc_remove(struct platform_device *pdev)
{
	return devfreq_remove_icc(&pdev->dev);
}
/* Match data: one spec per device flavour (indexed by dev_type). */
static const struct devfreq_icc_spec spec[] = {
	[0] = { STD_MBPS_DEV },
	[1] = { L3_HZ_DEV },
	[2] = { L3_MBPS_DEV },
};

static const struct of_device_id devfreq_icc_match_table[] = {
	{ .compatible = "qcom,devfreq-icc-l3bw", .data = &spec[2] },
	{ .compatible = "qcom,devfreq-icc-l3", .data = &spec[1] },
	{ .compatible = "qcom,devfreq-icc-llcc", .data = &spec[0] },
	{ .compatible = "qcom,devfreq-icc-ddr", .data = &spec[0] },
	{ .compatible = "qcom,devfreq-icc", .data = &spec[0] },
	{}
};

static struct platform_driver devfreq_icc_driver = {
	.probe = devfreq_icc_probe,
	.remove = devfreq_icc_remove,
	.driver = {
		.name = "devfreq-icc",
		.of_match_table = devfreq_icc_match_table,
		/* No manual bind/unbind via sysfs. */
		.suppress_bind_attrs = true,
	},
};
module_platform_driver(devfreq_icc_driver);
MODULE_DESCRIPTION("Device DDR bandwidth voting driver MSM SoCs");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,729 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "dev-cpufreq: " fmt
#include <linux/devfreq.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/module.h>
#include "governor.h"
/* Snapshot of one cpufreq policy's frequency state. A single instance is
 * shared by all CPUs in policy->related_cpus (see add_policy()), stored in
 * state[] under each member CPU's index.
 */
struct cpu_state {
	unsigned int freq;	/* current frequency (kHz, from policy->cur) */
	unsigned int min_freq;	/* policy->cpuinfo.min_freq */
	unsigned int max_freq;	/* policy->cpuinfo.max_freq */
	bool on;		/* false while the policy's CPUs are offline */
	unsigned int first_cpu;	/* owning CPU; only its entry drives votes */
};
static struct cpu_state *state[NR_CPUS];
/* Number of governor instances using the shared cpufreq hooks */
static int cpufreq_cnt;

/* One row of a CPU-kHz -> device-frequency table; cpu_khz == 0 ends it. */
struct freq_map {
	unsigned int cpu_khz;
	unsigned int target_freq;
};

/* Per devfreq-device governor bookkeeping, linked on devfreq_list. */
struct devfreq_node {
	struct devfreq *df;
	void *orig_data;	/* devfreq->data saved while governor active */
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;
	struct freq_map **map;		/* per-CPU tables (entries may be NULL) */
	struct freq_map *common_map;	/* single table shared by all CPUs */
	unsigned int timeout;		/* ms before a raised vote is dropped */
	struct delayed_work dwork;	/* runs do_timeout() */
	bool drop;		/* true: next get_target_freq() returns 0 */
	unsigned long prev_tgt;	/* target computed on the previous pass */
};
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(state_lock);	/* guards state[] and devfreq_list */
static DEFINE_MUTEX(cpufreq_reg_lock);	/* guards cpufreq_cnt transitions */
/* Generates a sysfs show() callback printing the devfreq_node field of the
 * same name as an unsigned decimal.
 */
#define show_attr(name) \
static ssize_t name##_show(struct device *dev, \
			struct device_attribute *attr, char *buf) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct devfreq_node *n = df->data; \
	return scnprintf(buf, PAGE_SIZE, "%u\n", n->name); \
}
/* Generates a sysfs store() callback parsing an unsigned decimal into the
 * devfreq_node field of the same name, clamped to [_min, _max].
 */
#define store_attr(name, _min, _max) \
static ssize_t name##_store(struct device *dev, \
			struct device_attribute *attr, const char *buf, \
			size_t count) \
{ \
	struct devfreq *df = to_devfreq(dev); \
	struct devfreq_node *n = df->data; \
	int ret; \
	unsigned int val; \
	/* val is unsigned int: kstrtouint, not kstrtoint (int *) */ \
	ret = kstrtouint(buf, 10, &val); \
	if (ret) \
		return ret; \
	val = clamp(val, _min, _max); \
	n->name = val; \
	return count; \
}
/* Re-evaluate the devfreq device tied to @node and, when a drop timeout is
 * configured and the device settled above its scaling minimum, (re)arm the
 * delayed worker that will later force the vote back down.
 *
 * Returns 0 on success / when no devfreq is attached yet, or the
 * update_devfreq() error. Takes df->lock itself.
 */
static int update_node(struct devfreq_node *node)
{
	int ret;
	struct devfreq *df = node->df;

	/* Governor not started on this node yet (DT-only node) */
	if (!df)
		return 0;

	/* Kill any in-flight drop before a fresh evaluation */
	cancel_delayed_work_sync(&node->dwork);

	mutex_lock(&df->lock);
	node->drop = false;
	ret = update_devfreq(df);
	if (ret) {
		dev_err(df->dev.parent, "Unable to update frequency\n");
		goto out;
	}
	if (!node->timeout)
		goto out;
	/* At the floor already: nothing to drop later */
	if (df->previous_freq <= df->scaling_min_freq)
		goto out;
	schedule_delayed_work(&node->dwork,
			msecs_to_jiffies(node->timeout));
out:
	mutex_unlock(&df->lock);
	return ret;
}
static void update_all_devfreqs(void)
{
struct devfreq_node *node;
list_for_each_entry(node, &devfreq_list, list) {
update_node(node);
}
}
/* Delayed-work handler: the configured timeout elapsed without a refresh,
 * so set node->drop and re-evaluate — get_target_freq() then returns 0,
 * letting the device frequency fall to its minimum.
 */
static void do_timeout(struct work_struct *work)
{
	struct devfreq_node *node = container_of(to_delayed_work(work),
			struct devfreq_node, dwork);
	struct devfreq *df = node->df;

	mutex_lock(&df->lock);
	node->drop = true;
	update_devfreq(df);
	mutex_unlock(&df->lock);
}
static struct devfreq_node *find_devfreq_node(struct device *dev)
{
struct devfreq_node *node;
list_for_each_entry(node, &devfreq_list, list)
if (node->dev == dev || node->of_node == dev->of_node)
return node;
return NULL;
}
/* ==================== cpufreq part ==================== */
/* Record (or refresh) the cpu_state for a cpufreq policy. One cpu_state is
 * shared by every CPU in policy->related_cpus; first_cpu marks the single
 * CPU whose entry contributes device-frequency votes.
 */
static void add_policy(struct cpufreq_policy *policy)
{
	struct cpu_state *new_state;
	unsigned int cpu, first_cpu;

	if (state[policy->cpu]) {
		/* Policy seen before (e.g. CPU back online): refresh only */
		state[policy->cpu]->freq = policy->cur;
		state[policy->cpu]->on = true;
	} else {
		new_state = kzalloc(sizeof(struct cpu_state), GFP_KERNEL);
		if (!new_state)
			return;
		first_cpu = cpumask_first(policy->related_cpus);
		new_state->first_cpu = first_cpu;
		new_state->freq = policy->cur;
		new_state->min_freq = policy->cpuinfo.min_freq;
		new_state->max_freq = policy->cpuinfo.max_freq;
		new_state->on = true;
		/* Share the one instance across all CPUs of the policy */
		for_each_cpu(cpu, policy->related_cpus)
			state[cpu] = new_state;
	}
}
/* cpufreq transition notifier: on POSTCHANGE, mirror the new CPU frequency
 * into state[] and re-vote all registered devfreq devices.
 */
static int cpufreq_trans_notifier(struct notifier_block *nb,
		unsigned long event, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpu_state *s;

	if (event != CPUFREQ_POSTCHANGE)
		return 0;

	mutex_lock(&state_lock);
	s = state[freq->policy->cpu];
	if (!s)
		goto out;
	/* Only re-vote when the frequency actually changed */
	if (s->freq != freq->new) {
		s->freq = freq->new;
		update_all_devfreqs();
	}
out:
	mutex_unlock(&state_lock);
	return 0;
}

static struct notifier_block cpufreq_trans_nb = {
	.notifier_call = cpufreq_trans_notifier
};
/* CPU-online hotplug callback: refresh the policy's cpu_state and re-vote
 * all devfreq devices. Always returns 0 so hotplug proceeds.
 *
 * Fix: cpufreq_cpu_get() takes a reference on the policy; the original code
 * never released it, leaking one refcount per online transition.
 */
static int devfreq_cpufreq_hotplug_coming_up(unsigned int cpu)
{
	struct cpufreq_policy *policy;

	policy = cpufreq_cpu_get(cpu);
	if (!policy) {
		pr_err("Policy is null for cpu =%d\n", cpu);
		return 0;
	}

	mutex_lock(&state_lock);
	add_policy(policy);
	update_all_devfreqs();
	mutex_unlock(&state_lock);

	/* balance cpufreq_cpu_get() */
	cpufreq_cpu_put(policy);
	return 0;
}
/* CPU-offline hotplug callback: mark the policy's cpu_state off so its vote
 * drops, then re-vote all devfreq devices. Always returns 0.
 *
 * Fix: release the reference taken by cpufreq_cpu_get() (was leaked).
 */
static int devfreq_cpufreq_hotplug_going_down(unsigned int cpu)
{
	struct cpufreq_policy *policy;

	policy = cpufreq_cpu_get(cpu);
	if (!policy) {
		pr_err("Policy is null for cpu =%d\n", cpu);
		return 0;
	}

	mutex_lock(&state_lock);
	if (state[policy->cpu]) {
		state[policy->cpu]->on = false;
		update_all_devfreqs();
	}
	mutex_unlock(&state_lock);

	/* balance cpufreq_cpu_get() */
	cpufreq_cpu_put(policy);
	return 0;
}
/* Register dynamic CPU-hotplug callbacks so state[] tracks CPUs going on-
 * and offline. Returns 0 on success, negative error on failure.
 */
static int devfreq_cpufreq_cpu_hp_init(void)
{
	int ret = 0;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
			"DEVFREQ_CPUFREQ",
			devfreq_cpufreq_hotplug_coming_up,
			devfreq_cpufreq_hotplug_going_down);
	if (ret < 0) {
		/* NOTE(review): setup failed, so there should be no state to
		 * remove; calling cpuhp_remove_state(CPUHP_AP_ONLINE_DYN)
		 * here looks suspect — confirm against the cpuhp API. The
		 * dynamically allocated state id (ret > 0 on success) is also
		 * discarded; unregister_cpufreq() relies on the same
		 * CPUHP_AP_ONLINE_DYN constant — verify this is intended.
		 */
		cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
		pr_err("devfreq-cpufreq: failed to register HP notifier: %d\n",
				ret);
	} else
		ret = 0;
	return ret;
}
/* Refcounted registration of the shared cpufreq hooks (hotplug callbacks +
 * transition notifier), plus an initial snapshot of all online policies.
 * Called once per governor instance from gov_start; only the first caller
 * does the real work. Returns 0 on success.
 */
static int register_cpufreq(void)
{
	int ret = 0;
	unsigned int cpu;
	struct cpufreq_policy *policy;

	mutex_lock(&cpufreq_reg_lock);
	/* Hooks already registered by an earlier governor instance */
	if (cpufreq_cnt)
		goto cnt_not_zero;

	/* Hold the CPU read lock so the online set is stable while we seed */
	cpus_read_lock();
	ret = devfreq_cpufreq_cpu_hp_init();
	if (ret < 0)
		goto out;
	ret = cpufreq_register_notifier(&cpufreq_trans_nb,
				CPUFREQ_TRANSITION_NOTIFIER);
	if (ret)
		goto out;
	/* Seed state[] from the currently online policies; add_policy()
	 * copies what it needs, so the reference is dropped immediately.
	 */
	for_each_online_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (policy) {
			add_policy(policy);
			cpufreq_cpu_put(policy);
		}
	}
out:
	cpus_read_unlock();
cnt_not_zero:
	/* Count this instance only when registration succeeded */
	if (!ret)
		cpufreq_cnt++;
	mutex_unlock(&cpufreq_reg_lock);
	return ret;
}
/* Refcounted teardown mirror of register_cpufreq(): the last governor
 * instance removes the hotplug state and notifier and frees all cpu_state
 * objects. Always returns 0.
 */
static int unregister_cpufreq(void)
{
	int cpu;

	mutex_lock(&cpufreq_reg_lock);
	/* Other governor instances still depend on the hooks */
	if (cpufreq_cnt > 1)
		goto out;

	cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
	cpufreq_unregister_notifier(&cpufreq_trans_nb,
				CPUFREQ_TRANSITION_NOTIFIER);
	for (cpu = ARRAY_SIZE(state) - 1; cpu >= 0; cpu--) {
		if (!state[cpu])
			continue;
		/* Entries are shared per policy: free only via first_cpu */
		if (state[cpu]->first_cpu == cpu)
			kfree(state[cpu]);
		state[cpu] = NULL;
	}
out:
	cpufreq_cnt--;
	mutex_unlock(&cpufreq_reg_lock);
	return 0;
}
/* ==================== devfreq part ==================== */
/* Linearly map the CPU's position within its [min, max] frequency range
 * onto the device's frequency range. Returns 0 when no sane range can be
 * determined. Caller guarantees state[cpu] is valid (cpu_to_dev_freq()).
 *
 * Fix: guard the percentage division — a degenerate CPU range
 * (cpu_max <= cpu_min) previously divided by zero.
 */
static unsigned int interpolate_freq(struct devfreq *df, unsigned int cpu)
{
	unsigned long *freq_table = df->profile->freq_table;
	unsigned int cpu_min = state[cpu]->min_freq;
	unsigned int cpu_max = state[cpu]->max_freq;
	unsigned int cpu_freq = state[cpu]->freq;
	unsigned int dev_min, dev_max, cpu_percent;

	if (freq_table) {
		/* Assumes freq_table is sorted ascending — first/last entries
		 * taken as the device range. TODO(review): confirm ordering.
		 */
		dev_min = freq_table[0];
		dev_max = freq_table[df->profile->max_state - 1];
	} else {
		if (df->scaling_max_freq <= df->scaling_min_freq)
			return 0;
		dev_min = df->scaling_min_freq;
		dev_max = df->scaling_max_freq;
	}

	/* Avoid divide-by-zero on a degenerate CPU frequency range */
	if (cpu_max <= cpu_min)
		return 0;

	cpu_percent = ((cpu_freq - cpu_min) * 100) / (cpu_max - cpu_min);
	return dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);
}
/* Compute this CPU's requested device frequency. Returns 0 unless @cpu is
 * the online first_cpu of its policy; otherwise looks up the mapping table
 * (first row with cpu_khz >= current freq, last real row if past the end)
 * or falls back to linear interpolation when no table exists.
 */
static unsigned int cpu_to_dev_freq(struct devfreq *df, unsigned int cpu)
{
	struct freq_map *map = NULL;
	unsigned int cpu_khz = 0, freq;
	struct devfreq_node *n = df->data;

	/* Only the policy-owning CPU contributes a vote, and only online */
	if (!state[cpu] || !state[cpu]->on || state[cpu]->first_cpu != cpu) {
		freq = 0;
		goto out;
	}

	if (n->common_map)
		map = n->common_map;
	else if (n->map)
		map = n->map[cpu];
	cpu_khz = state[cpu]->freq;

	if (!map) {
		freq = interpolate_freq(df, cpu);
		goto out;
	}

	/* Tables end with a cpu_khz == 0 sentinel row (see read_tbl()) */
	while (map->cpu_khz && map->cpu_khz < cpu_khz)
		map++;
	if (!map->cpu_khz)
		map--;
	freq = map->target_freq;
out:
	dev_dbg(df->dev.parent, "CPU%u: %d -> dev: %u\n", cpu, cpu_khz, freq);
	return freq;
}
/* Governor get_target_freq hook: the device target is the maximum over all
 * CPUs' mapped frequencies. Requests 0 (devfreq minimum) when the drop
 * timeout fired, or — with a timeout configured — when the target decreased
 * since the previous evaluation.
 */
static int devfreq_cpufreq_get_freq(struct devfreq *df,
			unsigned long *freq)
{
	unsigned int cpu, tgt_freq = 0;
	struct devfreq_node *node;

	node = df->data;
	if (!node) {
		pr_err("Unable to find devfreq node!\n");
		return -ENODEV;
	}

	if (node->drop) {
		/* do_timeout() fired: let the frequency fall */
		*freq = 0;
		return 0;
	}

	for_each_possible_cpu(cpu)
		tgt_freq = max(tgt_freq, cpu_to_dev_freq(df, cpu));

	/* With a timeout, a lowered target requests the minimum outright */
	if (node->timeout && tgt_freq < node->prev_tgt)
		*freq = 0;
	else
		*freq = tgt_freq;
	node->prev_tgt = tgt_freq;
	return 0;
}
/* Format a freq_map table into @buf ("CPU freq\tDevice freq" header plus
 * one row per entry), bounded by @len. Returns the byte count written.
 */
static unsigned int show_table(char *buf, unsigned int len,
			struct freq_map *map)
{
	unsigned int cnt = 0;

	cnt += scnprintf(buf + cnt, len - cnt, "CPU freq\tDevice freq\n");
	/* Stop at the cpu_khz == 0 sentinel or when the buffer fills */
	while (map->cpu_khz && cnt < len) {
		cnt += scnprintf(buf + cnt, len - cnt, "%8u\t%11u\n",
				map->cpu_khz, map->target_freq);
		map++;
	}
	if (cnt < len)
		cnt += scnprintf(buf + cnt, len - cnt, "\n");
	return cnt;
}
/* sysfs <dev>/cpufreq/freq_map show: dumps the common table, or each
 * per-CPU table, or a note that interpolation is in effect.
 */
static ssize_t freq_map_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct devfreq *df = to_devfreq(dev);
	struct devfreq_node *n = df->data;
	struct freq_map *map;
	unsigned int cnt = 0, cpu;

	mutex_lock(&state_lock);
	if (n->common_map) {
		map = n->common_map;
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt,
				"Common table for all CPUs:\n");
		cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map);
	} else if (n->map) {
		for_each_possible_cpu(cpu) {
			map = n->map[cpu];
			if (!map)
				continue;
			cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt,
					"CPU %u:\n", cpu);
			/* sysfs output is capped at one page */
			if (cnt >= PAGE_SIZE)
				break;
			cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map);
			if (cnt >= PAGE_SIZE)
				break;
		}
	} else {
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt,
				"Device freq interpolated based on CPU freq\n");
	}
	mutex_unlock(&state_lock);
	return cnt;
}
static DEVICE_ATTR_RO(freq_map);
/* timeout (ms) attribute, store clamped to [0, 100] */
show_attr(timeout);
store_attr(timeout, 0U, 100U);
static DEVICE_ATTR_RW(timeout);

static struct attribute *dev_attr[] = {
	&dev_attr_freq_map.attr,
	&dev_attr_timeout.attr,
	NULL,
};

/* Attributes appear under the devfreq device's "cpufreq" sysfs group */
static struct attribute_group dev_attr_group = {
	.name = "cpufreq",
	.attrs = dev_attr,
};
/* DEVFREQ_GOV_START handler: attach this devfreq device to the governor.
 * Registers the shared cpufreq hooks, creates the sysfs group, finds the
 * DT-created devfreq_node (or allocates an interpolation-only one), points
 * devfreq->data at the node, and performs an initial evaluation.
 * On failure every step is unwound in reverse order.
 */
static int devfreq_cpufreq_gov_start(struct devfreq *devfreq)
{
	int ret = 0;
	struct devfreq_node *node;
	bool alloc = false;

	ret = register_cpufreq();
	if (ret)
		return ret;

	ret = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
	if (ret) {
		unregister_cpufreq();
		return ret;
	}

	mutex_lock(&state_lock);
	node = find_devfreq_node(devfreq->dev.parent);
	if (node == NULL) {
		/* No DT tables for this device: interpolation-only node */
		node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto alloc_fail;
		}
		alloc = true;
		node->dev = devfreq->dev.parent;
		list_add_tail(&node->list, &devfreq_list);
	}
	INIT_DELAYED_WORK(&node->dwork, do_timeout);

	/* Save the device's private data; restored in gov_stop */
	node->df = devfreq;
	node->orig_data = devfreq->data;
	devfreq->data = node;

	ret = update_node(node);
	if (ret)
		goto update_fail;

	mutex_unlock(&state_lock);
	return 0;

update_fail:
	devfreq->data = node->orig_data;
	/* Only unlink/free nodes we created here; DT nodes stay listed */
	if (alloc) {
		list_del(&node->list);
		kfree(node);
	}
alloc_fail:
	mutex_unlock(&state_lock);
	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
	unregister_cpufreq();
	return ret;
}
/* DEVFREQ_GOV_STOP handler: undo gov_start. Nodes with DT-provided tables
 * stay on devfreq_list for a later restart; nodes allocated by gov_start
 * (no tables) are unlinked and freed.
 */
static void devfreq_cpufreq_gov_stop(struct devfreq *devfreq)
{
	struct devfreq_node *node = devfreq->data;

	cancel_delayed_work_sync(&node->dwork);

	mutex_lock(&state_lock);
	/* Give the device back its original private data */
	devfreq->data = node->orig_data;
	if (node->map || node->common_map) {
		node->df = NULL;
	} else {
		list_del(&node->list);
		kfree(node);
	}
	mutex_unlock(&state_lock);

	sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
	unregister_cpufreq();
}
/* Governor event dispatcher; only START and STOP are acted upon. */
static int devfreq_cpufreq_ev_handler(struct devfreq *devfreq,
		unsigned int event, void *data)
{
	int err;

	if (event == DEVFREQ_GOV_START) {
		err = devfreq_cpufreq_gov_start(devfreq);
		if (err) {
			pr_err("Governor start failed!\n");
			return err;
		}
		pr_debug("Enabled dev CPUfreq governor\n");
	} else if (event == DEVFREQ_GOV_STOP) {
		devfreq_cpufreq_gov_stop(devfreq);
		pr_debug("Disabled dev CPUfreq governor\n");
	}

	return 0;
}
/* Governor descriptor: devfreq devices select it by the name "cpufreq". */
static struct devfreq_governor devfreq_cpufreq = {
	.name = "cpufreq",
	.get_target_freq = devfreq_cpufreq_get_freq,
	.event_handler = devfreq_cpufreq_ev_handler,
};
#define NUM_COLS 2
/* Read a <cpu_khz, target_freq> pair table from DT property @prop_name.
 * Returns a heap array terminated by a cpu_khz == 0 sentinel row, or NULL
 * when the property is absent or malformed. Caller frees with kfree().
 *
 * Fix: use kcalloc() for the array allocation (overflow-safe multiply)
 * instead of the open-coded kzalloc((nf + 1) * sizeof(*tbl)).
 */
static struct freq_map *read_tbl(struct device_node *of_node, char *prop_name)
{
	int len, nf, i, j;
	u32 data;
	struct freq_map *tbl;

	if (!of_find_property(of_node, prop_name, &len))
		return NULL;
	len /= sizeof(data);

	/* Must be a non-empty whole number of (cpu_khz, target_freq) pairs */
	if (len % NUM_COLS || len == 0)
		return NULL;
	nf = len / NUM_COLS;

	/* +1 row for the zero sentinel */
	tbl = kcalloc(nf + 1, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return NULL;

	for (i = 0, j = 0; i < nf; i++, j += 2) {
		of_property_read_u32_index(of_node, prop_name, j, &data);
		tbl[i].cpu_khz = data;
		of_property_read_u32_index(of_node, prop_name, j + 1, &data);
		tbl[i].target_freq = data;
	}
	tbl[i].cpu_khz = 0;
	return tbl;
}
#define PROP_TARGET "target-dev"
#define PROP_TABLE "cpu-to-dev-map"
/* Parse one devfreq-cpufreq DT child node: resolve the target device node
 * and read either one common CPU->device table or per-CPU tables, then
 * queue the resulting devfreq_node on devfreq_list for gov_start to find.
 *
 * Returns 0 on success (the of_parse_phandle() reference is kept in
 * node->of_node), negative error otherwise.
 *
 * Fix: the reference taken by of_parse_phandle() was leaked on every error
 * path; drop it with of_node_put() before returning a failure.
 */
static int add_table_from_of(struct device_node *of_node)
{
	struct device_node *target_of_node;
	struct devfreq_node *node;
	struct freq_map *common_tbl;
	struct freq_map **tbl_list = NULL;
	static char prop_name[] = PROP_TABLE "-999999";
	int cpu, ret, cnt = 0, prop_sz = ARRAY_SIZE(prop_name);

	target_of_node = of_parse_phandle(of_node, PROP_TARGET, 0);
	if (!target_of_node)
		return -EINVAL;

	node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL);
	if (!node) {
		ret = -ENOMEM;
		goto err_node;
	}

	common_tbl = read_tbl(of_node, PROP_TABLE);
	if (!common_tbl) {
		/* No common table: look for per-CPU "cpu-to-dev-map-<N>" */
		tbl_list = kcalloc(num_possible_cpus(), sizeof(*tbl_list), GFP_KERNEL);
		if (!tbl_list) {
			ret = -ENOMEM;
			goto err_list;
		}
		for_each_possible_cpu(cpu) {
			ret = scnprintf(prop_name, prop_sz, "%s-%d",
					PROP_TABLE, cpu);
			if (ret >= prop_sz) {
				pr_warn("More CPUs than I can handle!\n");
				pr_warn("Skipping rest of the tables!\n");
				break;
			}
			tbl_list[cpu] = read_tbl(of_node, prop_name);
			if (tbl_list[cpu])
				cnt++;
		}
	}
	/* Neither a common table nor any per-CPU table found */
	if (!common_tbl && !cnt) {
		ret = -EINVAL;
		goto err_tbl;
	}

	mutex_lock(&state_lock);
	node->of_node = target_of_node;
	node->map = tbl_list;
	node->common_map = common_tbl;
	list_add_tail(&node->list, &devfreq_list);
	mutex_unlock(&state_lock);
	return 0;

err_tbl:
	kfree(tbl_list);
err_list:
	kfree(node);
err_node:
	/* drop the reference taken by of_parse_phandle() */
	of_node_put(target_of_node);
	return ret;
}
/* Module init: parse every child of the DT "devfreq-cpufreq" node into
 * mapping tables, then register the governor. Registration is skipped
 * entirely when no such DT node exists.
 *
 * Fix: the success message was logged at pr_err level; demoted to pr_debug.
 */
static int __init devfreq_cpufreq_init(void)
{
	int ret;
	struct device_node *of_par, *of_child;

	of_par = of_find_node_by_name(NULL, "devfreq-cpufreq");
	if (of_par) {
		for_each_child_of_node(of_par, of_child) {
			ret = add_table_from_of(of_child);
			if (ret)
				pr_err("Parsing %s failed!\n", of_child->name);
			else
				pr_debug("Parsed %s.\n", of_child->name);
		}
		of_node_put(of_par);
	} else {
		pr_info("No tables parsed from DT.\n");
		return 0;
	}

	ret = devfreq_add_governor(&devfreq_cpufreq);
	if (ret) {
		pr_err("Governor add failed!\n");
		return ret;
	}
	/* success path: not an error */
	pr_debug("Governor add success for cpufreq!\n");
	return 0;
}
subsys_initcall(devfreq_cpufreq_init);
/* Module exit: remove the governor and free every queued devfreq_node plus
 * its tables.
 *
 * Fixes: (1) node->map is NULL for common-table nodes and gov_start-created
 * nodes, so the unconditional node->map[cpu] dereference could crash;
 * (2) the of_par reference from of_find_node_by_name() and the node->of_node
 * reference from add_table_from_of() were never released.
 */
static void __exit devfreq_cpufreq_exit(void)
{
	int ret, cpu;
	struct devfreq_node *node, *tmp;
	struct device_node *of_par;

	of_par = of_find_node_by_name(NULL, "devfreq-cpufreq");
	if (!of_par)
		return;
	/* Only needed as an existence check; drop the reference */
	of_node_put(of_par);

	ret = devfreq_remove_governor(&devfreq_cpufreq);
	if (ret)
		pr_err("Governor remove failed!\n");

	mutex_lock(&state_lock);
	list_for_each_entry_safe(node, tmp, &devfreq_list, list) {
		kfree(node->common_map);
		/* map is NULL when a common table (or interpolation) is used */
		if (node->map) {
			for_each_possible_cpu(cpu)
				kfree(node->map[cpu]);
			kfree(node->map);
		}
		/* of_node_put(NULL) is a no-op for gov_start-created nodes */
		of_node_put(node->of_node);
		list_del(&node->list);
		kfree(node);
	}
	mutex_unlock(&state_lock);
}
module_exit(devfreq_cpufreq_exit);
MODULE_DESCRIPTION("CPU freq based generic governor for devfreq devices");
MODULE_LICENSE("GPL");

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/of.h>
@ -66,8 +66,6 @@ void free_pdata(const struct platform_data *pdata)
static int heap_dt_init(struct device_node *mem_node,
struct platform_heap *heap)
{
const __be32 *basep;
u64 base, size;
struct device *dev = heap->dev;
struct reserved_mem *rmem;
int ret = 0;
@ -97,19 +95,8 @@ static int heap_dt_init(struct device_node *mem_node,
}
}
basep = of_get_address(mem_node, 0, &size, NULL);
if (basep) {
base = of_translate_address(mem_node, basep);
if (base != OF_BAD_ADDR) {
heap->base = base;
heap->size = size;
} else {
ret = -EINVAL;
dev_err(heap->dev,
"Failed to get heap base/size\n");
of_reserved_mem_device_release(dev);
}
}
heap->base = rmem->base;
heap->size = rmem->size;
heap->is_nomap = of_property_read_bool(mem_node, "no-map");
return ret;

View file

@ -207,6 +207,10 @@ static enum qcom_scm_convention __get_convention(void)
* Per the "SMC calling convention specification", the 64-bit calling
* convention can only be used when the client is 64-bit, otherwise
* system will encounter the undefined behaviour.
* When running on 32bit kernel, SCM call with convention
* SMC_CONVENTION_ARM_64 is causing the system crash. To avoid that
* use SMC_CONVENTION_ARM_64 for 64bit kernel and SMC_CONVENTION_ARM_32
* for 32bit kernel.
*/
#if IS_ENABLED(CONFIG_ARM64)
/*

View file

@ -370,8 +370,8 @@ static struct regmap *qcom_icc_rpmh_map(struct platform_device *pdev,
static bool is_voter_disabled(char *voter)
{
if ((strnstr(voter, "disp", strlen(voter)) &&
(socinfo_get_part_info(PART_DISPLAY) || socinfo_get_part_info(PART_DISPLAY1))) ||
if ((!strcmp(voter, "disp") && socinfo_get_part_info(PART_DISPLAY)) ||
(!strcmp(voter, "disp2") && socinfo_get_part_info(PART_DISPLAY1)) ||
(strnstr(voter, "cam", strlen(voter)) && socinfo_get_part_info(PART_CAMERA)))
return true;
@ -405,7 +405,11 @@ static int qcom_icc_init_disabled_parts(struct qcom_icc_provider *qp)
if (!qn)
continue;
if (strnstr(qn->name, voter_name, strlen(qn->name)))
/* Find the ICC node to be disabled by comparing voter_name in
* node name string, adjust the start position accordingly
*/
if (!strcmp(qn->name + (strlen(qn->name) - strlen(voter_name)),
voter_name))
qn->disabled = true;
}
}

View file

@ -513,6 +513,15 @@ config QSEECOM_PROXY
These callback functions can be used to start, shutdown and
send commands to the trusted apps.
config BOOTMARKER_PROXY
tristate "To enable Bootmarker proxy driver for kernel client"
help
Bootmarker proxy driver serves the kernel clients by providing
required ops via call back functions with a minimal framework.
These callback functions can be used to access the place marker.
To compile this driver as a module, choose M here.
If unsure, say N.
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"

View file

@ -64,3 +64,4 @@ obj-$(CONFIG_GP_PCI1XXXX) += mchp_pci1xxxx/
obj-$(CONFIG_VCPU_STALL_DETECTOR) += vcpu_stall_detector.o
obj-$(CONFIG_UID_SYS_STATS) += uid_sys_stats.o
obj-$(CONFIG_QSEECOM_PROXY) += qseecom_proxy.o
obj-$(CONFIG_BOOTMARKER_PROXY) += bootmarker_proxy.o

View file

@ -0,0 +1,46 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/bootmarker_kernel.h>
#include <linux/of_platform.h>
#include <linux/mod_devicetable.h>
static struct bootmarker_drv_ops bootmarker_fun_ops = {0};
int provide_bootmarker_kernel_fun_ops(const struct bootmarker_drv_ops *ops)
{
if (!ops) {
pr_err("ops is NULL\n");
return -EINVAL;
}
bootmarker_fun_ops = *ops;
pr_debug("Boot Marker proxy Ready to be served\n");
return 0;
}
EXPORT_SYMBOL_GPL(provide_bootmarker_kernel_fun_ops);
int bootmarker_place_marker(const char *name)
{
int32_t ret = -EPERM;
if (bootmarker_fun_ops.bootmarker_place_marker) {
ret = bootmarker_fun_ops.bootmarker_place_marker(name);
if (ret != 0)
pr_err("%s: command failed = %d\n", __func__, ret);
} else {
pr_err_ratelimited("bootmarker driver is not up yet\n");
ret = -EAGAIN;
}
return ret;
}
EXPORT_SYMBOL_GPL(bootmarker_place_marker);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Boot Marker proxy driver");

View file

@ -5,6 +5,5 @@
obj-$(CONFIG_QCOM_EMAC) += qcom-emac.o
qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o emac-ethtool.o \
emac-sgmii-fsm9900.o emac-sgmii-qdf2432.o \
emac-sgmii-qdf2400.o
qcom-emac-objs := emac_main.o emac_hw.o emac_ethtool.o emac_ptp.o \
emac_phy.o emac_rgmii.o emac_sgmii.o

View file

@ -1076,7 +1076,7 @@ static void emac_receive_skb(struct emac_rx_queue *rx_q,
if (vlan_flag) {
u16 vlan;
EMAC_TAG_TO_VLAN(vlan_tag, vlan);
vlan = ((((vlan_tag) >> 8) & 0xFF) | (((vlan_tag) & 0xFF) << 8));
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
}
@ -1449,7 +1449,8 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
if (skb_vlan_tag_present(skb)) {
u16 tag;
EMAC_VLAN_TO_TAG(skb_vlan_tag_get(skb), tag);
tag = (((skb_vlan_tag_get(skb) >> 8) & 0xFF) |
((skb_vlan_tag_get(skb) & 0xFF) << 8));
TPD_CVLAN_TAG_SET(&tpd, tag);
TPD_INSTC_SET(&tpd, 1);
}

View file

@ -0,0 +1,401 @@
/* SPDX-License-Identifier: GPL-2.0-only
* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __EMAC_DEFINES_H__
#define __EMAC_DEFINES_H__
/* EMAC_DMA_MAS_CTRL */
#define DEV_ID_NUM_BMSK 0x7f000000
#define DEV_ID_NUM_SHFT 24
#define DEV_REV_NUM_BMSK 0xff0000
#define DEV_REV_NUM_SHFT 16
#define INT_RD_CLR_EN 0x4000
#define IRQ_MODERATOR2_EN 0x800
#define IRQ_MODERATOR_EN 0x400
#define LPW_CLK_SEL 0x80
#define LPW_STATE 0x20
#define LPW_MODE 0x10
#define SOFT_RST 0x1
/* EMAC_IRQ_MOD_TIM_INIT */
#define IRQ_MODERATOR2_INIT_BMSK 0xffff0000
#define IRQ_MODERATOR2_INIT_SHFT 16
#define IRQ_MODERATOR_INIT_BMSK 0xffff
#define IRQ_MODERATOR_INIT_SHFT 0
/* EMAC_MDIO_CTRL */
#define MDIO_MODE 0x40000000
#define MDIO_PR 0x20000000
#define MDIO_AP_EN 0x10000000
#define MDIO_BUSY 0x8000000
#define MDIO_CLK_SEL_BMSK 0x7000000
#define MDIO_CLK_SEL_SHFT 24
#define MDIO_START 0x800000
#define SUP_PREAMBLE 0x400000
#define MDIO_RD_NWR 0x200000
#define MDIO_REG_ADDR_BMSK 0x1f0000
#define MDIO_REG_ADDR_SHFT 16
#define MDIO_DATA_BMSK 0xffff
#define MDIO_DATA_SHFT 0
/* EMAC_PHY_STS */
#define PHY_ADDR_BMSK 0x1f0000
#define PHY_ADDR_SHFT 16
/* EMAC_MDIO_EX_CTRL */
#define DEVAD_BMSK 0x1f0000
#define DEVAD_SHFT 16
#define EX_REG_ADDR_BMSK 0xffff
#define EX_REG_ADDR_SHFT 0
/* EMAC_MAC_CTRL */
#define SINGLE_PAUSE_MODE 0x10000000
#define DEBUG_MODE 0x8000000
#define BROAD_EN 0x4000000
#define MULTI_ALL 0x2000000
#define RX_CHKSUM_EN 0x1000000
#define HUGE 0x800000
#define SPEED(x) (((x) & 0x3) << 20)
#define SPEED_MASK SPEED(0x3)
#define SIMR 0x80000
#define TPAUSE 0x10000
#define PROM_MODE 0x8000
#define VLAN_STRIP 0x4000
#define PRLEN_BMSK 0x3c00
#define PRLEN_SHFT 10
#define HUGEN 0x200
#define FLCHK 0x100
#define PCRCE 0x80
#define CRCE 0x40
#define FULLD 0x20
#define MAC_LP_EN 0x10
#define RXFC 0x8
#define TXFC 0x4
#define RXEN 0x2
#define TXEN 0x1
/* EMAC_WOL_CTRL0 */
#define LK_CHG_PME 0x20
#define LK_CHG_EN 0x10
#define MG_FRAME_PME 0x8
#define MG_FRAME_EN 0x4
#define WK_FRAME_EN 0x1
/* EMAC_DESC_CTRL_3 */
#define RFD_RING_SIZE_BMSK 0xfff
/* EMAC_DESC_CTRL_4 */
#define RX_BUFFER_SIZE_BMSK 0xffff
/* EMAC_DESC_CTRL_6 */
#define RRD_RING_SIZE_BMSK 0xfff
/* EMAC_DESC_CTRL_9 */
#define TPD_RING_SIZE_BMSK 0xffff
/* EMAC_TXQ_CTRL_0 */
#define NUM_TXF_BURST_PREF_BMSK 0xffff0000
#define NUM_TXF_BURST_PREF_SHFT 16
#define LS_8023_SP 0x80
#define TXQ_MODE 0x40
#define TXQ_EN 0x20
#define IP_OP_SP 0x10
#define NUM_TPD_BURST_PREF_BMSK 0xf
#define NUM_TPD_BURST_PREF_SHFT 0
/* EMAC_TXQ_CTRL_1 */
#define JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK 0x7ff
/* EMAC_TXQ_CTRL_2 */
#define TXF_HWM_BMSK 0xfff0000
#define TXF_LWM_BMSK 0xfff
/* EMAC_RXQ_CTRL_0 */
#define RXQ_EN 0x80000000
#define CUT_THRU_EN 0x40000000
#define RSS_HASH_EN 0x20000000
#define NUM_RFD_BURST_PREF_BMSK 0x3f00000
#define NUM_RFD_BURST_PREF_SHFT 20
#define IDT_TABLE_SIZE_BMSK 0x1ff00
#define IDT_TABLE_SIZE_SHFT 8
#define SP_IPV6 0x80
/* EMAC_RXQ_CTRL_1 */
#define JUMBO_1KAH_BMSK 0xf000
#define JUMBO_1KAH_SHFT 12
#define RFD_PREF_LOW_THRESHOLD_BMSK 0xfc0
#define RFD_PREF_LOW_THRESHOLD_SHFT 6
#define RFD_PREF_UP_THRESHOLD_BMSK 0x3f
#define RFD_PREF_UP_THRESHOLD_SHFT 0
/* EMAC_RXQ_CTRL_2 */
#define RXF_DOF_THRESHOLD_BMSK 0xfff0000
#define RXF_DOF_THRESHOLD_SHFT 16
#define RXF_UOF_THRESHOLD_BMSK 0xfff
#define RXF_UOF_THRESHOLD_SHFT 0
/* EMAC_RXQ_CTRL_3 */
#define RXD_TIMER_BMSK 0xffff0000
#define RXD_THRESHOLD_BMSK 0xfff
#define RXD_THRESHOLD_SHFT 0
/* EMAC_DMA_CTRL */
#define DMAW_DLY_CNT_BMSK 0xf0000
#define DMAW_DLY_CNT_SHFT 16
#define DMAR_DLY_CNT_BMSK 0xf800
#define DMAR_DLY_CNT_SHFT 11
#define DMAR_REQ_PRI 0x400
#define REGWRBLEN_BMSK 0x380
#define REGWRBLEN_SHFT 7
#define REGRDBLEN_BMSK 0x70
#define REGRDBLEN_SHFT 4
#define OUT_ORDER_MODE 0x4
#define ENH_ORDER_MODE 0x2
#define IN_ORDER_MODE 0x1
/* EMAC_MAILBOX_13 */
#define RFD3_PROC_IDX_BMSK 0xfff0000
#define RFD3_PROC_IDX_SHFT 16
#define RFD3_PROD_IDX_BMSK 0xfff
#define RFD3_PROD_IDX_SHFT 0
/* EMAC_MAILBOX_2 */
#define NTPD_CONS_IDX_BMSK 0xffff0000
#define NTPD_CONS_IDX_SHFT 16
/* EMAC_MAILBOX_3 */
#define RFD0_CONS_IDX_BMSK 0xfff
#define RFD0_CONS_IDX_SHFT 0
/* EMAC_INT_STATUS */
#define DIS_INT BIT(31)
#define PTP_INT BIT(30)
#define RFD4_UR_INT BIT(29)
#define TX_PKT_INT3 BIT(26)
#define TX_PKT_INT2 BIT(25)
#define TX_PKT_INT1 BIT(24)
#define RX_PKT_INT3 BIT(19)
#define RX_PKT_INT2 BIT(18)
#define RX_PKT_INT1 BIT(17)
#define RX_PKT_INT0 BIT(16)
#define TX_PKT_INT BIT(15)
#define TXQ_TO_INT BIT(14)
#define GPHY_WAKEUP_INT BIT(13)
#define GPHY_LINK_DOWN_INT BIT(12)
#define GPHY_LINK_UP_INT BIT(11)
#define DMAW_TO_INT BIT(10)
#define DMAR_TO_INT BIT(9)
#define TXF_UR_INT BIT(8)
#define RFD3_UR_INT BIT(7)
#define RFD2_UR_INT BIT(6)
#define RFD1_UR_INT BIT(5)
#define RFD0_UR_INT BIT(4)
#define RXF_OF_INT BIT(3)
#define SW_MAN_INT BIT(2)
/* EMAC_INT_RETRIG_INIT */
#define INT_RETRIG_TIME_BMSK 0xffff
/* EMAC_MAILBOX_11 */
#define H3TPD_PROD_IDX_BMSK 0xffff0000
#define H3TPD_PROD_IDX_SHFT 16
/* EMAC_AXI_MAST_CTRL */
#define DATA_BYTE_SWAP 0x8
#define MAX_BOUND 0x2
#define MAX_BTYPE 0x1
/* EMAC_MAILBOX_12 */
#define H3TPD_CONS_IDX_BMSK 0xffff0000
#define H3TPD_CONS_IDX_SHFT 16
/* EMAC_MAILBOX_9 */
#define H2TPD_PROD_IDX_BMSK 0xffff
#define H2TPD_PROD_IDX_SHFT 0
/* EMAC_MAILBOX_10 */
#define H1TPD_CONS_IDX_BMSK 0xffff0000
#define H1TPD_CONS_IDX_SHFT 16
#define H2TPD_CONS_IDX_BMSK 0xffff
#define H2TPD_CONS_IDX_SHFT 0
/* EMAC_ATHR_HEADER_CTRL */
#define HEADER_CNT_EN 0x2
#define HEADER_ENABLE 0x1
/* EMAC_MAILBOX_0 */
#define RFD0_PROC_IDX_BMSK 0xfff0000
#define RFD0_PROC_IDX_SHFT 16
#define RFD0_PROD_IDX_BMSK 0xfff
#define RFD0_PROD_IDX_SHFT 0
/* EMAC_MAILBOX_5 */
#define RFD1_PROC_IDX_BMSK 0xfff0000
#define RFD1_PROC_IDX_SHFT 16
#define RFD1_PROD_IDX_BMSK 0xfff
#define RFD1_PROD_IDX_SHFT 0
/* EMAC_MAILBOX_6 */
#define RFD2_PROC_IDX_BMSK 0xfff0000
#define RFD2_PROC_IDX_SHFT 16
#define RFD2_PROD_IDX_BMSK 0xfff
#define RFD2_PROD_IDX_SHFT 0
/* EMAC_CORE_HW_VERSION */
#define MAJOR_BMSK 0xf0000000
#define MAJOR_SHFT 28
#define MINOR_BMSK 0xfff0000
#define MINOR_SHFT 16
#define STEP_BMSK 0xffff
#define STEP_SHFT 0
/* EMAC_MISC_CTRL */
#define RX_UNCPL_INT_EN 0x1
/* EMAC_MAILBOX_7 */
#define RFD2_CONS_IDX_BMSK 0xfff0000
#define RFD2_CONS_IDX_SHFT 16
#define RFD1_CONS_IDX_BMSK 0xfff
#define RFD1_CONS_IDX_SHFT 0
/* EMAC_MAILBOX_8 */
#define RFD3_CONS_IDX_BMSK 0xfff
#define RFD3_CONS_IDX_SHFT 0
/* EMAC_MAILBOX_15 */
#define NTPD_PROD_IDX_BMSK 0xffff
#define NTPD_PROD_IDX_SHFT 0
/* EMAC_MAILBOX_16 */
#define H1TPD_PROD_IDX_BMSK 0xffff
#define H1TPD_PROD_IDX_SHFT 0
/* EMAC_EMAC_WRAPPER_CSR1 */
#define TX_INDX_FIFO_SYNC_RST BIT(23)
#define TX_TS_FIFO_SYNC_RST BIT(22)
#define RX_TS_FIFO2_SYNC_RST BIT(21)
#define RX_TS_FIFO1_SYNC_RST BIT(20)
#define TX_TS_ENABLE BIT(16)
#define DIS_1588_CLKS BIT(11)
#define FREQ_MODE BIT(9)
#define ENABLE_RRD_TIMESTAMP BIT(3)
/* EMAC_EMAC_WRAPPER_CSR2 */
#define HDRIVE_BMSK 0x3000
#define HDRIVE_SHFT 12
#define SLB_EN 0x200
#define PLB_EN 0x100
#define WOL_EN 0x80
#define CKEDGE_SEL 0x40
#define TX_ID_EN_L 0x20
#define RX_ID_EN_L 0x10
#define RGMII_PHY_MODE_BMSK 0x6
#define RGMII_PHY_MODE_SHFT 1
#define PHY_RESET 0x1
/* EMAC_EMAC_WRAPPER_CSR3 */
#define PLL_RESET 0x1000000
#define PLL_L_VAL_5_0_BMSK 0xfc0000
#define PLL_L_VAL_5_0_SHFT 18
#define BYPASSNL 0x10000
/* EMAC_EMAC_WRAPPER_CSR5 */
#define RMII_125_CLK_EN 0x20
/* EMAC_EMAC_WRAPPER_CSR10 */
#define RD_CLR_1588 0x2
#define DIS_1588 0x1
/* EMAC_EMAC_WRAPPER_STATUS */
#define PLL_LOCK_DET 0x1
/* EMAC_EMAC_WRAPPER_TX_TS_INX */
#define EMAC_WRAPPER_TX_TS_EMPTY 0x80000000
#define EMAC_WRAPPER_TX_TS_INX_BMSK 0xffff
/* EMAC_P1588_CTRL_REG */
#define ATTACH_EN 0x10
#define BYPASS_O 0x8
#define CLOCK_MODE_BMSK 0x6
#define CLOCK_MODE_SHFT 1
#define ETH_MODE_SW 0x1
/* EMAC_P1588_TX_LATENCY */
#define TX_LATENCY_BMSK 0xffff
#define TX_LATENCY_SHFT 0
/* EMAC_P1588_INC_VALUE_2 */
#define INC_VALUE_2_BMSK 0xffff
/* EMAC_P1588_INC_VALUE_1 */
#define INC_VALUE_1_BMSK 0xffff
/* EMAC_P1588_NANO_OFFSET_2 */
#define NANO_OFFSET_2_BMSK 0xffff
/* EMAC_P1588_NANO_OFFSET_1 */
#define NANO_OFFSET_1_BMSK 0xffff
/* EMAC_P1588_SEC_OFFSET_2 */
#define SEC_OFFSET_2_BMSK 0xffff
/* EMAC_P1588_SEC_OFFSET_1 */
#define SEC_OFFSET_1_BMSK 0xffff
/* EMAC_P1588_REAL_TIME_5 */
#define REAL_TIME_5_BMSK 0xffff
#define REAL_TIME_5_SHFT 0
/* EMAC_P1588_REAL_TIME_4 */
#define REAL_TIME_4_BMSK 0xffff
#define REAL_TIME_4_SHFT 0
/* EMAC_P1588_REAL_TIME_3 */
#define REAL_TIME_3_BMSK 0xffff
#define REAL_TIME_3_SHFT 0
/* EMAC_P1588_REAL_TIME_2 */
#define REAL_TIME_2_BMSK 0xffff
#define REAL_TIME_2_SHFT 0
/* EMAC_P1588_REAL_TIME_1 */
#define REAL_TIME_1_BMSK 0xffff
#define REAL_TIME_1_SHFT 0
/* EMAC_P1588_EXPANDED_INT_STATUS */
#define PPS_IN 0x20
/* EMAC_P1588_RTC_EXPANDED_CONFIG */
#define RTC_READ_MODE 0x20
#define RTC_SNAPSHOT 0x10
#define LOAD_RTC 0x1
/* EMAC_P1588_RTC_PRELOADED_4 */
#define RTC_PRELOADED_4_BMSK 0xffff
/* EMAC_P1588_RTC_PRELOADED_3 */
#define RTC_PRELOADED_3_BMSK 0xffff
/* EMAC_P1588_RTC_PRELOADED_2 */
#define RTC_PRELOADED_2_BMSK 0xffff
/* EMAC_P1588_RTC_PRELOADED_1 */
#define RTC_PRELOADED_1_BMSK 0xffff
/* EMAC_P1588_GRAND_MASTER_CONFIG_0 */
#define GRANDMASTER_MODE 0x40
#define GM_PPS_SYNC 0x20
#endif /* __EMAC_DEFINES_H__ */

View file

@ -0,0 +1,410 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* Qualcomm Technologies, Inc. EMAC Ethernet Controller ethtool support
*/
#include <linux/ethtool.h>
#include <linux/pm_runtime.h>
#include "emac_main.h"
#include "emac_hw.h"
#define EMAC_MAX_REG_SIZE 10
/* Entry count of emac_ethtool_stat_strings below (25 rx + 26 tx = 51) */
#define EMAC_STATS_LEN 51

/* ethtool -S stat names. NOTE(review): order presumably matches the order
 * the driver reads hardware stat counters (get_ethtool_stats, defined
 * elsewhere) — verify before reordering.
 */
static const char *const emac_ethtool_stat_strings[] = {
	"rx ok",
	"rx bcast",
	"rx mcast",
	"rx pause",
	"rx ctrl",
	"rx fcs err",
	"rx len err",
	"rx byte cnt",
	"rx runt",
	"rx frag",
	"rx sz 64",
	"rx sz 65 127",
	"rx sz 128 255",
	"rx sz 256 511",
	"rx sz 512 1023",
	"rx sz 1024 1518",
	"rx sz 1519 max",
	"rx sz ov",
	"rx rxf ov",
	"rx align err",
	"rx bcast byte cnt",
	"rx mcast byte cnt",
	"rx err addr",
	"rx crc align",
	"rx jubbers",
	"tx ok",
	"tx bcast",
	"tx mcast",
	"tx pause",
	"tx exc defer",
	"tx ctrl",
	"tx defer",
	"tx byte cnt",
	"tx sz 64",
	"tx sz 65 127",
	"tx sz 128 255",
	"tx sz 256 511",
	"tx sz 512 1023",
	"tx sz 1024 1518",
	"tx sz 1519 max",
	"tx 1 col",
	"tx 2 col",
	"tx late col",
	"tx abort col",
	"tx underrun",
	"tx rd eop",
	"tx len err",
	"tx trunc",
	"tx bcast byte",
	"tx mcast byte",
	"tx col",
};
/* ethtool get_pauseparam: report autoneg and rx/tx pause state from the
 * attached PHY.
 *
 * Fix: guard against a detached PHY (netdev->phydev == NULL) — the sibling
 * emac_set_pauseparam() already performs this check; the getter dereferenced
 * the pointer unconditionally.
 */
static void emac_get_pauseparam(struct net_device *netdev,
		struct ethtool_pauseparam *pause)
{
	struct phy_device *phydev = netdev->phydev;

	if (!phydev)
		return;

	pause->autoneg = (phydev->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
	pause->rx_pause = (phydev->pause) ? 1 : 0;
	pause->tx_pause = (phydev->pause != phydev->asym_pause) ? 1 : 0;
}
static int emac_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct emac_adapter *adpt = netdev_priv(netdev);
struct emac_phy *phy = &adpt->phy;
struct phy_device *phydev = netdev->phydev;
enum emac_flow_ctrl req_fc_mode;
bool disable_fc_autoneg;
int ret = 0;
if (!netif_running(netdev))
return -EINVAL;
if (!phydev)
return -ENODEV;
req_fc_mode = phy->req_fc_mode;
disable_fc_autoneg = phydev->autoneg;
if (pause->autoneg != AUTONEG_ENABLE)
disable_fc_autoneg = true;
else
disable_fc_autoneg = false;
if (pause->rx_pause && pause->tx_pause) {
req_fc_mode = EMAC_FC_FULL;
} else if (pause->rx_pause && !pause->tx_pause) {
req_fc_mode = EMAC_FC_RX_PAUSE;
} else if (!pause->rx_pause && pause->tx_pause) {
req_fc_mode = EMAC_FC_TX_PAUSE;
} else if (!pause->rx_pause && !pause->tx_pause) {
req_fc_mode = EMAC_FC_NONE;
} else {
CLR_FLAG(adpt, ADPT_STATE_RESETTING);
return -EINVAL;
}
pm_runtime_get_sync(netdev->dev.parent);
if (phy->req_fc_mode != req_fc_mode ||
phy->disable_fc_autoneg != disable_fc_autoneg) {
phy->req_fc_mode = req_fc_mode;
phy->disable_fc_autoneg = disable_fc_autoneg;
if (phydev->autoneg) {
switch (phy->req_fc_mode) {
case EMAC_FC_FULL:
linkmode_set_bit(ADVERTISED_Pause, phydev->supported);
linkmode_set_bit(ADVERTISED_Asym_Pause, phydev->supported);
linkmode_set_bit(ADVERTISED_Pause, phydev->advertising);
linkmode_set_bit(ADVERTISED_Asym_Pause, phydev->advertising);
break;
case EMAC_FC_TX_PAUSE:
linkmode_set_bit(ADVERTISED_Asym_Pause, phydev->supported);
linkmode_set_bit(ADVERTISED_Asym_Pause, phydev->advertising);
break;
default:
linkmode_clear_bit(ADVERTISED_Pause, phydev->supported);
linkmode_clear_bit(ADVERTISED_Asym_Pause, phydev->supported);
linkmode_clear_bit(ADVERTISED_Pause, phydev->advertising);
linkmode_clear_bit(ADVERTISED_Asym_Pause, phydev->advertising);
break;
}
if (phy->disable_fc_autoneg) {
linkmode_clear_bit(ADVERTISED_Pause, phydev->supported);
linkmode_clear_bit(ADVERTISED_Asym_Pause, phydev->supported);
linkmode_clear_bit(ADVERTISED_Pause, phydev->advertising);
linkmode_clear_bit(ADVERTISED_Asym_Pause, phydev->advertising);
}
}
if (phy->external)
ret = phy_start_aneg(phydev);
if (ret > 0)
emac_phy_config_fc(adpt);
}
pm_runtime_mark_last_busy(netdev->dev.parent);
pm_runtime_put_autosuspend(netdev->dev.parent);
return ret;
}
/* ethtool: report the driver's netif message-level bitmap. */
static u32 emac_get_msglevel(struct net_device *netdev)
{
	struct emac_adapter *priv = netdev_priv(netdev);

	return priv->msg_enable;
}
/* ethtool: set the driver's netif message-level bitmap. */
static void emac_set_msglevel(struct net_device *netdev, u32 data)
{
	struct emac_adapter *priv = netdev_priv(netdev);

	priv->msg_enable = data;
}
static int emac_get_regs_len(struct net_device *netdev)
{
return EMAC_MAX_REG_SIZE * sizeof(32);
}
/* ethtool: dump a fixed set of EMAC registers into @buff. */
static void emac_get_regs(struct net_device *netdev,
			  struct ethtool_regs *regs, void *buff)
{
	static const u32 dump_regs[EMAC_MAX_REG_SIZE] = {
		EMAC_DMA_MAS_CTRL, EMAC_MAC_CTRL, EMAC_WOL_CTRL0,
		EMAC_TXQ_CTRL_0, EMAC_RXQ_CTRL_0, EMAC_DMA_CTRL, EMAC_INT_MASK,
		EMAC_AXI_MAST_CTRL, EMAC_CORE_HW_VERSION, EMAC_MISC_CTRL,
	};
	struct emac_adapter *priv = netdev_priv(netdev);
	struct emac_hw *hw = &priv->hw;
	u32 *out = buff;
	u16 idx;

	regs->version = 0;
	regs->len = EMAC_MAX_REG_SIZE * sizeof(u32);
	memset(out, 0, EMAC_MAX_REG_SIZE * sizeof(u32));

	/* keep the device awake while reading hardware registers */
	pm_runtime_get_sync(netdev->dev.parent);
	for (idx = 0; idx < ARRAY_SIZE(dump_regs); idx++)
		out[idx] = emac_reg_r32(hw, EMAC, dump_regs[idx]);
	pm_runtime_mark_last_busy(netdev->dev.parent);
	pm_runtime_put_autosuspend(netdev->dev.parent);
}
/* ethtool: report driver identification strings. */
static void emac_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct emac_adapter *priv = netdev_priv(netdev);

	strscpy(drvinfo->driver, priv->netdev->name, sizeof(drvinfo->driver));
	strscpy(drvinfo->version, "Revision: 1.1.0.0",
		sizeof(drvinfo->version));
	strscpy(drvinfo->bus_info, dev_name(&netdev->dev),
		sizeof(drvinfo->bus_info));
	drvinfo->regdump_len = emac_get_regs_len(netdev);
}
/* Return 0 when this device variant supports wake-on-LAN, -EINVAL
 * otherwise (also zeroing wol->supported for the unsupported case).
 */
static int emac_wol_exclusion(struct emac_adapter *adpt,
			      struct ethtool_wolinfo *wol)
{
	struct emac_hw *hw = &adpt->hw;

	/* WOL not supported except for the stock EMAC device ID */
	if (hw->devid == EMAC_DEV_ID)
		return 0;

	wol->supported = 0;
	return -EINVAL;
}
static void emac_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
struct emac_adapter *adpt = netdev_priv(netdev);
wol->supported = WAKE_MAGIC | WAKE_PHY;
wol->wolopts = 0;
if (adpt->wol & EMAC_WOL_MAGIC)
wol->wolopts |= WAKE_MAGIC;
if (adpt->wol & EMAC_WOL_PHY)
wol->wolopts |= WAKE_PHY;
}
/* ethtool: configure wake-on-LAN.  Only WAKE_MAGIC and WAKE_PHY are
 * supported; the PHY is armed first, then the driver's state updated.
 */
static int emac_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	const u32 unsupported = WAKE_ARP | WAKE_MAGICSECURE |
				WAKE_UCAST | WAKE_BCAST | WAKE_MCAST;
	struct emac_adapter *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	u32 err = 0;

	if (wol->wolopts & unsupported)
		return -EOPNOTSUPP;

	if (emac_wol_exclusion(priv, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	/* Enable WOL interrupt */
	err = phy_ethtool_set_wol(phydev, wol);
	if (err)
		return err;

	priv->wol = 0;
	if (wol->wolopts & WAKE_MAGIC) {
		priv->wol |= EMAC_WOL_MAGIC;
		emac_wol_gpio_irq(priv, true);
		/* Release wakelock */
		__pm_relax(priv->link_wlock);
	}
	if (wol->wolopts & WAKE_PHY)
		priv->wol |= EMAC_WOL_PHY;

	return err;
}
/* ethtool: report current and maximum descriptor ring sizes. */
static void emac_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct emac_adapter *priv = netdev_priv(netdev);

	ring->rx_max_pending = EMAC_MAX_RX_DESCS;
	ring->tx_max_pending = EMAC_MAX_TX_DESCS;
	ring->rx_pending = priv->num_rxdescs;
	ring->tx_pending = priv->num_txdescs;
}
/* ethtool: resize the TX/RX descriptor rings, clamped to HW limits.
 * If the interface is up the rings are re-allocated immediately.
 */
static int emac_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct emac_adapter *priv = netdev_priv(netdev);

	/* mini/jumbo rings are not supported by this hardware */
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	priv->num_txdescs = clamp_t(u32, ring->tx_pending,
				    EMAC_MIN_TX_DESCS, EMAC_MAX_TX_DESCS);
	priv->num_rxdescs = clamp_t(u32, ring->rx_pending,
				    EMAC_MIN_RX_DESCS, EMAC_MAX_RX_DESCS);

	if (!netif_running(netdev))
		return 0;

	return emac_resize_rings(netdev);
}
/* ethtool: restart the link by reinitializing the adapter (no-op when
 * the interface is down).
 */
static int emac_nway_reset(struct net_device *netdev)
{
	struct emac_adapter *priv = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	return emac_reinit_locked(priv);
}
/* ethtool: number of entries in the requested string set. */
static int emac_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_TEST)
		return 0;	/* no self-test strings */
	if (sset == ETH_SS_STATS)
		return EMAC_STATS_LEN;
	return -EOPNOTSUPP;
}
/* ethtool: copy the statistic names out; only ETH_SS_STATS has any. */
static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	u8 *p = data;
	u16 idx;

	if (stringset != ETH_SS_STATS)
		return;

	for (idx = 0; idx < EMAC_STATS_LEN; idx++) {
		strscpy(p, emac_ethtool_stat_strings[idx], ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
	}
}
/* ethtool: refresh the hardware counters and copy them to user space.
 *
 * NOTE(review): the memcpy assumes struct emac_hw_stats starts with
 * exactly EMAC_STATS_LEN u64 counters laid out in the same order as
 * emac_ethtool_stat_strings — confirm whenever either is changed.
 */
static void emac_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats,
				   u64 *data)
{
	struct emac_adapter *adpt = netdev_priv(netdev);

	emac_update_hw_stats(adpt);
	memcpy(data, &adpt->hw_stats, EMAC_STATS_LEN * sizeof(u64));
}
/* ethtool operations table for the EMAC netdev; link settings are
 * delegated to phylib's generic ksettings helpers.
 */
static const struct ethtool_ops emac_ethtool_ops = {
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_msglevel = emac_get_msglevel,
	.set_msglevel = emac_set_msglevel,
	.get_sset_count = emac_get_sset_count,
	.get_strings = emac_get_strings,
	.get_ethtool_stats = emac_get_ethtool_stats,
	.get_ringparam = emac_get_ringparam,
	.set_ringparam = emac_set_ringparam,
	.get_pauseparam = emac_get_pauseparam,
	.set_pauseparam = emac_set_pauseparam,
	.nway_reset = emac_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_regs_len = emac_get_regs_len,
	.get_regs = emac_get_regs,
	.get_wol = emac_get_wol,
	.set_wol = emac_set_wol,
	.get_drvinfo = emac_get_drvinfo,
};
/* Install the driver's ethtool operations table on @netdev. */
void emac_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &emac_ethtool_ops;
}

View file

@ -0,0 +1,640 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* Qualcomm Technologies, Inc. EMAC Ethernet Controller Hardware support
*/
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/qca8337.h>
#include "emac_hw.h"
#include "emac_ptp.h"
#define RFD_PREF_LOW_TH 0x10
#define RFD_PREF_UP_TH 0x10
#define JUMBO_1KAH 0x4
#define RXF_DOF_TH 0x0be
#define RXF_UOF_TH 0x1a0
#define RXD_TH 0x100
/* RGMII specific macros */
#define EMAC_RGMII_PLL_LOCK_TIMEOUT (HZ / 1000) /* 1ms */
#define EMAC_RGMII_CORE_IE_C 0x2001
#define EMAC_RGMII_PLL_L_VAL 0x14
#define EMAC_RGMII_PHY_MODE 0
/* REG */
/* Read a 32-bit EMAC register.
 * @base: index into hw->reg_addr[] selecting the register block
 *        (EMAC, EMAC_CSR or EMAC_1588).
 * Uses readl_relaxed(): no ordering barrier is implied — callers that
 * need ordering issue their own rmb()/wmb().
 */
u32 emac_reg_r32(struct emac_hw *hw, u8 base, u32 reg)
{
	return readl_relaxed(hw->reg_addr[base] + reg);
}
/* Write a 32-bit EMAC register.
 * Uses writel_relaxed(): no ordering barrier is implied — callers that
 * need the write flushed to hardware follow up with wmb().
 */
void emac_reg_w32(struct emac_hw *hw, u8 base, u32 reg, u32 val)
{
	writel_relaxed(val, hw->reg_addr[base] + reg);
}
/* Read-modify-write a register: clear the @mask bits, then OR in @val. */
void emac_reg_update32(struct emac_hw *hw, u8 base, u32 reg, u32 mask, u32 val)
{
	u32 cur = emac_reg_r32(hw, base, reg);

	cur &= ~mask;
	cur |= val;
	emac_reg_w32(hw, base, reg, cur);
}
/* Read a register and extract the field selected by @mask/@shift. */
u32 emac_reg_field_r32(struct emac_hw *hw, u8 base, u32 reg,
		       u32 mask, u32 shift)
{
	return (emac_reg_r32(hw, base, reg) & mask) >> shift;
}
/* INTR */
/* Unmask all core EMAC interrupts (plus the 1588 PTP interrupts when
 * timestamping is enabled).  The trailing wmb() ensures every mask
 * write has reached the hardware before interrupts can fire.
 */
void emac_hw_enable_intr(struct emac_hw *hw)
{
	struct emac_adapter *adpt = emac_hw_get_adap(hw);
	int i;
	for (i = 0; i < EMAC_NUM_CORE_IRQ; i++) {
		struct emac_irq_per_dev *irq = &adpt->irq[i];
		const struct emac_irq_common *irq_cmn = &emac_irq_cmn_tbl[i];
		/* clear the disable bit, then program the per-irq mask */
		emac_reg_w32(hw, EMAC, irq_cmn->status_reg, (u32)~DIS_INT);
		emac_reg_w32(hw, EMAC, irq_cmn->mask_reg, irq->mask);
	}
	if (adpt->tstamp_en)
		emac_reg_w32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_MASK,
			     hw->ptp_intr_mask);
	wmb(); /* ensure that irq and ptp setting are flushed to HW */
}
/* Mask all core EMAC interrupts (and PTP interrupts when timestamping
 * is enabled) by setting DIS_INT and zeroing every mask register.
 */
void emac_hw_disable_intr(struct emac_hw *hw)
{
	struct emac_adapter *adpt = emac_hw_get_adap(hw);
	int i;
	for (i = 0; i < EMAC_NUM_CORE_IRQ; i++) {
		const struct emac_irq_common *irq_cmn = &emac_irq_cmn_tbl[i];
		emac_reg_w32(hw, EMAC, irq_cmn->status_reg, DIS_INT);
		emac_reg_w32(hw, EMAC, irq_cmn->mask_reg, 0);
	}
	if (adpt->tstamp_en)
		emac_reg_w32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_MASK,
			     0);
	wmb(); /* ensure that irq and ptp setting are flushed to HW */
}
/* MC */
/* Add @addr to the 64-bit multicast hash filter.
 * The hash table is two 32-bit registers treated as one 64-bit bit
 * array, indexed by the upper 6 bits of the CRC32 of the MAC address:
 * the top bit selects the register, the next 5 bits select the bit.
 */
void emac_hw_set_mc_addr(struct emac_hw *hw, u8 *addr)
{
	u32 hash = ether_crc(ETH_ALEN, addr);
	u32 word = (hash >> 31) & 0x1;
	u32 bitpos = (hash >> 26) & 0x1F;
	u32 reg_off = EMAC_HASH_TAB_REG0 + (word << 2);
	u32 tab;

	tab = emac_reg_r32(hw, EMAC, reg_off);
	tab |= (0x1 << bitpos);
	emac_reg_w32(hw, EMAC, reg_off, tab);
	wmb(); /* ensure that the mac address is flushed to HW */
}
/* Clear the entire 64-bit multicast hash filter (both table registers). */
void emac_hw_clear_mc_addr(struct emac_hw *hw)
{
	emac_reg_w32(hw, EMAC, EMAC_HASH_TAB_REG0, 0);
	emac_reg_w32(hw, EMAC, EMAC_HASH_TAB_REG1, 0);
	wmb(); /* ensure that clearing the mac address is flushed to HW */
}
/* definitions for RSS */
#define EMAC_RSS_KEY(_i, _type) \
(EMAC_RSS_KEY0 + ((_i) * sizeof(_type)))
#define EMAC_RSS_TBL(_i, _type) \
(EMAC_IDT_TABLE0 + ((_i) * sizeof(_type)))
/* RSS */
/* Program Receive-Side Scaling: hash key, CPU indirection table and
 * per-protocol hash-type enables, then enable RSS in EMAC_RXQ_CTRL_0.
 * All parameter writes are flushed (wmb) before the enable bit is set
 * so the hardware never sees a half-configured state.
 */
void emac_hw_config_rss(struct emac_hw *hw)
{
	int key_len_by_u32 = ARRAY_SIZE(hw->rss_key);
	int idt_len_by_u32 = ARRAY_SIZE(hw->rss_idt);
	u32 rxq0;
	int i;
	/* Fill out hash function keys; the byte array is packed into
	 * registers most-significant word first
	 */
	for (i = 0; i < key_len_by_u32; i++) {
		u32 key, idx_base;
		idx_base = (key_len_by_u32 - i) * 4;
		key = ((hw->rss_key[idx_base - 1]) |
		       (hw->rss_key[idx_base - 2] << 8) |
		       (hw->rss_key[idx_base - 3] << 16) |
		       (hw->rss_key[idx_base - 4] << 24));
		emac_reg_w32(hw, EMAC, EMAC_RSS_KEY(i, u32), key);
	}
	/* Fill out redirection table */
	for (i = 0; i < idt_len_by_u32; i++)
		emac_reg_w32(hw, EMAC, EMAC_RSS_TBL(i, u32), hw->rss_idt[i]);
	emac_reg_w32(hw, EMAC, EMAC_BASE_CPU_NUMBER, hw->rss_base_cpu);
	/* translate the software hash-type flags into RXQ_CTRL_0 bits */
	rxq0 = emac_reg_r32(hw, EMAC, EMAC_RXQ_CTRL_0);
	if (hw->rss_hstype & EMAC_RSS_HSTYP_IPV4_EN)
		rxq0 |= RXQ0_RSS_HSTYP_IPV4_EN;
	else
		rxq0 &= ~RXQ0_RSS_HSTYP_IPV4_EN;
	if (hw->rss_hstype & EMAC_RSS_HSTYP_TCP4_EN)
		rxq0 |= RXQ0_RSS_HSTYP_IPV4_TCP_EN;
	else
		rxq0 &= ~RXQ0_RSS_HSTYP_IPV4_TCP_EN;
	if (hw->rss_hstype & EMAC_RSS_HSTYP_IPV6_EN)
		rxq0 |= RXQ0_RSS_HSTYP_IPV6_EN;
	else
		rxq0 &= ~RXQ0_RSS_HSTYP_IPV6_EN;
	if (hw->rss_hstype & EMAC_RSS_HSTYP_TCP6_EN)
		rxq0 |= RXQ0_RSS_HSTYP_IPV6_TCP_EN;
	else
		rxq0 &= ~RXQ0_RSS_HSTYP_IPV6_TCP_EN;
	rxq0 |= ((hw->rss_idt_size << IDT_TABLE_SIZE_SHFT) &
		 IDT_TABLE_SIZE_BMSK);
	rxq0 |= RSS_HASH_EN;
	wmb(); /* ensure all parameters are written before we enable RSS */
	emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_0, rxq0);
	wmb(); /* ensure that enabling RSS is flushed to HW */
}
/* Config MAC modes */
/* Mirror the software HW_* mode flags (VLAN strip, promiscuous,
 * all-multicast, loopback) into the EMAC_MAC_CTRL register.
 */
void emac_hw_config_mac_ctrl(struct emac_hw *hw)
{
	u32 ctrl = emac_reg_r32(hw, EMAC, EMAC_MAC_CTRL);

	ctrl = TEST_FLAG(hw, HW_VLANSTRIP_EN) ? (ctrl | VLAN_STRIP)
					      : (ctrl & ~VLAN_STRIP);
	ctrl = TEST_FLAG(hw, HW_PROMISC_EN) ? (ctrl | PROM_MODE)
					    : (ctrl & ~PROM_MODE);
	ctrl = TEST_FLAG(hw, HW_MULTIALL_EN) ? (ctrl | MULTI_ALL)
					     : (ctrl & ~MULTI_ALL);
	ctrl = TEST_FLAG(hw, HW_LOOPBACK_EN) ? (ctrl | MAC_LP_EN)
					     : (ctrl & ~MAC_LP_EN);

	emac_reg_w32(hw, EMAC, EMAC_MAC_CTRL, ctrl);
	wmb(); /* ensure MAC setting is flushed to HW */
}
/* Wake On LAN (WOL) */
/* Program the wake-on-LAN control register from the @wufc wake flags
 * (EMAC_WOL_MAGIC for magic-packet wake, EMAC_WOL_PHY for link-change
 * wake).  A zero @wufc disables all wake events.
 */
void emac_hw_config_wol(struct emac_hw *hw, u32 wufc)
{
	u32 ctrl = 0;

	if (wufc & EMAC_WOL_MAGIC)
		ctrl |= MG_FRAME_EN | MG_FRAME_PME | WK_FRAME_EN;
	if (wufc & EMAC_WOL_PHY)
		ctrl |= LK_CHG_EN | LK_CHG_PME;

	emac_reg_w32(hw, EMAC, EMAC_WOL_CTRL0, ctrl);
	wmb(); /* ensure that WOL setting is flushed to HW */
}
/* Power Management */
/* Put the MAC into a low-power state, optionally keeping RX alive for
 * wake-on-LAN.
 * @speed: unused here — speed/duplex are taken from the attached PHY.
 *         NOTE(review): confirm whether the parameter can be dropped
 *         at the callers, or was meant to override phydev->speed.
 * @wol_en: keep the MAC configured to match the PHY link so wake
 *          frames can be received.
 * @rx_en:  additionally re-enable the receiver (broadcast accepted).
 */
void emac_hw_config_pow_save(struct emac_hw *hw, u32 speed,
			     bool wol_en, bool rx_en)
{
	struct emac_adapter *adpt = emac_hw_get_adap(hw);
	struct phy_device *phydev = adpt->phydev;
	u32 dma_mas, mac;
	dma_mas = emac_reg_r32(hw, EMAC, EMAC_DMA_MAS_CTRL);
	dma_mas &= ~LPW_CLK_SEL;
	dma_mas |= LPW_STATE;
	/* default: TX/RX off, 10/100 speed setting */
	mac = emac_reg_r32(hw, EMAC, EMAC_MAC_CTRL);
	mac &= ~(FULLD | RXEN | TXEN);
	mac = (mac & ~SPEED_MASK) | SPEED(1);
	if (wol_en) {
		if (rx_en)
			mac |= (RXEN | BROAD_EN);
		/* If WOL is enabled, set link speed/duplex for mac */
		if (phydev->speed == SPEED_1000)
			mac = (mac & ~SPEED_MASK) | (SPEED(2) & SPEED_MASK);
		if (phydev->duplex == DUPLEX_FULL)
			if (phydev->speed == SPEED_10 ||
			    phydev->speed == SPEED_100 ||
			    phydev->speed == SPEED_1000)
				mac |= FULLD;
	} else {
		/* select lower clock speed if WOL is disabled */
		dma_mas |= LPW_CLK_SEL;
	}
	emac_reg_w32(hw, EMAC, EMAC_DMA_MAS_CTRL, dma_mas);
	emac_reg_w32(hw, EMAC, EMAC_MAC_CTRL, mac);
	wmb(); /* ensure that power setting is flushed to HW */
}
/* Config descriptor rings */
/* Program the DMA addresses and sizes of the TPD (transmit), RFD
 * (receive free) and RRD (receive return) descriptor rings into the
 * MAC, then trigger the hardware to latch them.
 * The switch cases deliberately fall through: configuring N queues
 * programs the base address of every queue up to N.
 * NOTE(review): only queue 0's high DMA address word is programmed —
 * appears to assume all rings sit in the same 4 GiB region; confirm.
 */
static void emac_mac_dma_rings_config(struct emac_hw *hw)
{
	struct emac_adapter *adpt = emac_hw_get_adap(hw);
	if (adpt->tstamp_en) {
		emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1,
				  0, ENABLE_RRD_TIMESTAMP);
	}
	/* TPD */
	emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_1,
		     EMAC_DMA_ADDR_HI(adpt->tx_queue[0].tpd.tpdma));
	switch (adpt->num_txques) {
	case 4:
		emac_reg_w32(hw, EMAC, EMAC_H3TPD_BASE_ADDR_LO,
			     EMAC_DMA_ADDR_LO(adpt->tx_queue[3].tpd.tpdma));
		fallthrough;
	case 3:
		emac_reg_w32(hw, EMAC, EMAC_H2TPD_BASE_ADDR_LO,
			     EMAC_DMA_ADDR_LO(adpt->tx_queue[2].tpd.tpdma));
		fallthrough;
	case 2:
		emac_reg_w32(hw, EMAC, EMAC_H1TPD_BASE_ADDR_LO,
			     EMAC_DMA_ADDR_LO(adpt->tx_queue[1].tpd.tpdma));
		fallthrough;
	case 1:
		emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_8,
			     EMAC_DMA_ADDR_LO(adpt->tx_queue[0].tpd.tpdma));
		break;
	default:
		emac_err(adpt, "Invalid number of TX queues (%d)\n",
			 adpt->num_txques);
		return;
	}
	emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_9,
		     adpt->tx_queue[0].tpd.count & TPD_RING_SIZE_BMSK);
	/* RFD & RRD */
	emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_0,
		     EMAC_DMA_ADDR_HI(adpt->rx_queue[0].rfd.rfdma));
	switch (adpt->num_rxques) {
	case 4:
		emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_13,
			     EMAC_DMA_ADDR_LO(adpt->rx_queue[3].rfd.rfdma));
		emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_16,
			     EMAC_DMA_ADDR_LO(adpt->rx_queue[3].rrd.rrdma));
		fallthrough;
	case 3:
		emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_12,
			     EMAC_DMA_ADDR_LO(adpt->rx_queue[2].rfd.rfdma));
		emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_15,
			     EMAC_DMA_ADDR_LO(adpt->rx_queue[2].rrd.rrdma));
		fallthrough;
	case 2:
		emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_10,
			     EMAC_DMA_ADDR_LO(adpt->rx_queue[1].rfd.rfdma));
		emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_14,
			     EMAC_DMA_ADDR_LO(adpt->rx_queue[1].rrd.rrdma));
		fallthrough;
	case 1:
		emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_2,
			     EMAC_DMA_ADDR_LO(adpt->rx_queue[0].rfd.rfdma));
		emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_5,
			     EMAC_DMA_ADDR_LO(adpt->rx_queue[0].rrd.rrdma));
		break;
	default:
		emac_err(adpt, "Invalid number of RX queues (%d)\n",
			 adpt->num_rxques);
		return;
	}
	emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_3,
		     adpt->rx_queue[0].rfd.count & RFD_RING_SIZE_BMSK);
	emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_6,
		     adpt->rx_queue[0].rrd.count & RRD_RING_SIZE_BMSK);
	emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_4,
		     adpt->rxbuf_size & RX_BUFFER_SIZE_BMSK);
	emac_reg_w32(hw, EMAC, EMAC_DESC_CTRL_11, 0);
	wmb(); /* ensure all parameters are written before we enable them */
	/* Load all of base address above */
	emac_reg_w32(hw, EMAC, EMAC_INTER_SRAM_PART9, 1);
	wmb(); /* ensure triggering HW to read ring pointers is flushed */
}
/* Config transmit parameters */
/* Program the transmit queue: offload threshold, TPD burst prefetch
 * count and TX FIFO watermarks.
 */
static void emac_hw_config_tx_ctrl(struct emac_hw *hw)
{
	u16 tx_offload_thresh = EMAC_MAX_TX_OFFLOAD_THRESH;
	u32 val;
	/* threshold register holds the value in 8-byte units */
	emac_reg_w32(hw, EMAC, EMAC_TXQ_CTRL_1,
		     (tx_offload_thresh >> 3) &
		     JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK);
	val = (hw->tpd_burst << NUM_TPD_BURST_PREF_SHFT) &
	      NUM_TPD_BURST_PREF_BMSK;
	val |= (TXQ_MODE | LS_8023_SP);
	val |= (0x0100 << NUM_TXF_BURST_PREF_SHFT) &
	       NUM_TXF_BURST_PREF_BMSK;
	emac_reg_w32(hw, EMAC, EMAC_TXQ_CTRL_0, val);
	/* clear both TX FIFO watermarks */
	emac_reg_update32(hw, EMAC, EMAC_TXQ_CTRL_2,
			  (TXF_HWM_BMSK | TXF_LWM_BMSK), 0);
	wmb(); /* ensure that Tx control settings are flushed to HW */
}
/* Config receive parameters */
/* Program the receive queue: RFD burst prefetch, prefetch thresholds,
 * RX FIFO overflow thresholds and the RX descriptor threshold.
 */
static void emac_hw_config_rx_ctrl(struct emac_hw *hw)
{
	u32 val;
	val = ((hw->rfd_burst << NUM_RFD_BURST_PREF_SHFT) &
	       NUM_RFD_BURST_PREF_BMSK);
	val |= (SP_IPV6 | CUT_THRU_EN);
	emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_0, val);
	val = emac_reg_r32(hw, EMAC, EMAC_RXQ_CTRL_1);
	val &= ~(JUMBO_1KAH_BMSK | RFD_PREF_LOW_THRESHOLD_BMSK |
		 RFD_PREF_UP_THRESHOLD_BMSK);
	val |= (JUMBO_1KAH << JUMBO_1KAH_SHFT) |
	       (RFD_PREF_LOW_TH << RFD_PREF_LOW_THRESHOLD_SHFT) |
	       (RFD_PREF_UP_TH << RFD_PREF_UP_THRESHOLD_SHFT);
	emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_1, val);
	val = emac_reg_r32(hw, EMAC, EMAC_RXQ_CTRL_2);
	val &= ~(RXF_DOF_THRESHOLD_BMSK | RXF_UOF_THRESHOLD_BMSK);
	val |= (RXF_DOF_TH << RXF_DOF_THRESHOLD_SHFT) |
	       (RXF_UOF_TH << RXF_UOF_THRESHOLD_SHFT);
	emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_2, val);
	/* RXD timer field is cleared; only the threshold is programmed */
	val = emac_reg_r32(hw, EMAC, EMAC_RXQ_CTRL_3);
	val &= ~(RXD_TIMER_BMSK | RXD_THRESHOLD_BMSK);
	val |= RXD_TH << RXD_THRESHOLD_SHFT;
	emac_reg_w32(hw, EMAC, EMAC_RXQ_CTRL_3, val);
	wmb(); /* ensure that Rx control settings are flushed to HW */
}
/* Config dma */
/* Program EMAC_DMA_CTRL: requested ordering mode, read/write burst
 * block sizes and read/write delay counts.
 */
static void emac_hw_config_dma_ctrl(struct emac_hw *hw)
{
	u32 ctrl = DMAR_REQ_PRI;

	if (hw->dma_order == emac_dma_ord_in)
		ctrl |= IN_ORDER_MODE;
	else if (hw->dma_order == emac_dma_ord_enh)
		ctrl |= ENH_ORDER_MODE;
	else if (hw->dma_order == emac_dma_ord_out)
		ctrl |= OUT_ORDER_MODE;

	ctrl |= (((u32)hw->dmar_block) << REGRDBLEN_SHFT) & REGRDBLEN_BMSK;
	ctrl |= (((u32)hw->dmaw_block) << REGWRBLEN_SHFT) & REGWRBLEN_BMSK;
	ctrl |= (((u32)hw->dmar_dly_cnt) << DMAR_DLY_CNT_SHFT) &
		DMAR_DLY_CNT_BMSK;
	ctrl |= (((u32)hw->dmaw_dly_cnt) << DMAW_DLY_CNT_SHFT) &
		DMAW_DLY_CNT_BMSK;

	emac_reg_w32(hw, EMAC, EMAC_DMA_CTRL, ctrl);
	wmb(); /* ensure that the DMA configuration is flushed to HW */
}
/* Configure MAC */
/* Full MAC configuration after reset: station address, DMA rings,
 * TX/RX queues, DMA control, optional PTP, AXI master settings, clock
 * gating off and uncompleted-RX interrupt enable.
 */
void emac_hw_config_mac(struct emac_hw *hw)
{
	struct emac_adapter *adpt = emac_hw_get_adap(hw);
	u32 val;
	emac_hw_set_mac_addr(hw, (u8 *)adpt->netdev->dev_addr);
	emac_mac_dma_rings_config(hw);
	/* max frame = MTU + ethernet header + VLAN tag + FCS */
	emac_reg_w32(hw, EMAC, EMAC_MAX_FRAM_LEN_CTRL,
		     adpt->netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
	emac_hw_config_tx_ctrl(hw);
	emac_hw_config_rx_ctrl(hw);
	emac_hw_config_dma_ctrl(hw);
	if (TEST_FLAG(hw, HW_PTP_CAP))
		emac_ptp_config(hw);
	val = emac_reg_r32(hw, EMAC, EMAC_AXI_MAST_CTRL);
	val &= ~(DATA_BYTE_SWAP | MAX_BOUND);
	val |= MAX_BTYPE;
	emac_reg_w32(hw, EMAC, EMAC_AXI_MAST_CTRL, val);
	emac_reg_w32(hw, EMAC, EMAC_CLK_GATE_CTRL, 0);
	emac_reg_w32(hw, EMAC, EMAC_MISC_CTRL, RX_UNCPL_INT_EN);
	wmb(); /* ensure that the MAC configuration is flushed to HW */
}
/* Reset MAC */
/* Soft-reset the MAC: mask/clear interrupts, stop the queues, pulse
 * SOFT_RST, then re-enable interrupt clear-on-read mode.
 */
void emac_hw_reset_mac(struct emac_hw *hw)
{
	emac_reg_w32(hw, EMAC, EMAC_INT_MASK, 0);
	emac_reg_w32(hw, EMAC, EMAC_INT_STATUS, DIS_INT);
	emac_hw_stop_mac(hw);
	emac_reg_update32(hw, EMAC, EMAC_DMA_MAS_CTRL, 0, SOFT_RST);
	wmb(); /* ensure mac is fully reset */
	usleep_range(100, 150); /* let the reset propagate */
	/* interrupt clear-on-read */
	emac_reg_update32(hw, EMAC, EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN);
	wmb(); /* ensure the interrupt clear-on-read setting is flushed to HW */
}
/* Start MAC */
/* Bring the MAC up: enable the TX/RX queues, then program MAC control
 * (flow control, speed, duplex and frame-handling options) to match the
 * attached PHY, and finally enable the interrupt moderators.
 */
void emac_hw_start_mac(struct emac_hw *hw)
{
	struct emac_adapter *adpt = emac_hw_get_adap(hw);
	struct phy_device *phydev = adpt->phydev;
	u32 mac, csr1;
	/* enable tx queue */
	if (adpt->num_txques && adpt->num_txques <= EMAC_MAX_TX_QUEUES)
		emac_reg_update32(hw, EMAC, EMAC_TXQ_CTRL_0, 0, TXQ_EN);
	/* enable rx queue */
	if (adpt->num_rxques && adpt->num_rxques <= EMAC_MAX_RX_QUEUES)
		emac_reg_update32(hw, EMAC, EMAC_RXQ_CTRL_0, 0, RXQ_EN);
	/* enable mac control */
	mac = emac_reg_r32(hw, EMAC, EMAC_MAC_CTRL);
	csr1 = emac_reg_r32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1);
	mac |= TXEN | RXEN; /* enable RX/TX */
	/* Configure MAC flow control to match the PHY's settings. */
	if (phydev->pause)
		mac |= RXFC;
	if (phydev->pause != phydev->asym_pause)
		mac |= TXFC;
	/* setup link speed */
	mac &= ~SPEED_MASK;
	if (phydev->phy_id == QCA8337_PHY_ID) {
		/* QCA8337 switch: fixed 1000 Mb/s full duplex */
		mac |= SPEED(2);
		csr1 |= FREQ_MODE;
		mac |= FULLD;
	} else {
		switch (phydev->speed) {
		case SPEED_1000:
			mac |= SPEED(2);
			csr1 |= FREQ_MODE;
			break;
		default:
			/* 10/100 Mb/s */
			mac |= SPEED(1);
			csr1 &= ~FREQ_MODE;
			break;
		}
		if (phydev->duplex == DUPLEX_FULL)
			mac |= FULLD;
		else
			mac &= ~FULLD;
	}
	/* other parameters */
	mac |= (CRCE | PCRCE);
	mac |= ((hw->preamble << PRLEN_SHFT) & PRLEN_BMSK);
	mac |= BROAD_EN;
	mac |= FLCHK;
	mac &= ~RX_CHKSUM_EN;
	mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
		 DEBUG_MODE | SINGLE_PAUSE_MODE);
	emac_reg_w32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, csr1);
	emac_reg_w32(hw, EMAC, EMAC_MAC_CTRL, mac);
	/* enable interrupt read clear, low power sleep mode and
	 * the irq moderators
	 */
	emac_reg_w32(hw, EMAC, EMAC_IRQ_MOD_TIM_INIT, hw->irq_mod);
	emac_reg_w32(hw, EMAC, EMAC_DMA_MAS_CTRL,
		     (INT_RD_CLR_EN | LPW_MODE |
		      IRQ_MODERATOR_EN | IRQ_MODERATOR2_EN));
	if (TEST_FLAG(hw, HW_PTP_CAP))
		emac_ptp_set_linkspeed(hw, phydev->speed);
	emac_hw_config_mac_ctrl(hw);
	/* Atheros header mode stays disabled */
	emac_reg_update32(hw, EMAC, EMAC_ATHR_HEADER_CTRL,
			  (HEADER_ENABLE | HEADER_CNT_EN), 0);
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN);
	wmb(); /* ensure that MAC setting are flushed to HW */
}
/* Stop MAC */
/* Quiesce the MAC: disable the RX/TX queues, then the MAC transmit and
 * receive engines, and wait for in-flight traffic to drain.
 */
void emac_hw_stop_mac(struct emac_hw *hw)
{
	emac_reg_update32(hw, EMAC, EMAC_RXQ_CTRL_0, RXQ_EN, 0);
	emac_reg_update32(hw, EMAC, EMAC_TXQ_CTRL_0, TXQ_EN, 0);
	emac_reg_update32(hw, EMAC, EMAC_MAC_CTRL, (TXEN | RXEN), 0);
	wmb(); /* ensure mac is stopped before we proceed */
	usleep_range(1000, 1050); /* allow pending DMA/frames to drain */
}
/* set MAC address */
void emac_hw_set_mac_addr(struct emac_hw *hw, u8 *addr)
{
u32 sta;
/* for example: 00-A0-C6-11-22-33
* 0<-->C6112233, 1<-->00A0.
*/
/* low 32bit word */
sta = (((u32)addr[2]) << 24) | (((u32)addr[3]) << 16) |
(((u32)addr[4]) << 8) | (((u32)addr[5]));
emac_reg_w32(hw, EMAC, EMAC_MAC_STA_ADDR0, sta);
/* hight 32bit word */
sta = (((u32)addr[0]) << 8) | (((u32)addr[1]));
emac_reg_w32(hw, EMAC, EMAC_MAC_STA_ADDR1, sta);
wmb(); /* ensure that the MAC address is flushed to HW */
}
/* Read one entry from the HW tx timestamp FIFO */
bool emac_hw_read_tx_tstamp(struct emac_hw *hw, struct emac_hwtxtstamp *ts)
{
u32 ts_idx;
ts_idx = emac_reg_r32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_TX_TS_INX);
if (ts_idx & EMAC_WRAPPER_TX_TS_EMPTY)
return false;
ts->ns = emac_reg_r32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_TX_TS_LO);
ts->sec = emac_reg_r32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_TX_TS_HI);
ts->ts_idx = ts_idx & EMAC_WRAPPER_TX_TS_INX_BMSK;
return true;
}

View file

@ -0,0 +1,149 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _EMAC_HW_H_
#define _EMAC_HW_H_
#include <linux/mii.h>
#include "emac_main.h"
#include "emac_regs.h"
#include "emac_defines.h"
/* function prototype */
/* REG */
u32 emac_reg_r32(struct emac_hw *hw, u8 base, u32 reg);
void emac_reg_w32(struct emac_hw *hw, u8 base, u32 reg, u32 val);
void emac_reg_update32(struct emac_hw *hw, u8 base, u32 reg,
u32 mask, u32 val);
u32 emac_reg_field_r32(struct emac_hw *hw, u8 base, u32 reg,
u32 mask, u32 shift);
void emac_hw_config_pow_save(struct emac_hw *hw, u32 speed, bool wol_en,
bool rx_en);
/* MAC */
void emac_hw_enable_intr(struct emac_hw *hw);
void emac_hw_disable_intr(struct emac_hw *hw);
void emac_hw_set_mc_addr(struct emac_hw *hw, u8 *addr);
void emac_hw_clear_mc_addr(struct emac_hw *hw);
void emac_hw_config_mac_ctrl(struct emac_hw *hw);
void emac_hw_config_rss(struct emac_hw *hw);
void emac_hw_config_wol(struct emac_hw *hw, u32 wufc);
int emac_hw_config_fc(struct emac_hw *hw);
void emac_hw_reset_mac(struct emac_hw *hw);
void emac_hw_config_mac(struct emac_hw *hw);
void emac_hw_start_mac(struct emac_hw *hw);
void emac_hw_stop_mac(struct emac_hw *hw);
void emac_hw_set_mac_addr(struct emac_hw *hw, u8 *addr);
/* TX Timestamp */
bool emac_hw_read_tx_tstamp(struct emac_hw *hw, struct emac_hwtxtstamp *ts);
#define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT)
#define IMR_EXTENDED_MASK (\
SW_MAN_INT |\
ISR_OVER |\
ISR_ERROR |\
ISR_TX_PKT)
#define ISR_RX_PKT (\
RX_PKT_INT0 |\
RX_PKT_INT1 |\
RX_PKT_INT2 |\
RX_PKT_INT3)
#define ISR_TX_PKT (\
TX_PKT_INT |\
TX_PKT_INT1 |\
TX_PKT_INT2 |\
TX_PKT_INT3)
#define ISR_GPHY_LINK (\
GPHY_LINK_UP_INT |\
GPHY_LINK_DOWN_INT)
#define ISR_OVER (\
RFD0_UR_INT |\
RFD1_UR_INT |\
RFD2_UR_INT |\
RFD3_UR_INT |\
RFD4_UR_INT |\
RXF_OF_INT |\
TXF_UR_INT)
#define ISR_ERROR (\
DMAR_TO_INT |\
DMAW_TO_INT |\
TXQ_TO_INT)
#define REG_MAC_RX_STATUS_BIN EMAC_RXMAC_STATC_REG0
#define REG_MAC_RX_STATUS_END EMAC_RXMAC_STATC_REG22
#define REG_MAC_TX_STATUS_BIN EMAC_TXMAC_STATC_REG0
#define REG_MAC_TX_STATUS_END EMAC_TXMAC_STATC_REG24
#define RXQ0_NUM_RFD_PREF_DEF 8
#define TXQ0_NUM_TPD_PREF_DEF 5
#define EMAC_PREAMBLE_DEF 7
#define DMAR_DLY_CNT_DEF 15
#define DMAW_DLY_CNT_DEF 4
#define MDIO_CLK_25_4 0
#define RXQ0_RSS_HSTYP_IPV6_TCP_EN 0x20
#define RXQ0_RSS_HSTYP_IPV6_EN 0x10
#define RXQ0_RSS_HSTYP_IPV4_TCP_EN 0x8
#define RXQ0_RSS_HSTYP_IPV4_EN 0x4
#define MASTER_CTRL_CLK_SEL_DIS 0x1000
#define MDIO_WAIT_TIMES 1000
/* PHY */
#define MII_PSSR 0x11 /* PHY Specific Status Reg */
#define MII_DBG_ADDR 0x1D /* PHY Debug Address Reg */
#define MII_DBG_DATA 0x1E /* PHY Debug Data Reg */
#define MII_INT_ENABLE 0x12 /* PHY Interrupt Enable Reg */
#define MII_INT_STATUS 0x13 /* PHY Interrupt Status Reg */
/* MII_BMCR (0x00) */
#define BMCR_SPEED10 0x0000
/* MII_PSSR (0x11) */
#define PSSR_FC_RXEN 0x0004
#define PSSR_FC_TXEN 0x0008
#define PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
#define PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
#define PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
#define PSSR_10MBS 0x0000 /* 00=10Mbs */
#define PSSR_100MBS 0x4000 /* 01=100Mbs */
#define PSSR_1000MBS 0x8000 /* 10=1000Mbs */
/* MII DBG registers */
#define HIBERNATE_CTRL_REG 0xB
/* HIBERNATE_CTRL_REG */
#define HIBERNATE_EN 0x8000
/* MII_INT_ENABLE/MII_INT_STATUS */
#define LINK_SUCCESS_INTERRUPT BIT(10)
#define LINK_SUCCESS_BX BIT(7)
#define WOL_INT BIT(0)
#endif /*_EMAC_HW_H_*/

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,787 @@
/* SPDX-License-Identifier: GPL-2.0-only
* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _QCOM_EMAC_MAIN_H_
#define _QCOM_EMAC_MAIN_H_
#include <asm/byteorder.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
//#include <linux/wakelock.h>
#include "emac_phy.h"
/* Device IDs */
#define EMAC_DEV_ID 0x0040
/* DMA address */
#define DMA_ADDR_HI_MASK 0xffffffff00000000ULL
#define DMA_ADDR_LO_MASK 0x00000000ffffffffULL
#define EMAC_DMA_ADDR_HI(_addr) \
((u32)(((u64)(_addr) & DMA_ADDR_HI_MASK) >> 32))
#define EMAC_DMA_ADDR_LO(_addr) \
((u32)((u64)(_addr) & DMA_ADDR_LO_MASK))
/* 4 emac core irq and 1 wol irq */
#define EMAC_NUM_CORE_IRQ 4
#define EMAC_CORE0_IRQ 0
#define EMAC_CORE1_IRQ 1
#define EMAC_CORE2_IRQ 2
#define EMAC_CORE3_IRQ 3
#define EMAC_WOL_IRQ 4
#define EMAC_IRQ_CNT 5
/* mdio/mdc gpios */
#define EMAC_GPIO_CNT 2
#define EMAC_ADPT_RESET_WAIT_TIME 20
/**
* Requested EMAC votes for BUS bandwidth
*
* EMAC_NO_PERF_VOTE BUS Vote for inactive EMAC session or disconnect
* EMAC_MAX_PERF_VOTE Maximum BUS bandwidth vote
*
*/
enum emac_bus_vote {
	EMAC_NO_PERF_VOTE = 0,
	EMAC_MAX_PERF_VOTE
};
/* Indices of the regulator supplies used by the EMAC device. */
enum emac_vreg_id {
	EMAC_VREG1,
	EMAC_VREG2,
	EMAC_VREG3,
	EMAC_VREG4,
	EMAC_VREG5,
	EMAC_VREG_CNT
};
/* Indices of the clocks consumed by the EMAC device. */
enum emac_clk_id {
	EMAC_CLK_AXI,
	EMAC_CLK_CFG_AHB,
	EMAC_CLK_HIGH_SPEED,
	EMAC_CLK_MDIO,
	EMAC_CLK_TX,
	EMAC_CLK_RX,
	EMAC_CLK_SYS,
	EMAC_CLK_CNT
};
#define KHz(RATE) ((RATE) * 1000)
#define MHz(RATE) (KHz(RATE) * 1000)
/* Clock rates (in Hz) used for the EMAC clocks above. */
enum emac_clk_rate {
	EMC_CLK_RATE_2_5MHZ = KHz(2500),
	EMC_CLK_RATE_19_2MHZ = KHz(19200),
	EMC_CLK_RATE_25MHZ = MHz(25),
	EMC_CLK_RATE_125MHZ = MHz(125),
};
#define EMAC_LINK_SPEED_UNKNOWN 0x0
#define EMAC_LINK_SPEED_10_HALF 0x0001
#define EMAC_LINK_SPEED_10_FULL 0x0002
#define EMAC_LINK_SPEED_100_HALF 0x0004
#define EMAC_LINK_SPEED_100_FULL 0x0008
#define EMAC_LINK_SPEED_1GB_FULL 0x0020
#define EMAC_MAX_SETUP_LNK_CYCLE 100
/* Wake On Lan */
#define EMAC_WOL_PHY 0x00000001 /* PHY Status Change */
#define EMAC_WOL_MAGIC 0x00000002 /* Magic Packet */
/* Register block indices into emac_hw.reg_addr[]. */
enum emac_reg_bases {
	EMAC,
	EMAC_CSR,
	EMAC_1588,
	NUM_EMAC_REG_BASES
};
/* DMA Order Settings */
enum emac_dma_order {
	emac_dma_ord_in = 1,
	emac_dma_ord_enh = 2,
	emac_dma_ord_out = 4
};
/* DMA request block sizes (encoded values for EMAC_DMA_CTRL). */
enum emac_dma_req_block {
	emac_dma_req_128 = 0,
	emac_dma_req_256 = 1,
	emac_dma_req_512 = 2,
	emac_dma_req_1024 = 3,
	emac_dma_req_2048 = 4,
	emac_dma_req_4096 = 5
};
/* IEEE1588 */
enum emac_ptp_clk_mode {
	emac_ptp_clk_mode_oc_two_step,
	emac_ptp_clk_mode_oc_one_step
};
enum emac_ptp_mode {
	emac_ptp_mode_slave,
	emac_ptp_mode_master
};
/* emac_hw_stats - accumulated hardware MIB counters.
 * Hold @lock while reading/updating so readers see a consistent snapshot.
 */
struct emac_hw_stats {
/* rx */
u64 rx_ok; /* good packets */
u64 rx_bcast; /* good broadcast packets */
u64 rx_mcast; /* good multicast packets */
u64 rx_pause; /* pause packet */
u64 rx_ctrl; /* control packets other than pause frame. */
u64 rx_fcs_err; /* packets with bad FCS. */
u64 rx_len_err; /* packets with length mismatch */
u64 rx_byte_cnt; /* good bytes count (without FCS) */
u64 rx_runt; /* runt packets */
u64 rx_frag; /* fragment count */
u64 rx_sz_64; /* packets that are 64 bytes */
u64 rx_sz_65_127; /* packets that are 65-127 bytes */
u64 rx_sz_128_255; /* packets that are 128-255 bytes */
u64 rx_sz_256_511; /* packets that are 256-511 bytes */
u64 rx_sz_512_1023; /* packets that are 512-1023 bytes */
u64 rx_sz_1024_1518; /* packets that are 1024-1518 bytes */
u64 rx_sz_1519_max; /* packets that are 1519-MTU bytes*/
u64 rx_sz_ov; /* packets that are >MTU bytes (truncated) */
u64 rx_rxf_ov; /* packets dropped due to RX FIFO overflow */
u64 rx_align_err; /* alignment errors */
u64 rx_bcast_byte_cnt; /* broadcast packets byte count (without FCS) */
u64 rx_mcast_byte_cnt; /* multicast packets byte count (without FCS) */
u64 rx_err_addr; /* packets dropped due to address filtering */
u64 rx_crc_align; /* CRC align errors */
u64 rx_jubbers; /* jubbers */
/* tx */
u64 tx_ok; /* good packets */
u64 tx_bcast; /* good broadcast packets */
u64 tx_mcast; /* good multicast packets */
u64 tx_pause; /* pause packets */
u64 tx_exc_defer; /* packets with excessive deferral */
u64 tx_ctrl; /* control packets other than pause frame */
u64 tx_defer; /* packets that are deferred. */
u64 tx_byte_cnt; /* good bytes count (without FCS) */
u64 tx_sz_64; /* packets that are 64 bytes */
u64 tx_sz_65_127; /* packets that are 65-127 bytes */
u64 tx_sz_128_255; /* packets that are 128-255 bytes */
u64 tx_sz_256_511; /* packets that are 256-511 bytes */
u64 tx_sz_512_1023; /* packets that are 512-1023 bytes */
u64 tx_sz_1024_1518; /* packets that are 1024-1518 bytes */
u64 tx_sz_1519_max; /* packets that are 1519-MTU bytes */
u64 tx_1_col; /* packets single prior collision */
u64 tx_2_col; /* packets with multiple prior collisions */
u64 tx_late_col; /* packets with late collisions */
u64 tx_abort_col; /* packets aborted due to excess collisions */
u64 tx_underrun; /* packets aborted due to FIFO underrun */
u64 tx_rd_eop; /* count of reads beyond EOP */
u64 tx_len_err; /* packets with length mismatch */
u64 tx_trunc; /* packets truncated due to size >MTU */
u64 tx_bcast_byte; /* broadcast packets byte count (without FCS) */
u64 tx_mcast_byte; /* multicast packets byte count (without FCS) */
u64 tx_col; /* collisions */
spinlock_t lock; /* prevent multiple simultaneous readers */
};
/* Bit numbers used with emac_hw.flags via the TEST/SET/CLR_FLAG macros */
enum emac_hw_flags {
EMAC_FLAG_HW_PROMISC_EN,
EMAC_FLAG_HW_VLANSTRIP_EN,
EMAC_FLAG_HW_MULTIALL_EN,
EMAC_FLAG_HW_LOOPBACK_EN,
EMAC_FLAG_HW_PTP_CAP,
EMAC_FLAG_HW_PTP_EN,
EMAC_FLAG_HW_TS_RX_EN,
EMAC_FLAG_HW_TS_TX_EN,
};
/* Bit numbers used with emac_adapter.flags via the TEST/SET/CLR_FLAG macros */
enum emac_adapter_flags {
EMAC_FLAG_ADPT_STATE_RESETTING,
EMAC_FLAG_ADPT_STATE_DOWN,
EMAC_FLAG_ADPT_STATE_WATCH_DOG,
EMAC_FLAG_ADPT_TASK_REINIT_REQ,
EMAC_FLAG_ADPT_TASK_LSC_REQ,
EMAC_FLAG_ADPT_TASK_CHK_SGMII_REQ,
};
/* emac shorthand bitops macros; FLAG is the enum name minus EMAC_FLAG_ */
#define TEST_FLAG(OBJ, FLAG) test_bit(EMAC_FLAG_ ## FLAG, &((OBJ)->flags))
#define SET_FLAG(OBJ, FLAG) set_bit(EMAC_FLAG_ ## FLAG, &((OBJ)->flags))
#define CLR_FLAG(OBJ, FLAG) clear_bit(EMAC_FLAG_ ## FLAG, &((OBJ)->flags))
#define TEST_N_SET_FLAG(OBJ, FLAG) \
test_and_set_bit(EMAC_FLAG_ ## FLAG, &((OBJ)->flags))
/* emac_hw - register mappings plus ring/RSS/1588 hardware configuration */
struct emac_hw {
void __iomem *reg_addr[NUM_EMAC_REG_BASES];
u16 devid;
u16 revid;
/* ring parameter */
u8 tpd_burst;
u8 rfd_burst;
u8 dmaw_dly_cnt;
u8 dmar_dly_cnt;
enum emac_dma_req_block dmar_block;
enum emac_dma_req_block dmaw_block;
enum emac_dma_order dma_order;
/* RSS parameter */
u8 rss_hstype;
u8 rss_base_cpu;
u16 rss_idt_size;
u32 rss_idt[32];
u8 rss_key[40];
bool rss_initialized;
/* 1588 parameter */
enum emac_ptp_clk_mode ptp_clk_mode;
enum emac_ptp_mode ptp_mode;
u32 ptp_intr_mask;
spinlock_t ptp_lock; /* sync access to ptp hw */
u32 tstamp_rx_offset;
u32 tstamp_tx_offset;
void *frac_ns_adj_tbl;
u32 frac_ns_adj_tbl_sz;
s32 frac_ns_adj;
u32 irq_mod;
u32 preamble;
unsigned long flags;
};
/* RSS hstype Definitions */
#define EMAC_RSS_HSTYP_IPV4_EN 0x00000001
#define EMAC_RSS_HSTYP_TCP4_EN 0x00000002
#define EMAC_RSS_HSTYP_IPV6_EN 0x00000004
#define EMAC_RSS_HSTYP_TCP6_EN 0x00000008
#define EMAC_RSS_HSTYP_ALL_EN (\
EMAC_RSS_HSTYP_IPV4_EN |\
EMAC_RSS_HSTYP_TCP4_EN |\
EMAC_RSS_HSTYP_IPV6_EN |\
EMAC_RSS_HSTYP_TCP6_EN)
/******************************************************************************/
/* Logging functions and macros */
#define emac_err(_adpt, _format, ...) \
netdev_err((_adpt)->netdev, _format, ##__VA_ARGS__)
#define emac_info(_adpt, _mlevel, _netdev, _format, ...) \
netif_info(_adpt, _mlevel, _netdev, _format, ##__VA_ARGS__)
#define emac_warn(_adpt, _mlevel, _netdev, _format, ...) \
netif_warn(_adpt, _mlevel, _netdev, _format, ##__VA_ARGS__)
#define emac_dbg(_adpt, _mlevel, _netdev, _format, ...) \
netif_dbg(_adpt, _mlevel, _netdev, _format, ##__VA_ARGS__)
/* Frame/buffer sizing limits (bytes) */
#define EMAC_DEF_RX_BUF_SIZE 1536
#define EMAC_MAX_JUMBO_PKT_SIZE (9 * 1024)
#define EMAC_MAX_TX_OFFLOAD_THRESH (9 * 1024)
#define EMAC_MAX_ETH_FRAME_SIZE EMAC_MAX_JUMBO_PKT_SIZE
#define EMAC_MIN_ETH_FRAME_SIZE 68
/* Queue and descriptor-ring count limits/defaults */
#define EMAC_MAX_TX_QUEUES 4
#define EMAC_DEF_TX_QUEUES 1
#define EMAC_ACTIVE_TXQ 0
#define EMAC_MAX_RX_QUEUES 4
#define EMAC_DEF_RX_QUEUES 1
#define EMAC_MIN_TX_DESCS 128
#define EMAC_MIN_RX_DESCS 128
#define EMAC_MAX_TX_DESCS 16383
#define EMAC_MAX_RX_DESCS 2047
#define EMAC_DEF_TX_DESCS 512
#define EMAC_DEF_RX_DESCS 256
/* Default interrupt moderation settings */
#define EMAC_DEF_RX_IRQ_MOD 250
#define EMAC_DEF_TX_IRQ_MOD 250
#define EMAC_WATCHDOG_TIME (5 * HZ)
/* RRD */
/* general parameter format of rrd */
struct emac_sw_rrdes_general {
/* dword 0 */
u32 xsum:16;
u32 nor:4; /* number of RFD */
u32 si:12; /* start index of rfd-ring */
/* dword 1 */
u32 hash;
/* dword 2 */
u32 cvlan_tag:16; /* vlan-tag */
u32 reserved:8;
u32 ptp_timestamp:1;
u32 rss_cpu:3; /* CPU number used by RSS */
u32 rss_flag:4; /* rss_flag 0, TCP(IPv6) flag for RSS hash algrithm
 * rss_flag 1, IPv6 flag for RSS hash algrithm
 * rss_flag 2, TCP(IPv4) flag for RSS hash algrithm
 * rss_flag 3, IPv4 flag for RSS hash algrithm
 */
/* dword 3 */
u32 pkt_len:14; /* length of the packet */
u32 l4f:1; /* L4(TCP/UDP) checksum failed */
u32 ipf:1; /* IP checksum failed */
u32 cvlan_flag:1; /* vlan tagged */
u32 pid:3;
u32 res:1; /* received error summary */
u32 crc:1; /* crc error */
u32 fae:1; /* frame alignment error */
u32 trunc:1; /* truncated packet, larger than MTU */
u32 runt:1; /* runt packet */
u32 icmp:1; /* incomplete packet due to insufficient rx-desc*/
u32 bar:1; /* broadcast address received */
u32 mar:1; /* multicast address received */
u32 type:1; /* ethernet type */
u32 fov:1; /* fifo overflow */
u32 lene:1; /* length error */
u32 update:1; /* update */
/* dword 4 */
u32 ts_low:30; /* timestamp, low bits */
u32 __unused__:2;
/* dword 5 */
u32 ts_high; /* timestamp, high bits */
};
/* EMAC Errors in emac_sw_rrdesc.dfmt.dw[3] */
#define EMAC_RRDES_L4F BIT(14)
#define EMAC_RRDES_IPF BIT(15)
#define EMAC_RRDES_CRC BIT(21)
#define EMAC_RRDES_FAE BIT(22)
#define EMAC_RRDES_TRN BIT(23)
#define EMAC_RRDES_RNT BIT(24)
#define EMAC_RRDES_INC BIT(25)
#define EMAC_RRDES_FOV BIT(29)
#define EMAC_RRDES_LEN BIT(30)
/* rx return descriptor: field view overlaid on the raw dword view */
union emac_sw_rrdesc {
struct emac_sw_rrdes_general genr;
/* dword flat format */
struct {
u32 dw[6];
} dfmt;
};
/* RFD */
/* general parameter format of rfd */
struct emac_sw_rfdes_general {
u64 addr;
};
/* rx free descriptor: field view overlaid on the raw dword view */
union emac_sw_rfdesc {
struct emac_sw_rfdes_general genr;
/* dword flat format */
struct {
u32 dw[2];
} dfmt;
};
/* TPD */
/* general parameter format of tpd */
struct emac_sw_tpdes_general {
/* dword 0 */
u32 buffer_len:16; /* include 4-byte CRC */
u32 svlan_tag:16;
/* dword 1 */
u32 l4hdr_offset:8; /* l4 header offset to the 1st byte of packet */
u32 c_csum:1;
u32 ip_csum:1;
u32 tcp_csum:1;
u32 udp_csum:1;
u32 lso:1;
u32 lso_v2:1;
u32 svtagged:1; /* vlan-id tagged already */
u32 ins_svtag:1; /* insert vlan tag */
u32 ipv4:1; /* ipv4 packet */
u32 type:1; /* type of packet (ethernet_ii(0) or snap(1)) */
u32 reserve:12;
u32 epad:1; /* even byte padding when this packet */
u32 last_frag:1; /* last fragment(buffer) of the packet */
/* dword 2 */
u32 addr_lo;
/* dword 3 */
u32 cvlan_tag:16;
u32 cvtagged:1;
u32 ins_cvtag:1;
u32 addr_hi:13;
u32 tstmp_sav:1;
};
/* custom checksum parameter format of tpd */
struct emac_sw_tpdes_checksum {
/* dword 0 */
u32 buffer_len:16;
u32 svlan_tag:16;
/* dword 1 */
u32 payld_offset:8; /* payload offset to the 1st byte of packet */
u32 c_csum:1; /* do custom checksum offload */
u32 ip_csum:1; /* do ip(v4) header checksum offload */
u32 tcp_csum:1; /* do tcp checksum offload, both ipv4 and ipv6 */
u32 udp_csum:1; /* do udp checksum offload, both ipv4 and ipv6 */
u32 lso:1;
u32 lso_v2:1;
u32 svtagged:1; /* vlan-id tagged already */
u32 ins_svtag:1; /* insert vlan tag */
u32 ipv4:1; /* ipv4 packet */
u32 type:1; /* type of packet (ethernet_ii(0) or snap(1)) */
u32 cxsum_offset:8; /* checksum offset to the 1st byte of packet */
u32 reserve:4;
u32 epad:1; /* even byte padding when this packet */
u32 last_frag:1; /* last fragment(buffer) of the packet */
/* dword 2 */
u32 addr_lo;
/* dword 3 */
u32 cvlan_tag:16;
u32 cvtagged:1;
u32 ins_cvtag:1;
u32 addr_hi:14;
};
/* tcp large send format (v1/v2) of tpd */
struct emac_sw_tpdes_tso {
/* dword 0 */
u32 buffer_len:16; /* include 4-byte CRC */
u32 svlan_tag:16;
/* dword 1 */
u32 tcphdr_offset:8; /* tcp hdr offset to the 1st byte of packet */
u32 c_csum:1;
u32 ip_csum:1;
u32 tcp_csum:1;
u32 udp_csum:1;
u32 lso:1; /* do tcp large send (ipv4 only) */
u32 lso_v2:1; /* must be 0 in this format */
u32 svtagged:1; /* vlan-id tagged already */
u32 ins_svtag:1; /* insert vlan tag */
u32 ipv4:1; /* ipv4 packet */
u32 type:1; /* type of packet (ethernet_ii(1) or snap(0)) */
u32 mss:13; /* mss if do tcp large send */
u32 last_frag:1; /* last fragment(buffer) of the packet */
/* dword 2 & 3 */
u64 pkt_len:32; /* packet length in ext tpd */
u64 reserve:32;
};
/* tx packet descriptor: field views overlaid on the raw dword view */
union emac_sw_tpdesc {
struct emac_sw_tpdes_general genr;
struct emac_sw_tpdes_checksum csum;
struct emac_sw_tpdes_tso tso;
/* dword flat format */
struct {
u32 dw[4];
} dfmt;
};
/* Address of descriptor _i in a ring, given the per-descriptor _size */
#define EMAC_RRD(_que, _size, _i) \
((_que)->rrd.rrdesc + ((_size) * (_i)))
#define EMAC_RFD(_que, _size, _i) \
((_que)->rfd.rfdesc + ((_size) * (_i)))
#define EMAC_TPD(_que, _size, _i) \
((_que)->tpd.tpdesc + ((_size) * (_i)))
#define EMAC_TPD_LAST_FRAGMENT 0x80000000
#define EMAC_TPD_TSTAMP_SAVE 0x80000000
/* emac_irq_per_dev per-device (per-adapter) irq properties.
 * @idx: index of this irq entry in the adapter irq array.
 * @irq: irq number.
 * @mask mask to use over status register.
 */
struct emac_irq_per_dev {
int idx;
unsigned int irq;
u32 mask;
};
/* emac_irq_common irq properties which are common to all devices of this driver
 * @name name in configuration (devicetree).
 * @handler ISR.
 * @status_reg status register offset.
 * @mask_reg mask register offset.
 * @init_mask initial value for mask to use over status register.
 * @irqflags request_irq() flags.
 */
struct emac_irq_common {
char *name;
irq_handler_t handler;
u32 status_reg;
u32 mask_reg;
u32 init_mask;
unsigned long irqflags;
};
/* emac_irq_cmn_tbl a table of common irq properties to all devices of this
 * driver.
 */
extern const struct emac_irq_common emac_irq_cmn_tbl[];
/* One clock handle plus its driver-tracked enabled state */
struct emac_clk {
struct clk *clk;
bool enabled;
};
/* One regulator handle plus its requested voltage and enabled state */
struct emac_regulator {
struct regulator *vreg;
int voltage_uv;
bool enabled;
};
/* emac_ring_header represents a single, contiguous block of DMA space
 * mapped for the three descriptor rings (tpd, rfd, rrd)
 */
struct emac_ring_header {
void *desc; /* virtual address */
dma_addr_t dma; /* physical address */
unsigned int size; /* length in bytes */
unsigned int used;
};
/* emac_buffer is wrapper around a pointer to a socket buffer
 * so a DMA handle can be stored along with the skb
 */
struct emac_buffer {
struct sk_buff *skb; /* socket buffer */
u16 length; /* rx buffer length */
dma_addr_t dma;
};
/* receive free descriptor (rfd) ring */
struct emac_rfd_ring {
struct emac_buffer *rfbuff;
u32 __iomem *rfdesc; /* virtual address */
dma_addr_t rfdma; /* physical address */
u64 size; /* length in bytes */
u32 count; /* number of descriptors in the ring */
u32 produce_idx;
u32 process_idx;
u32 consume_idx; /* unused */
};
/* receive return descriptor (rrd) ring */
struct emac_rrd_ring {
u32 __iomem *rrdesc; /* virtual address */
dma_addr_t rrdma; /* physical address */
u64 size; /* length in bytes */
u32 count; /* number of descriptors in the ring */
u32 produce_idx; /* unused */
u32 consume_idx;
};
/* rx queue */
struct emac_rx_queue {
struct device *dev; /* device for dma mapping */
struct net_device *netdev; /* netdev ring belongs to */
struct emac_rrd_ring rrd;
struct emac_rfd_ring rfd;
struct napi_struct napi;
u16 que_idx; /* index in multi rx queues*/
/* register offset / mask / shift triplets for the ring pointers */
u16 produce_reg;
u32 produce_mask;
u8 produce_shft;
u16 process_reg;
u32 process_mask;
u8 process_shft;
u16 consume_reg;
u32 consume_mask;
u8 consume_shft;
u32 intr;
struct emac_irq_per_dev *irq;
};
#define GET_RFD_BUFFER(_rque, _i) (&((_rque)->rfd.rfbuff[(_i)]))
/* transimit packet descriptor (tpd) ring */
struct emac_tpd_ring {
struct emac_buffer *tpbuff;
u32 __iomem *tpdesc; /* virtual address */
dma_addr_t tpdma; /* physical address */
u64 size; /* length in bytes */
u32 count; /* number of descriptors in the ring */
u32 produce_idx;
u32 consume_idx;
u32 last_produce_idx;
};
#define EMAC_HWTXTSTAMP_FIFO_DEPTH 8
#define EMAC_TX_POLL_HWTXTSTAMP_THRESHOLD EMAC_HWTXTSTAMP_FIFO_DEPTH
/* HW tx timestamp */
struct emac_hwtxtstamp {
u32 ts_idx;
u32 sec;
u32 ns;
};
/* Counters tracking the tx-timestamp delivery path */
struct emac_tx_tstamp_stats {
u32 tx;
u32 rx;
u32 deliver;
u32 drop;
u32 lost;
u32 timeout;
u32 sched;
u32 poll;
u32 tx_poll;
};
/* tx queue */
struct emac_tx_queue {
struct device *dev; /* device for dma mapping */
struct net_device *netdev; /* netdev ring belongs to */
struct emac_tpd_ring tpd;
u16 que_idx; /* needed for multiqueue queue management */
u16 max_packets; /* max packets per interrupt */
/* register offset / mask / shift triplets for the ring pointers */
u16 produce_reg;
u32 produce_mask;
u8 produce_shft;
u16 consume_reg;
u32 consume_mask;
u8 consume_shft;
};
/* driver private data structure */
struct emac_adapter {
struct net_device *netdev;
struct mii_bus *mii_bus;
struct phy_device *phydev;
struct emac_phy phy;
struct emac_hw hw;
struct emac_hw_stats hw_stats;
int irq_status;
struct emac_irq_per_dev irq[EMAC_IRQ_CNT];
unsigned int gpio[EMAC_GPIO_CNT];
struct emac_clk clk[EMAC_CLK_CNT];
struct emac_regulator vreg[EMAC_VREG_CNT];
/* dma parameters */
u64 dma_mask;
struct device_dma_parameters dma_parms;
/* All Descriptor memory */
struct emac_ring_header ring_header;
struct emac_tx_queue tx_queue[EMAC_MAX_TX_QUEUES];
struct emac_rx_queue rx_queue[EMAC_MAX_RX_QUEUES];
u16 num_txques;
u16 num_rxques;
u32 num_txdescs;
u32 num_rxdescs;
u8 rrdesc_size; /* in quad words */
u8 rfdesc_size; /* in quad words */
u8 tpdesc_size; /* in quad words */
u32 rxbuf_size;
/* True == use single-pause-frame mode. */
bool single_pause_mode;
/* tx timestamping queue */
struct sk_buff_head hwtxtstamp_pending_queue;
struct sk_buff_head hwtxtstamp_ready_queue;
struct work_struct hwtxtstamp_task;
spinlock_t hwtxtstamp_lock; /* lock for hwtxtstamp */
struct emac_tx_tstamp_stats hwtxtstamp_stats;
struct work_struct work_thread;
struct timer_list emac_timer;
unsigned long link_jiffies;
bool tstamp_en;
u32 wol;
u16 msg_enable;
unsigned long flags;
struct pinctrl *pinctrl;
struct pinctrl_state *mdio_pins_clk_active;
struct pinctrl_state *mdio_pins_clk_sleep;
struct pinctrl_state *mdio_pins_data_active;
struct pinctrl_state *mdio_pins_data_sleep;
struct pinctrl_state *ephy_pins_active;
struct pinctrl_state *ephy_pins_sleep;
int (*gpio_on)(struct emac_adapter *adpt, bool mdio, bool ephy);
int (*gpio_off)(struct emac_adapter *adpt, bool mdio, bool ephy);
struct wakeup_source *link_wlock;
u32 bus_cl_hdl;
struct msm_bus_scale_pdata *bus_scale_table;
};
/* The hw block is embedded in the adapter; walk back to the parent. */
static inline struct emac_adapter *emac_hw_get_adap(struct emac_hw *hw)
{
	struct emac_adapter *adpt;

	adpt = container_of(hw, struct emac_adapter, hw);
	return adpt;
}
/* Recover the owning adapter from a pointer to one element of its irq[]
 * array: first step back to element 0 using the entry's own idx, then
 * subtract the array's offset within struct emac_adapter.
 */
static inline
struct emac_adapter *emac_irq_get_adpt(struct emac_irq_per_dev *irq)
{
struct emac_irq_per_dev *irq_0 = irq - irq->idx;
/* why using __builtin_offsetof() and not container_of() ?
* container_of(irq_0, struct emac_adapter, irq) fails to compile
* because emac->irq is of array type.
*/
return (struct emac_adapter *)
((char *)irq_0 - __builtin_offsetof(struct emac_adapter, irq));
}
/* default to trying for four seconds */
#define EMAC_TRY_LINK_TIMEOUT (4 * 1000)
#define EMAC_HW_CTRL_RESET_MAC 0x00000001
/* Entry points implemented in the driver's other translation units */
void emac_set_ethtool_ops(struct net_device *netdev);
int emac_reinit_locked(struct emac_adapter *adpt);
void emac_update_hw_stats(struct emac_adapter *adpt);
int emac_resize_rings(struct net_device *netdev);
int emac_mac_up(struct emac_adapter *adpt);
void emac_mac_down(struct emac_adapter *adpt, u32 ctrl);
int emac_clk_set_rate(struct emac_adapter *adpt, enum emac_clk_id id,
enum emac_clk_rate rate);
void emac_task_schedule(struct emac_adapter *adpt);
void emac_check_lsc(struct emac_adapter *adpt);
void emac_wol_gpio_irq(struct emac_adapter *adpt, bool enable);
/* Compatibility shim: allocate a coherent DMA buffer and zero it
 * before returning, mirroring the old kernel dma_zalloc_coherent().
 */
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *buf = dma_alloc_coherent(dev, size, dma_handle, flag);

	if (!buf)
		return NULL;

	memset(buf, 0, size);
	return buf;
}
#endif /* _QCOM_EMAC_MAIN_H_ */

View file

@ -0,0 +1,270 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* MSM EMAC PHY Controller driver.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/iopoll.h>
#include <linux/acpi.h>
#include <linux/phy.h>
#include <linux/pm_runtime.h>
#include "emac_hw.h"
#include "emac_defines.h"
#include "emac_regs.h"
#include "emac_phy.h"
#include "emac_rgmii.h"
#include "emac_sgmii.h"
/* MDIO bus read callback: clause-22 read of @regnum on PHY @addr.
 * Returns the 16-bit register value, 0 while runtime-suspended, or
 * -EIO on MDIO completion timeout.
 *
 * Fix: dropped the dead statement "reg = reg & ~(...)" -- its result was
 * unconditionally overwritten by the following assignment.
 */
static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct emac_adapter *adpt = bus->priv;
	struct emac_hw *hw = &adpt->hw;
	u32 reg = 0;
	int ret = 0;

	/* Registers are unclocked while runtime-suspended; bail out early */
	if (pm_runtime_enabled(adpt->netdev->dev.parent) &&
	    pm_runtime_status_suspended(adpt->netdev->dev.parent)) {
		emac_dbg(adpt, hw, adpt->netdev, "EMAC in suspended state\n");
		return ret;
	}

	/* Select the PHY address for this transaction */
	emac_reg_update32(hw, EMAC, EMAC_PHY_STS, PHY_ADDR_BMSK,
			  (addr << PHY_ADDR_SHFT));
	wmb(); /* ensure PHY address is set before we proceed */

	/* Build and issue the MDIO read command */
	reg = SUP_PREAMBLE |
	      ((MDIO_CLK_25_4 << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) |
	      ((regnum << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) |
	      MDIO_START | MDIO_RD_NWR;
	emac_reg_w32(hw, EMAC, EMAC_MDIO_CTRL, reg);
	mb(); /* ensure hw starts the operation before we check for result */

	if (readl_poll_timeout(hw->reg_addr[EMAC] + EMAC_MDIO_CTRL, reg,
			       !(reg & (MDIO_START | MDIO_BUSY)),
			       100, MDIO_WAIT_TIMES * 100)) {
		emac_err(adpt, "error reading phy addr %d phy reg 0x%02x\n",
			 addr, regnum);
		ret = -EIO;
	} else {
		ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;
		emac_dbg(adpt, hw, adpt->netdev, "EMAC PHY ADDR %d PHY RD 0x%02x -> 0x%04x\n",
			 addr, regnum, ret);
	}
	return ret;
}
/* MDIO bus write callback: clause-22 write of @val to @regnum on PHY
 * @addr.  Returns 0 on success (or while runtime-suspended), -EIO on
 * MDIO completion timeout.
 *
 * Fix: dropped the dead statement "reg = reg & ~(...)" -- its result was
 * unconditionally overwritten by the following assignment.
 */
static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	struct emac_adapter *adpt = bus->priv;
	struct emac_hw *hw = &adpt->hw;
	u32 reg = 0;
	int ret = 0;

	/* Registers are unclocked while runtime-suspended; bail out early */
	if (pm_runtime_enabled(adpt->netdev->dev.parent) &&
	    pm_runtime_status_suspended(adpt->netdev->dev.parent)) {
		emac_dbg(adpt, hw, adpt->netdev, "EMAC in suspended state\n");
		return ret;
	}

	/* Select the PHY address for this transaction */
	emac_reg_update32(hw, EMAC, EMAC_PHY_STS, PHY_ADDR_BMSK,
			  (addr << PHY_ADDR_SHFT));
	wmb(); /* ensure PHY address is set before we proceed */

	/* Build and issue the MDIO write command */
	reg = SUP_PREAMBLE |
	      ((MDIO_CLK_25_4 << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) |
	      ((regnum << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) |
	      ((val << MDIO_DATA_SHFT) & MDIO_DATA_BMSK) |
	      MDIO_START;
	emac_reg_w32(hw, EMAC, EMAC_MDIO_CTRL, reg);
	mb(); /* ensure hw starts the operation before we check for result */

	if (readl_poll_timeout(hw->reg_addr[EMAC] + EMAC_MDIO_CTRL, reg,
			       !(reg & (MDIO_START | MDIO_BUSY)), 100,
			       MDIO_WAIT_TIMES * 100)) {
		emac_err(adpt, "error writing phy addr %d phy reg 0x%02x data 0x%02x\n",
			 addr, regnum, val);
		ret = -EIO;
	} else {
		emac_dbg(adpt, hw, adpt->netdev, "EMAC PHY Addr %d PHY WR 0x%02x <- 0x%04x\n",
			 addr, regnum, val);
	}
	return ret;
}
/* Program the MAC rx/tx flow-control enable bits from the current
 * flow-control mode (or the requested mode when autoneg of flow
 * control is disabled or the phy is internal).
 * Returns 0 on success, -EINVAL for an unrecognized mode.
 */
int emac_phy_config_fc(struct emac_adapter *adpt)
{
struct emac_phy *phy = &adpt->phy;
struct emac_hw *hw = &adpt->hw;
u32 mac;
if (phy->disable_fc_autoneg || !phy->external)
phy->cur_fc_mode = phy->req_fc_mode;
mac = emac_reg_r32(hw, EMAC, EMAC_MAC_CTRL);
switch (phy->cur_fc_mode) {
case EMAC_FC_NONE:
mac &= ~(RXFC | TXFC);
break;
case EMAC_FC_RX_PAUSE:
mac &= ~TXFC;
mac |= RXFC;
break;
case EMAC_FC_TX_PAUSE:
mac |= TXFC;
mac &= ~RXFC;
break;
case EMAC_FC_FULL:
case EMAC_FC_DEFAULT:
mac |= (TXFC | RXFC);
break;
default:
emac_err(adpt, "flow control param set incorrectly\n");
return -EINVAL;
}
emac_reg_w32(hw, EMAC, EMAC_MAC_CTRL, mac);
/* ensure flow control config is flushed to hw */
wmb();
return 0;
}
/* Configure the MDIO bus and connect the external PHY.
 * Registers the mii_bus (via ACPI properties or devicetree), locates the
 * external PHY device and stores it in adpt->phydev.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENODEV when no
 * PHY is found, or the mdiobus registration error.
 *
 * Fixes: removed an error message that was logged on the *success* path
 * of device_property_read_u32() before mdiobus_get_phy() was even tried
 * (the later !adpt->phydev check already reports lookup failure);
 * removed the unused phy_id local and committed-out code fragments.
 */
int emac_phy_config_external(struct platform_device *pdev,
			     struct emac_adapter *adpt)
{
	struct device_node *np = pdev->dev.of_node;
	struct mii_bus *mii_bus;
	int ret;

	/* Create the mii_bus object for talking to the MDIO bus */
	mii_bus = devm_mdiobus_alloc(&pdev->dev);
	adpt->mii_bus = mii_bus;
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "emac-mdio";
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
	mii_bus->read = emac_mdio_read;
	mii_bus->write = emac_mdio_write;
	mii_bus->parent = &pdev->dev;
	mii_bus->priv = adpt;

	if (ACPI_COMPANION(&pdev->dev)) {
		u32 phy_addr;

		ret = mdiobus_register(mii_bus);
		if (ret) {
			emac_err(adpt, "could not register mdio bus\n");
			return ret;
		}
		ret = device_property_read_u32(&pdev->dev, "phy-channel",
					       &phy_addr);
		if (ret)
			/* If we can't read a valid phy address, then assume
			 * that there is only one phy on this mdio bus.
			 */
			adpt->phydev = phy_find_first(mii_bus);
		else
			adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
	} else {
		struct device_node *phy_np;

		ret = of_mdiobus_register(mii_bus, np);
		if (ret) {
			emac_err(adpt, "could not register mdio bus\n");
			return ret;
		}
		phy_np = of_parse_phandle(np, "phy-handle", 0);
		adpt->phydev = of_phy_find_device(phy_np);
		of_node_put(phy_np);
	}

	if (!adpt->phydev) {
		emac_err(adpt, "could not find external phy\n");
		mdiobus_unregister(mii_bus);
		return -ENODEV;
	}

	if (adpt->phydev->drv) {
		emac_dbg(adpt, probe, adpt->netdev, "attached PHY driver [%s] ",
			 adpt->phydev->drv->name);
		emac_dbg(adpt, probe, adpt->netdev, "(mii_bus:phy_addr=%s, irq=%d)\n",
			 dev_name(&adpt->phydev->mdio.dev), adpt->phydev->irq);
	}

	/* Set initial link status to false */
	adpt->phydev->link = 0;
	return 0;
}
int emac_phy_config_internal(struct platform_device *pdev,
struct emac_adapter *adpt)
{
struct emac_phy *phy = &adpt->phy;
struct device_node *dt = pdev->dev.of_node;
int ret;
phy->external = !of_property_read_bool(dt, "qcom,no-external-phy");
/* Get the link mode */
ret = of_get_phy_mode(dt, &phy->phy_interface);
if (ret < 0) {
emac_err(adpt, "unknown phy mode: %s\n", phy_modes(ret));
return ret;
}
switch (phy->phy_interface) {
case PHY_INTERFACE_MODE_RGMII:
phy->ops = emac_rgmii_ops;
break;
case PHY_INTERFACE_MODE_SGMII:
phy->ops = emac_sgmii_ops;
break;
default:
emac_err(adpt, "unsupported phy mode: %s\n", phy_modes(ret));
return -EINVAL;
}
ret = phy->ops.config(pdev, adpt);
if (ret)
return ret;
return 0;
}

View file

@ -0,0 +1,95 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __EMAC_PHY_H__
#define __EMAC_PHY_H__
#include <linux/platform_device.h>
#include <linux/phy.h>
struct emac_adapter;
/* Per-interface (RGMII/SGMII) phy backend operations; config is the only
 * hook invoked unconditionally, the rest are backend lifecycle callbacks.
 */
struct emac_phy_ops {
int (*config)(struct platform_device *pdev, struct emac_adapter *adpt);
void (*reset)(struct emac_adapter *adpt);
int (*up)(struct emac_adapter *adpt);
void (*down)(struct emac_adapter *adpt);
int (*link_setup_no_ephy)(struct emac_adapter *adpt);
int (*link_check_no_ephy)(struct emac_adapter *adpt,
struct phy_device *phydev);
void (*tx_clk_set_rate)(struct emac_adapter *adpt);
void (*periodic_task)(struct emac_adapter *adpt);
};
/* Flow-control configurations accepted by emac_phy_config_fc() */
enum emac_flow_ctrl {
EMAC_FC_NONE,
EMAC_FC_RX_PAUSE,
EMAC_FC_TX_PAUSE,
EMAC_FC_FULL,
EMAC_FC_DEFAULT
};
/* Board/phy register-map variants */
enum emac_phy_map_type {
EMAC_PHY_MAP_DEFAULT = 0,
EMAC_PHY_MAP_MDM9607,
EMAC_PHY_MAP_V2,
EMAC_PHY_MAP_NUM,
};
/* emac_phy - internal emac phy
 * @addr mii address
 * @id vendor id
 * @cur_fc_mode flow control mode in effect
 * @req_fc_mode flow control mode requested by caller
 * @disable_fc_autoneg Do not auto-negotiate flow control
 */
struct emac_phy {
phy_interface_t phy_interface;
u32 phy_version;
bool external; /* true unless "qcom,no-external-phy" is set */
struct emac_phy_ops ops;
void *private;
/* flow control configuration */
enum emac_flow_ctrl cur_fc_mode;
enum emac_flow_ctrl req_fc_mode;
bool disable_fc_autoneg;
enum emac_phy_map_type board_id;
/* cached link state */
int link_up;
int link_speed;
int link_duplex;
int link_pause;
/* wake-on-lan irq bookkeeping */
bool is_wol_irq_reg;
bool is_wol_enabled;
spinlock_t wol_irq_lock; /* lock for wol irq gpio enablement */
bool is_ext_phy_connect;
};
/* Phy layer entry points implemented in emac_phy.c and backends */
int emac_phy_config_internal(struct platform_device *pdev,
struct emac_adapter *adpt);
int emac_phy_config_external(struct platform_device *pdev,
struct emac_adapter *adpt);
int emac_phy_setup_link(struct emac_adapter *adpt, u32 speed, bool autoneg,
bool fc);
int emac_phy_setup_link_speed(struct emac_adapter *adpt, u32 speed,
bool autoneg, bool fc);
int emac_phy_check_link(struct emac_adapter *adpt, u32 *speed, bool *link_up);
int emac_phy_get_lpa_speed(struct emac_adapter *adpt, u32 *speed);
int emac_phy_config_fc(struct emac_adapter *adpt);
void emac_phy_reset_external(struct emac_adapter *adpt);
#endif /* __EMAC_PHY_H__ */

View file

@ -0,0 +1,901 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* Qualcomm Technologies, Inc. EMAC Ethernet Controller driver.
*/
#include <linux/phy.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include "emac_main.h"
#include "emac_hw.h"
#include "emac_ptp.h"
/* RTC increment register layout: ns field in bits 31:26, fractional-ns
 * (1/2^26 ns units) in bits 25:0.
 */
#define RTC_INC_FRAC_NS_BMSK 0x03ffffff
#define RTC_INC_FRAC_NS_SHFT 0
#define RTC_INC_NS_BMSK 0xfc000000
#define RTC_INC_NS_SHFT 26
/* number of fractional-ns units in one nanosecond (2^26) */
#define RTC_NUM_FRAC_NS_PER_NS BIT(RTC_INC_NS_SHFT)
/* combined reset bits for the tx / rx timestamp FIFOs */
#define TS_TX_FIFO_SYNC_RST (TX_INDX_FIFO_SYNC_RST | TX_TS_FIFO_SYNC_RST)
#define TS_RX_FIFO_SYNC_RST (RX_TS_FIFO1_SYNC_RST | RX_TS_FIFO2_SYNC_RST)
#define TS_FIFO_SYNC_RST (TS_TX_FIFO_SYNC_RST | TS_RX_FIFO_SYNC_RST)
/* Per-(phy mode, link speed) hardware timestamping latencies;
 * tx/rx units not shown here -- presumably ns, TODO confirm.
 */
struct emac_tstamp_hw_delay {
int phy_mode;
u32 speed;
u32 tx;
u32 rx;
};
/* Fractional-ns adjustment associated with an RTC reference clock rate */
struct emac_ptp_frac_ns_adj {
u32 ref_clk_rate;
s32 adj_val;
};
/* Lookup table for emac_get_ptp_hw_delay(); terminated by a zero entry */
static const struct emac_tstamp_hw_delay emac_ptp_hw_delay[] = {
{ PHY_INTERFACE_MODE_SGMII, SPEED_1000, 16, 60 },
{ PHY_INTERFACE_MODE_SGMII, SPEED_100, 280, 100 },
{ PHY_INTERFACE_MODE_SGMII, SPEED_10, 2400, 400 },
{ 0 }
};
static inline u32 get_rtc_ref_clkrate(struct emac_hw *hw)
{
struct emac_adapter *adpt = emac_hw_get_adap(hw);
return clk_get_rate(adpt->clk[EMAC_CLK_HIGH_SPEED].clk);
}
/* A fractional-ns adjustment is valid only when it is strictly within
 * +/- one nanosecond expressed in fractional-ns units.
 */
static inline bool is_valid_frac_ns_adj(s32 val)
{
	return (val > -RTC_NUM_FRAC_NS_PER_NS) &&
	       (val < RTC_NUM_FRAC_NS_PER_NS);
}
/* Return the fractional-ns adjustment configured for the current RTC
 * reference clock rate; 0 when the table is absent or has no valid
 * entry for that rate.
 */
static s32 get_frac_ns_adj_from_tbl(struct emac_hw *hw)
{
const struct emac_ptp_frac_ns_adj *tbl = hw->frac_ns_adj_tbl;
u32 clk = get_rtc_ref_clkrate(hw);
s32 val = 0;
int i;
for (i = 0; tbl && i < hw->frac_ns_adj_tbl_sz; i++) {
if (tbl[i].ref_clk_rate == clk) {
if (is_valid_frac_ns_adj(tbl[i].adj_val))
val = tbl[i].adj_val;
break;
}
}
return val;
}
/* Program the RTC increment registers so the 1588 counter advances by
 * (1e9 / ref_clk) ns per reference-clock cycle, biased by @adj
 * fractional-ns units.  Always returns 0.
 */
static int emac_hw_set_rtc_inc_value(struct emac_hw *hw, s32 adj)
{
u32 clk = get_rtc_ref_clkrate(hw);
u32 ns, frac, rem, inc;
u64 v;
/* whole ns per cycle; remainder converted to 2^-26 ns units */
ns = div_u64_rem(1000000000LL, clk, &rem);
v = (u64)rem << RTC_INC_NS_SHFT;
frac = div_u64(v, clk);
if (adj) {
s32 res;
/* apply the bias, borrowing from / carrying into the ns field */
res = (s32)frac + adj;
if (res < 0) {
ns--;
res += RTC_NUM_FRAC_NS_PER_NS;
} else if (res >= RTC_NUM_FRAC_NS_PER_NS) {
ns++;
res -= RTC_NUM_FRAC_NS_PER_NS;
}
frac = (u32)res;
}
/* pack ns.frac and write the 32-bit value as two 16-bit registers */
inc = (ns << RTC_INC_NS_SHFT) | frac;
emac_reg_w32(hw, EMAC_1588, EMAC_P1588_INC_VALUE_2,
(inc >> 16) & INC_VALUE_2_BMSK);
emac_reg_w32(hw, EMAC_1588, EMAC_P1588_INC_VALUE_1,
inc & INC_VALUE_1_BMSK);
wmb(); /* ensure P1588_INC_VALUE is set before we proceed */
return 0;
}
/* Look up the timestamp latency entry matching @phy_mode and
 * @link_speed in the static emac_ptp_hw_delay table.
 * Returns NULL when no entry matches.
 *
 * Fix: removed the redundant initializer of the cursor -- it was a dead
 * store, immediately overwritten by the for-loop initialization.
 */
static const struct emac_tstamp_hw_delay *emac_get_ptp_hw_delay(u32 link_speed,
								int phy_mode)
{
	const struct emac_tstamp_hw_delay *info;

	/* table is terminated by an all-zero entry (phy_mode == 0) */
	for (info = emac_ptp_hw_delay; info->phy_mode; info++) {
		if (info->phy_mode == phy_mode && info->speed == link_speed)
			return info;
	}

	return NULL;
}
/* Refresh the cached rx/tx timestamp offsets for the current link speed
 * and, in one-step mode, program the hardware tx latency compensation.
 * Offsets fall back to 0 when no table entry matches.  Always returns 0.
 */
static int emac_hw_adjust_tstamp_offset(struct emac_hw *hw,
enum emac_ptp_clk_mode clk_mode,
u32 link_speed)
{
const struct emac_tstamp_hw_delay *delay_info;
struct emac_phy *phy = &emac_hw_get_adap(hw)->phy;
delay_info = emac_get_ptp_hw_delay(link_speed, phy->phy_interface);
if (clk_mode == emac_ptp_clk_mode_oc_one_step) {
u32 latency = (delay_info) ? delay_info->tx : 0;
emac_reg_update32(hw, EMAC_1588, EMAC_P1588_TX_LATENCY,
TX_LATENCY_BMSK, latency << TX_LATENCY_SHFT);
wmb(); /* ensure that the latency time is flushed to HW */
}
if (delay_info) {
hw->tstamp_rx_offset = delay_info->rx;
hw->tstamp_tx_offset = delay_info->tx;
} else {
hw->tstamp_rx_offset = 0;
hw->tstamp_tx_offset = 0;
}
return 0;
}
/* Enable or disable hardware TX timestamping.
 * Enabling pulse-resets the TX timestamp FIFO (assert then deassert
 * TS_TX_FIFO_SYNC_RST) before setting TX_TS_ENABLE; each CSR write is
 * fenced with wmb() so the sequence reaches HW in order.  Software
 * state is mirrored in the HW_TS_TX_EN flag.  Always returns 0.
 */
static int emac_hw_config_tx_tstamp(struct emac_hw *hw, bool enable)
{
	if (enable) {
		/* Reset the TX timestamp FIFO */
		emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1,
				  TS_TX_FIFO_SYNC_RST, TS_TX_FIFO_SYNC_RST);
		wmb(); /* ensure that the Tx timestamp reset is flushed to HW */
		emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1,
				  TS_TX_FIFO_SYNC_RST, 0);
		wmb(); /* ensure that the Tx timestamp is out of reset */
		emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1,
				  TX_TS_ENABLE, TX_TS_ENABLE);
		wmb(); /* ensure enabling the Tx timestamp is flushed to HW */
		SET_FLAG(hw, HW_TS_TX_EN);
	} else {
		emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1,
				  TX_TS_ENABLE, 0);
		wmb(); /* ensure disabling the Tx timestamp is flushed to HW */
		CLR_FLAG(hw, HW_TS_TX_EN);
	}
	return 0;
}
/* Enable or disable hardware RX timestamping.
 * Enabling pulse-resets the RX timestamp FIFO; unlike the TX path
 * there is no separate RX enable bit touched here — only the
 * HW_TS_RX_EN software flag is tracked.  Always returns 0.
 */
static int emac_hw_config_rx_tstamp(struct emac_hw *hw, bool enable)
{
	if (enable) {
		/* Reset the RX timestamp FIFO */
		emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1,
				  TS_RX_FIFO_SYNC_RST, TS_RX_FIFO_SYNC_RST);
		wmb(); /* ensure that the Rx timestamp reset is flushed to HW */
		emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1,
				  TS_RX_FIFO_SYNC_RST, 0);
		wmb(); /* ensure that the Rx timestamp is out of reset */
		SET_FLAG(hw, HW_TS_RX_EN);
	} else {
		CLR_FLAG(hw, HW_TS_RX_EN);
	}
	return 0;
}
/* Power down the 1588 (PTP) core: turn off RX/TX timestamping if
 * active, gate the 1588 clocks, bypass the core and mask its expanded
 * interrupts.  Clears HW_PTP_EN.  Always returns 0.
 */
static int emac_hw_1588_core_disable(struct emac_hw *hw)
{
	if (TEST_FLAG(hw, HW_TS_RX_EN))
		emac_hw_config_rx_tstamp(hw, false);
	if (TEST_FLAG(hw, HW_TS_TX_EN))
		emac_hw_config_tx_tstamp(hw, false);
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1,
			  DIS_1588_CLKS, DIS_1588_CLKS);
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR10,
			  DIS_1588, DIS_1588);
	emac_reg_update32(hw, EMAC_1588, EMAC_P1588_CTRL_REG,
			  BYPASS_O, BYPASS_O);
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_MASK, 0);
	wmb(); /* ensure that disabling PTP is flushed to HW */
	CLR_FLAG(hw, HW_PTP_EN);
	return 0;
}
/* Bring up the 1588 (PTP) core.
 *
 * @mode:        grandmaster vs slave (selects GRANDMASTER_MODE below)
 * @clk_mode:    one-step or two-step ordinary clock; anything else is
 *               rejected with -EINVAL
 * @link_speed:  current link speed; non-gigabit links set ETH_MODE_SW
 * @frac_ns_adj: fractional-ns correction for the RTC increment
 *
 * The register writes below are sequenced with wmb() barriers; the
 * order (ungate clocks -> unbypass -> configure -> reset FIFOs ->
 * grandmaster config) matters.  Sets HW_PTP_EN on success.
 */
static int emac_hw_1588_core_enable(struct emac_hw *hw,
				    enum emac_ptp_mode mode,
				    enum emac_ptp_clk_mode clk_mode,
				    u32 link_speed,
				    s32 frac_ns_adj)
{
	if (clk_mode != emac_ptp_clk_mode_oc_one_step &&
	    clk_mode != emac_ptp_clk_mode_oc_two_step) {
		struct emac_adapter *adpt = emac_hw_get_adap(hw);

		emac_dbg(emac_hw_get_adap(hw), hw, adpt->netdev, "invalid ptp clk mode %d\n",
			 clk_mode);
		return -EINVAL;
	}
	/* ungate the 1588 clocks and take the core out of bypass */
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1,
			  DIS_1588_CLKS, 0);
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR10, DIS_1588, 0);
	emac_reg_update32(hw, EMAC_1588, EMAC_P1588_CTRL_REG, BYPASS_O, 0);
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_MASK, 0);
	emac_reg_update32(hw, EMAC_1588, EMAC_P1588_RTC_EXPANDED_CONFIG,
			  RTC_READ_MODE, RTC_READ_MODE);
	emac_reg_update32(hw, EMAC_1588, EMAC_P1588_CTRL_REG, ATTACH_EN, 0);
	wmb(); /* ensure P1588_CTRL_REG is set before we proceed */
	emac_hw_adjust_tstamp_offset(hw, clk_mode, link_speed);
	emac_reg_update32(hw, EMAC_1588, EMAC_P1588_CTRL_REG, CLOCK_MODE_BMSK,
			  (clk_mode << CLOCK_MODE_SHFT));
	emac_reg_update32(hw, EMAC_1588, EMAC_P1588_CTRL_REG, ETH_MODE_SW,
			  (link_speed == EMAC_LINK_SPEED_1GB_FULL) ?
			  0 : ETH_MODE_SW);
	/* set RTC increment every 8ns to fit 125MHZ clock */
	emac_hw_set_rtc_inc_value(hw, frac_ns_adj);
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR10,
			  RD_CLR_1588, RD_CLR_1588);
	wmb(); /* ensure clear-on-read is enabled on PTP config registers */
	/* dummy read to flush any already-pending status (clear-on-read) */
	emac_reg_r32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_STATUS);
	/* Reset the timestamp FIFO */
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1,
			  TS_FIFO_SYNC_RST, TS_FIFO_SYNC_RST);
	wmb(); /* ensure timestamp reset is complete */
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1,
			  TS_FIFO_SYNC_RST, 0);
	wmb(); /* ensure timestamp is out of reset */
	if (mode == emac_ptp_mode_master)
		emac_reg_update32(hw, EMAC_1588,
				  EMAC_P1588_GRAND_MASTER_CONFIG_0,
				  GRANDMASTER_MODE | GM_PPS_SYNC,
				  GRANDMASTER_MODE);
	else
		emac_reg_update32(hw, EMAC_1588,
				  EMAC_P1588_GRAND_MASTER_CONFIG_0,
				  GRANDMASTER_MODE | GM_PPS_SYNC, 0);
	wmb(); /* ensure gradmaster mode setting is flushed to HW */
	SET_FLAG(hw, HW_PTP_EN);
	return 0;
}
/* Load the RTC with @ts: the top seconds word (PRELOADED_5) is zeroed,
 * the low 32 bits of tv_sec go into PRELOADED_4/3 and tv_nsec into
 * PRELOADED_2/1 (16-bit words, high to low); LOAD_RTC latches the
 * preloaded value.  Caller must hold ptp_lock.
 */
static void rtc_settime(struct emac_hw *hw, const struct timespec64 *ts)
{
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_RTC_PRELOADED_5, 0);
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_RTC_PRELOADED_4,
		     (ts->tv_sec >> 16) & RTC_PRELOADED_4_BMSK);
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_RTC_PRELOADED_3,
		     ts->tv_sec & RTC_PRELOADED_3_BMSK);
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_RTC_PRELOADED_2,
		     (ts->tv_nsec >> 16) & RTC_PRELOADED_2_BMSK);
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_RTC_PRELOADED_1,
		     ts->tv_nsec & RTC_PRELOADED_1_BMSK);
	emac_reg_update32(hw, EMAC_1588, EMAC_P1588_RTC_EXPANDED_CONFIG,
			  LOAD_RTC, LOAD_RTC);
	wmb(); /* ensure RTC setting is flushed to HW */
}
/* Read the RTC into @ts.  RTC_SNAPSHOT latches the running counters;
 * seconds are then assembled from REAL_TIME_5/4/3 (REAL_TIME_5 ends up
 * shifted by 48, REAL_TIME_4 by 16) and nanoseconds from
 * REAL_TIME_2/1.  Caller must hold ptp_lock.
 */
static void rtc_gettime(struct emac_hw *hw, struct timespec64 *ts)
{
	emac_reg_update32(hw, EMAC_1588, EMAC_P1588_RTC_EXPANDED_CONFIG,
			  RTC_SNAPSHOT, RTC_SNAPSHOT);
	wmb(); /* ensure snapshot is saved before reading it back */
	ts->tv_sec = emac_reg_field_r32(hw, EMAC_1588, EMAC_P1588_REAL_TIME_5,
					REAL_TIME_5_BMSK, REAL_TIME_5_SHFT);
	ts->tv_sec = (u64)ts->tv_sec << 32;
	ts->tv_sec |= emac_reg_field_r32(hw, EMAC_1588, EMAC_P1588_REAL_TIME_4,
					 REAL_TIME_4_BMSK, REAL_TIME_4_SHFT);
	ts->tv_sec <<= 16;
	ts->tv_sec |= emac_reg_field_r32(hw, EMAC_1588, EMAC_P1588_REAL_TIME_3,
					 REAL_TIME_3_BMSK, REAL_TIME_3_SHFT);
	ts->tv_nsec = emac_reg_field_r32(hw, EMAC_1588, EMAC_P1588_REAL_TIME_2,
					 REAL_TIME_2_BMSK, REAL_TIME_2_SHFT);
	ts->tv_nsec <<= 16;
	ts->tv_nsec |= emac_reg_field_r32(hw, EMAC_1588, EMAC_P1588_REAL_TIME_1,
					  REAL_TIME_1_BMSK, REAL_TIME_1_SHFT);
}
/* Apply a signed offset of @delta nanoseconds to the RTC.  The delta
 * is split into seconds/nanoseconds, written to the SEC_OFFSET and
 * NANO_OFFSET registers, and committed by writing ADJUST_RTC.
 * Caller must hold ptp_lock.
 */
static void rtc_adjtime(struct emac_hw *hw, s64 delta)
{
	s32 delta_ns;
	s32 delta_sec;

	delta_sec = div_s64_rem(delta, 1000000000LL, &delta_ns);
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_SEC_OFFSET_3, 0);
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_SEC_OFFSET_2,
		     (delta_sec >> 16) & SEC_OFFSET_2_BMSK);
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_SEC_OFFSET_1,
		     delta_sec & SEC_OFFSET_1_BMSK);
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_NANO_OFFSET_2,
		     (delta_ns >> 16) & NANO_OFFSET_2_BMSK);
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_NANO_OFFSET_1,
		     (delta_ns & NANO_OFFSET_1_BMSK));
	emac_reg_w32(hw, EMAC_1588, EMAC_P1588_ADJUST_RTC, 1);
	wmb(); /* ensure that RTC adjustment is flushed to HW */
}
static void rtc_ns_sync_pps_in(struct emac_hw *hw)
{
u32 ts;
s64 delta = 0;
ts = emac_reg_r32(hw, EMAC_1588, EMAC_P1588_GM_PPS_TIMESTAMP_2);
ts <<= 16;
ts |= emac_reg_r32(hw, EMAC_1588, EMAC_P1588_GM_PPS_TIMESTAMP_1);
if (ts < 500000000)
delta = 0LL - (s64)ts;
else
delta = 1000000000LL - (s64)ts;
if (delta) {
struct emac_adapter *adpt = emac_hw_get_adap(hw);
rtc_adjtime(hw, delta);
emac_dbg(emac_hw_get_adap(hw), intr, adpt->netdev,
"RTC_SYNC: gm_pps_tstamp_ns 0x%08x, adjust %lldns\n",
ts, delta);
}
}
/* ptp_lock-taking wrapper around rtc_ns_sync_pps_in(); used from the
 * PPS_IN interrupt path (see emac_ptp_intr()).
 */
static void emac_ptp_rtc_ns_sync(struct emac_hw *hw)
{
	unsigned long flag = 0;

	spin_lock_irqsave(&hw->ptp_lock, flag);
	rtc_ns_sync_pps_in(hw);
	spin_unlock_irqrestore(&hw->ptp_lock, flag);
}
/* Bring up PTP: enable the 1588 core (initially assuming a 1 Gb/s
 * link; emac_ptp_set_linkspeed() retunes later), seed the RTC from
 * system wall-clock time and arm the PPS_IN interrupt.  No-op when
 * HW_PTP_EN is already set.  Returns 0 or the core-enable error.
 */
int emac_ptp_config(struct emac_hw *hw)
{
	struct timespec64 ts;
	int ret = 0;
	unsigned long flag = 0;

	spin_lock_irqsave(&hw->ptp_lock, flag);
	if (TEST_FLAG(hw, HW_PTP_EN))
		goto unlock_out;
	hw->frac_ns_adj = get_frac_ns_adj_from_tbl(hw);
	ret = emac_hw_1588_core_enable(hw,
				       hw->ptp_mode,
				       hw->ptp_clk_mode,
				       SPEED_1000,
				       hw->frac_ns_adj);
	if (ret)
		goto unlock_out;
	ktime_get_real_ts64(&ts);
	rtc_settime(hw, &ts);
	/* unmask the PTP summary interrupt and enable PPS-in events */
	emac_hw_get_adap(hw)->irq[0].mask |= PTP_INT;
	hw->ptp_intr_mask = PPS_IN;
unlock_out:
	spin_unlock_irqrestore(&hw->ptp_lock, flag);
	return ret;
}
/* Tear down PTP: disable the 1588 core if running and mask the PTP
 * interrupt sources.  Returns 0 or the core-disable error.
 */
int emac_ptp_stop(struct emac_hw *hw)
{
	int ret = 0;
	unsigned long flag = 0;

	spin_lock_irqsave(&hw->ptp_lock, flag);
	if (TEST_FLAG(hw, HW_PTP_EN))
		ret = emac_hw_1588_core_disable(hw);
	hw->ptp_intr_mask = 0;
	emac_hw_get_adap(hw)->irq[0].mask &= ~PTP_INT;
	spin_unlock_irqrestore(&hw->ptp_lock, flag);
	return ret;
}
/* Retune the 1588 core for a link-speed change: ETH_MODE_SW is cleared
 * for gigabit and set otherwise, and the cached RX/TX timestamp
 * offsets are refreshed for the new speed.  Always returns 0.
 */
int emac_ptp_set_linkspeed(struct emac_hw *hw, u32 link_speed)
{
	unsigned long flag = 0;

	spin_lock_irqsave(&hw->ptp_lock, flag);
	emac_reg_update32(hw, EMAC_1588, EMAC_P1588_CTRL_REG, ETH_MODE_SW,
			  (link_speed == SPEED_1000) ? 0 :
			  ETH_MODE_SW);
	wmb(); /* ensure ETH_MODE_SW is set before we proceed */
	emac_hw_adjust_tstamp_offset(hw, hw->ptp_clk_mode, link_speed);
	spin_unlock_irqrestore(&hw->ptp_lock, flag);
	return 0;
}
/* PTP interrupt handler: read the expanded interrupt status (the read
 * also clears it when RD_CLR_1588 is set, as done at core enable),
 * filter by the software mask, and resync the RTC on a PPS_IN event.
 */
void emac_ptp_intr(struct emac_hw *hw)
{
	u32 isr, status;
	struct emac_adapter *adpt = emac_hw_get_adap(hw);

	isr = emac_reg_r32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_STATUS);
	status = isr & hw->ptp_intr_mask;
	emac_dbg(emac_hw_get_adap(hw), intr, adpt->netdev,
		 "receive ptp interrupt: isr 0x%x\n", isr);
	if (status & PPS_IN)
		emac_ptp_rtc_ns_sync(hw);
}
/* Set the PTP RTC to @ts under ptp_lock.
 * Returns -EPERM when the 1588 core is not enabled, 0 otherwise.
 */
static int emac_ptp_settime(struct emac_hw *hw, const struct timespec64 *ts)
{
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&hw->ptp_lock, flags);
	if (TEST_FLAG(hw, HW_PTP_EN))
		rtc_settime(hw, ts);
	else
		rc = -EPERM;
	spin_unlock_irqrestore(&hw->ptp_lock, flags);

	return rc;
}
/* Read the PTP RTC into @ts under ptp_lock.
 * Returns -EPERM when the 1588 core is not enabled, 0 otherwise.
 */
static int emac_ptp_gettime(struct emac_hw *hw, struct timespec64 *ts)
{
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&hw->ptp_lock, flags);
	if (TEST_FLAG(hw, HW_PTP_EN))
		rtc_gettime(hw, ts);
	else
		rc = -EPERM;
	spin_unlock_irqrestore(&hw->ptp_lock, flags);

	return rc;
}
/* Shift the PTP RTC by @delta nanoseconds under ptp_lock.
 * Returns -EPERM when the 1588 core is not enabled, 0 otherwise.
 */
int emac_ptp_adjtime(struct emac_hw *hw, s64 delta)
{
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&hw->ptp_lock, flags);
	if (TEST_FLAG(hw, HW_PTP_EN))
		rtc_adjtime(hw, delta);
	else
		rc = -EPERM;
	spin_unlock_irqrestore(&hw->ptp_lock, flags);

	return rc;
}
/* SIOCSHWTSTAMP-style handler: apply the user's hwtstamp_config.
 * Requires the 1588 core to be up (-EPERM otherwise).  Any RX filter
 * other than NONE is widened to HWTSTAMP_FILTER_ALL and the adjusted
 * config is copied back to user space.
 * NOTE(review): cfg.flags is not validated and @cmd is unused; the
 * accepted config does not appear to be saved anywhere visible here
 * for a later get — confirm against the rest of the driver.
 */
int emac_tstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct emac_hw *hw = &adpt->hw;
	struct hwtstamp_config cfg;

	if (!TEST_FLAG(hw, HW_PTP_EN))
		return -EPERM;
	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;
	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
		emac_hw_config_tx_tstamp(hw, false);
		break;
	case HWTSTAMP_TX_ON:
		if (TEST_FLAG(hw, HW_TS_TX_EN))
			break;
		emac_hw_config_tx_tstamp(hw, true);
		break;
	default:
		return -ERANGE;
	}
	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		emac_hw_config_rx_tstamp(hw, false);
		break;
	default:
		/* HW timestamps all RX frames or none; report ALL back */
		cfg.rx_filter = HWTSTAMP_FILTER_ALL;
		if (TEST_FLAG(hw, HW_TS_RX_EN))
			break;
		emac_hw_config_rx_tstamp(hw, true);
		break;
	}
	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
	       -EFAULT : 0;
}
/* sysfs "tstamp" store: any write resets the PTP RTC to the current
 * system wall-clock time (the written bytes are ignored).  Returns
 * @count on success or the settime error (-EPERM when PTP is off).
 */
static ssize_t emac_ptp_sysfs_tstamp_set(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct emac_adapter *adpt = netdev_priv(to_net_dev(dev));
	struct timespec64 now;
	int rc;

	ktime_get_real_ts64(&now);
	rc = emac_ptp_settime(&adpt->hw, &now);

	return rc ? rc : count;
}
/* sysfs "tstamp" show: print the PTP RTC time next to the system
 * wall-clock time for comparison.  Note that tv_sec is truncated to
 * int for display here, so the printed seconds wrap beyond 32 bits.
 */
static ssize_t emac_ptp_sysfs_tstamp_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct emac_adapter *adpt = netdev_priv(to_net_dev(dev));
	struct timespec64 ts = { 0 };
	struct timespec64 ts_now = { 0 };
	int count = PAGE_SIZE;
	ssize_t retval;

	retval = emac_ptp_gettime(&adpt->hw, &ts);
	if (retval)
		return retval;
	ktime_get_real_ts64(&ts_now);
	retval = scnprintf(buf, count,
			   "%12u.%09u tstamp  %12u.%08u time-of-day\n",
			   (int)ts.tv_sec, (int)ts.tv_nsec,
			   (int)ts_now.tv_sec, (int)ts_now.tv_nsec);
	return retval;
}
/* Display ethernet MAC time as well as the time of the next MAC PPS
 * pulse: "<sec> <nsec> <pulse-now?> <ns-until-next-pulse>".
 */
static ssize_t emac_ptp_sysfs_mtnp_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct emac_adapter *adpt = netdev_priv(to_net_dev(dev));
	int count = PAGE_SIZE;
	struct timespec64 ts;
	ssize_t ret;

	ret = emac_ptp_gettime(&adpt->hw, &ts);
	if (ret)
		return ret;
	/* tv_sec is 64-bit (time64_t): print it with %lld and an explicit
	 * cast so the format is also correct on 32-bit kernels, where the
	 * old "%ld" was a format/argument mismatch.  tv_nsec stays long.
	 */
	return scnprintf(buf, count, "%lld %ld %d %ld\n",
			 (long long)ts.tv_sec,
			 ts.tv_nsec,
			 (ts.tv_nsec == 0) ? 1 : 0,
			 (ts.tv_nsec == 0) ? 0 : (NSEC_PER_SEC - ts.tv_nsec));
}
/* Do a "slam" of a very particular time into the time registers:
 * parse "<sec> <nsec>" from @buf and write it directly to the RTC.
 * Returns @count on success, -EINVAL on parse failure, or the settime
 * error.  NOTE(review): nsec is not range-checked against
 * NSEC_PER_SEC — confirm callers only write normalized values.
 */
static ssize_t emac_ptp_sysfs_slam(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct emac_adapter *adpt = netdev_priv(to_net_dev(dev));
	u32 sec = 0;
	u32 nsec = 0;
	ssize_t ret = -EINVAL;

	if (sscanf(buf, "%u %u", &sec, &nsec) == 2) {
		struct timespec64 ts = {sec, nsec};

		ret = emac_ptp_settime(&adpt->hw, &ts);
		if (ret) {
			pr_err("%s: emac_ptp_settime failed.\n", __func__);
			return ret;
		}
		ret = count;
	} else {
		pr_err("%s: sscanf failed.\n", __func__);
	}
	return ret;
}
/* Do a coarse time adjustment: read the RTC, add the signed nanosecond
 * offset parsed from @buf, and write the result back via settime.
 * Returns @count on success, -EINVAL on parse failure, or the
 * gettime/settime error.
 */
static ssize_t emac_ptp_sysfs_cadj(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct emac_adapter *adpt = netdev_priv(to_net_dev(dev));
	s64 offset = 0;
	ssize_t ret = -EINVAL;

	if (!kstrtos64(buf, 10, &offset)) {
		struct timespec64 ts;
		u64 new_offset;
		u32 sec;
		u32 nsec;

		ret = emac_ptp_gettime(&adpt->hw, &ts);
		if (ret) {
			pr_err("%s: emac_ptp_gettime failed.\n", __func__);
			return ret;
		}
		sec = ts.tv_sec;
		nsec = ts.tv_nsec;
		/* flatten to ns, apply offset, then split back */
		new_offset = (((u64)sec * NSEC_PER_SEC) +
			      (u64)nsec) + offset;
		nsec = do_div(new_offset, NSEC_PER_SEC);
		sec = new_offset;
		ts.tv_sec = sec;
		ts.tv_nsec = nsec;
		ret = emac_ptp_settime(&adpt->hw, &ts);
		if (ret) {
			pr_err("%s: emac_ptp_settime failed.\n", __func__);
			return ret;
		}
		ret = count;
	} else {
		/* parsing uses kstrtos64(); the old message blamed sscanf */
		pr_err("%s: kstrtos64 failed.\n", __func__);
	}
	return ret;
}
/* Do a fine time adjustment (i.e. have the timestamp registers adjust
 * themselves by the passed amount).  Returns @count on success,
 * -EINVAL on parse failure, or the adjtime error.
 */
static ssize_t emac_ptp_sysfs_fadj(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct emac_adapter *adpt = netdev_priv(to_net_dev(dev));
	s64 offset = 0;
	ssize_t ret = -EINVAL;

	if (!kstrtos64(buf, 10, &offset)) {
		ret = emac_ptp_adjtime(&adpt->hw, offset);
		if (ret) {
			pr_err("%s: emac_ptp_adjtime failed.\n", __func__);
			return ret;
		}
		ret = count;
	} else {
		/* parsing uses kstrtos64(); the old message blamed sscanf */
		pr_err("%s: kstrtos64 failed.\n", __func__);
	}
	return ret;
}
/* sysfs "ptp_mode" show: report the current PTP role. */
static ssize_t emac_ptp_sysfs_mode_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct emac_adapter *adpt = netdev_priv(to_net_dev(dev));
	const char *role;

	role = (adpt->hw.ptp_mode == emac_ptp_mode_master) ? "master"
							   : "slave";

	return scnprintf(buf, PAGE_SIZE, "%s\n", role);
}
/* sysfs "ptp_mode" store: switch between "master" and "slave".
 * Accepts the keyword with or without a trailing newline — sysfs
 * writes from echo(1) include one, which the previous strcmp()
 * comparison always rejected.  If the 1588 core is running it is
 * bounced so the new mode takes effect, preserving the RX/TX
 * timestamp enables and interrupt mask.  Returns @count, or -EINVAL
 * for an unrecognized keyword.
 */
static ssize_t emac_ptp_sysfs_mode_set(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct emac_adapter *adpt = netdev_priv(to_net_dev(dev));
	struct emac_hw *hw = &adpt->hw;
	struct phy_device *phydev = adpt->phydev;
	enum emac_ptp_mode mode;
	size_t len = strlen(buf);

	/* tolerate the '\n' appended by "echo master > ptp_mode" */
	if (len && buf[len - 1] == '\n')
		len--;

	if (len == 6 && !strncmp(buf, "master", 6))
		mode = emac_ptp_mode_master;
	else if (len == 5 && !strncmp(buf, "slave", 5))
		mode = emac_ptp_mode_slave;
	else
		return -EINVAL;

	if (mode == hw->ptp_mode)
		goto out;

	if (TEST_FLAG(hw, HW_PTP_EN)) {
		bool rx_tstamp_enable = TEST_FLAG(hw, HW_TS_RX_EN);
		bool tx_tstamp_enable = TEST_FLAG(hw, HW_TS_TX_EN);

		/* restart the 1588 core in the new role */
		emac_hw_1588_core_disable(hw);
		emac_hw_1588_core_enable(hw, mode, hw->ptp_clk_mode,
					 phydev->speed, hw->frac_ns_adj);
		if (rx_tstamp_enable)
			emac_hw_config_rx_tstamp(hw, true);
		if (tx_tstamp_enable)
			emac_hw_config_tx_tstamp(hw, true);

		emac_reg_w32(hw, EMAC_1588, EMAC_P1588_PTP_EXPANDED_INT_MASK,
			     hw->ptp_intr_mask);
		wmb(); /* ensure PTP_EXPANDED_INT_MASK is set */
	}

	hw->ptp_mode = mode;
out:
	return count;
}
/* sysfs "frac_ns_adj" show: print the active fractional-ns RTC
 * correction.  Fails with -EPERM while the 1588 core is disabled.
 */
static ssize_t emac_ptp_sysfs_frac_ns_adj_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct emac_adapter *adpt = netdev_priv(to_net_dev(dev));
	struct emac_hw *hw = &adpt->hw;

	if (!TEST_FLAG(hw, HW_PTP_EN))
		return -EPERM;

	return scnprintf(buf, PAGE_SIZE, "%d\n", adpt->hw.frac_ns_adj);
}
/* sysfs "frac_ns_adj" store: parse a signed fractional-ns correction,
 * validate it, reprogram the RTC increment and cache the value.
 * Returns -EPERM while PTP is off, -EINVAL for a bad value.
 */
static ssize_t emac_ptp_sysfs_frac_ns_adj_set(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	struct emac_adapter *adpt = netdev_priv(to_net_dev(dev));
	struct emac_hw *hw = &adpt->hw;
	s32 new_adj;

	if (!TEST_FLAG(hw, HW_PTP_EN))
		return -EPERM;

	if (kstrtos32(buf, 0, &new_adj))
		return -EINVAL;

	if (!is_valid_frac_ns_adj(new_adj))
		return -EINVAL;

	emac_hw_set_rtc_inc_value(hw, new_adj);
	hw->frac_ns_adj = new_adj;

	return count;
}
/* sysfs attributes exposed on the net device: tstamp (rw), mtnp (ro),
 * slam/cadj/fadj (wo), frac_ns_adj (rw), ptp_mode (rw).  The list is
 * terminated by __ATTR_NULL for the create loop below.
 */
static struct device_attribute ptp_sysfs_devattr[] = {
	__ATTR(tstamp, 0660,
	       emac_ptp_sysfs_tstamp_show, emac_ptp_sysfs_tstamp_set),
	__ATTR(mtnp, 0440, emac_ptp_sysfs_mtnp_show, NULL),
	__ATTR(slam, 0220, NULL, emac_ptp_sysfs_slam),
	__ATTR(cadj, 0220, NULL, emac_ptp_sysfs_cadj),
	__ATTR(fadj, 0220, NULL, emac_ptp_sysfs_fadj),
	__ATTR(frac_ns_adj, 0660,
	       emac_ptp_sysfs_frac_ns_adj_show, emac_ptp_sysfs_frac_ns_adj_set),
	__ATTR(ptp_mode, 0660,
	       emac_ptp_sysfs_mode_show, emac_ptp_sysfs_mode_set),
	__ATTR_NULL
};
/* Create the PTP sysfs attributes on @netdev.  Stops at the first
 * failure (already-created files are left in place).
 * NOTE(review): nothing in this file removes these files on teardown —
 * confirm the device core cleans them up with the net device.
 */
static void emac_ptp_sysfs_create(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct device_attribute *devattr;

	for (devattr = ptp_sysfs_devattr; devattr->attr.name; devattr++) {
		if (device_create_file(&netdev->dev, devattr)) {
			emac_err(adpt,
				 "emac_ptp: failed to create sysfs files\n");
			break;
		}
	}
}
/* Read PTP devicetree properties:
 *  - "qcom,emac-ptp-grandmaster" (bool): selects master vs slave mode;
 *  - "qcom,emac-ptp-frac-ns-adj": optional table of per-ref-clock
 *    fractional-ns corrections, copied into a kzalloc'd array that is
 *    freed in emac_ptp_remove().
 * On any table error the driver continues with no adjustment table.
 */
static void emac_ptp_of_get_property(struct emac_adapter *adpt)
{
	struct emac_hw *hw = &adpt->hw;
	struct device *parent = adpt->netdev->dev.parent;
	struct device_node *node = parent->of_node;
	const int *tbl;	/* only used as a presence/size probe */
	struct emac_ptp_frac_ns_adj *adj_tbl = NULL;
	int size, tbl_size;

	if (of_property_read_bool(node, "qcom,emac-ptp-grandmaster"))
		hw->ptp_mode = emac_ptp_mode_master;
	else
		hw->ptp_mode = emac_ptp_mode_slave;
	hw->frac_ns_adj_tbl = NULL;
	hw->frac_ns_adj_tbl_sz = 0;
	tbl = of_get_property(node, "qcom,emac-ptp-frac-ns-adj", &size);
	if (!tbl)
		return;
	/* property length must be a whole number of table entries */
	if ((size % sizeof(struct emac_ptp_frac_ns_adj))) {
		emac_err(adpt, "emac_ptp: invalid frac-ns-adj tbl size(%d)\n",
			 size);
		return;
	}
	tbl_size = size / sizeof(struct emac_ptp_frac_ns_adj);
	adj_tbl = kzalloc(size, GFP_KERNEL);
	if (!adj_tbl)
		return;
	if (of_property_read_u32_array(node, "qcom,emac-ptp-frac-ns-adj",
				       (u32 *)adj_tbl, size / sizeof(u32))) {
		emac_err(adpt, "emac_ptp: failed to read frac-ns-adj tbl\n");
		kfree(adj_tbl);
		return;
	}
	hw->frac_ns_adj_tbl = adj_tbl;
	hw->frac_ns_adj_tbl_sz = tbl_size;
}
/* One-time PTP setup at probe: read devicetree properties, initialize
 * the ptp_lock, create the sysfs attributes, and leave the 1588 core
 * disabled until emac_ptp_config() runs.  Returns the core-disable
 * result (0 in the visible implementation).
 */
int emac_ptp_init(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct emac_hw *hw = &adpt->hw;
	int ret = 0;

	emac_ptp_of_get_property(adpt);
	spin_lock_init(&hw->ptp_lock);
	emac_ptp_sysfs_create(netdev);
	ret = emac_hw_1588_core_disable(hw);
	return ret;
}
void emac_ptp_remove(struct net_device *netdev)
{
struct emac_adapter *adpt = netdev_priv(netdev);
struct emac_hw *hw = &adpt->hw;
kfree(hw->frac_ns_adj_tbl);
}

View file

@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _EMAC_PTP_H_
#define _EMAC_PTP_H_

/* IEEE 1588 (PTP) interface of the EMAC driver: probe-time
 * init/remove, runtime core start/stop, link-speed retuning, the HW
 * timestamp ioctl handler and the PTP interrupt service routine.
 */
int emac_ptp_init(struct net_device *netdev);
void emac_ptp_remove(struct net_device *netdev);
int emac_ptp_config(struct emac_hw *hw);
int emac_ptp_stop(struct emac_hw *hw);
int emac_ptp_set_linkspeed(struct emac_hw *hw, u32 speed);
int emac_tstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
void emac_ptp_intr(struct emac_hw *hw);
#endif /* _EMAC_PTP_H_ */

View file

@ -0,0 +1,159 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __EMAC_REGS_H__
#define __EMAC_REGS_H__

/* Register offset map for the EMAC block, its wrapper CSR region and
 * the 1588 (PTP) core.  Offsets are relative to each region's base.
 */

#define SGMII_PHY_VERSION_1 1
#define SGMII_PHY_VERSION_2 2

/* EMAC register offsets */
#define EMAC_DMA_MAS_CTRL 0x001400
#define EMAC_TIMER_INIT_VALUE 0x001404
#define EMAC_IRQ_MOD_TIM_INIT 0x001408
#define EMAC_BLK_IDLE_STS 0x00140c
#define EMAC_MDIO_CTRL 0x001414
#define EMAC_PHY_STS 0x001418
#define EMAC_PHY_LINK_DELAY 0x00141c
#define EMAC_SYS_ALIV_CTRL 0x001434
#define EMAC_MDIO_EX_CTRL 0x001440
#define EMAC_MAC_CTRL 0x001480
#define EMAC_MAC_IPGIFG_CTRL 0x001484
#define EMAC_MAC_STA_ADDR0 0x001488
#define EMAC_MAC_STA_ADDR1 0x00148c
#define EMAC_HASH_TAB_REG0 0x001490
#define EMAC_HASH_TAB_REG1 0x001494
#define EMAC_MAC_HALF_DPLX_CTRL 0x001498
#define EMAC_MAX_FRAM_LEN_CTRL 0x00149c
#define EMAC_WOL_CTRL0 0x0014a0
#define EMAC_WOL_CTRL1 0x0014a4
#define EMAC_WOL_CTRL2 0x0014a8
#define EMAC_RSS_KEY0 0x0014b0
#define EMAC_RSS_KEY1 0x0014b4
#define EMAC_RSS_KEY2 0x0014b8
#define EMAC_RSS_KEY3 0x0014bc
#define EMAC_RSS_KEY4 0x0014c0
#define EMAC_RSS_KEY5 0x0014c4
#define EMAC_RSS_KEY6 0x0014c8
#define EMAC_RSS_KEY7 0x0014cc
#define EMAC_RSS_KEY8 0x0014d0
#define EMAC_RSS_KEY9 0x0014d4
#define EMAC_H1TPD_BASE_ADDR_LO 0x0014e0
#define EMAC_H2TPD_BASE_ADDR_LO 0x0014e4
#define EMAC_H3TPD_BASE_ADDR_LO 0x0014e8
#define EMAC_INTER_SRAM_PART9 0x001534
#define EMAC_DESC_CTRL_0 0x001540
#define EMAC_DESC_CTRL_1 0x001544
#define EMAC_DESC_CTRL_2 0x001550
#define EMAC_DESC_CTRL_10 0x001554
#define EMAC_DESC_CTRL_12 0x001558
#define EMAC_DESC_CTRL_13 0x00155c
#define EMAC_DESC_CTRL_3 0x001560
#define EMAC_DESC_CTRL_4 0x001564
#define EMAC_DESC_CTRL_5 0x001568
#define EMAC_DESC_CTRL_14 0x00156c
#define EMAC_DESC_CTRL_15 0x001570
#define EMAC_DESC_CTRL_16 0x001574
#define EMAC_DESC_CTRL_6 0x001578
#define EMAC_DESC_CTRL_8 0x001580
#define EMAC_DESC_CTRL_9 0x001584
#define EMAC_DESC_CTRL_11 0x001588
#define EMAC_TXQ_CTRL_0 0x001590
#define EMAC_TXQ_CTRL_1 0x001594
#define EMAC_TXQ_CTRL_2 0x001598
#define EMAC_RXQ_CTRL_0 0x0015a0
#define EMAC_RXQ_CTRL_1 0x0015a4
#define EMAC_RXQ_CTRL_2 0x0015a8
#define EMAC_RXQ_CTRL_3 0x0015ac
#define EMAC_BASE_CPU_NUMBER 0x0015b8
#define EMAC_DMA_CTRL 0x0015c0
#define EMAC_MAILBOX_0 0x0015e0
#define EMAC_MAILBOX_5 0x0015e4
#define EMAC_MAILBOX_6 0x0015e8
#define EMAC_MAILBOX_13 0x0015ec
#define EMAC_MAILBOX_2 0x0015f4
#define EMAC_MAILBOX_3 0x0015f8
#define EMAC_INT_STATUS 0x001600
#define EMAC_INT_MASK 0x001604
#define EMAC_INT_RETRIG_INIT 0x001608
#define EMAC_MAILBOX_11 0x00160c
#define EMAC_AXI_MAST_CTRL 0x001610
#define EMAC_MAILBOX_12 0x001614
#define EMAC_MAILBOX_9 0x001618
#define EMAC_MAILBOX_10 0x00161c
#define EMAC_ATHR_HEADER_CTRL 0x001620
#define EMAC_RXMAC_STATC_REG0 0x001700
#define EMAC_RXMAC_STATC_REG22 0x001758
#define EMAC_TXMAC_STATC_REG0 0x001760
#define EMAC_TXMAC_STATC_REG24 0x0017c0
#define EMAC_CLK_GATE_CTRL 0x001814
#define EMAC_CORE_HW_VERSION 0x001974
#define EMAC_MISC_CTRL 0x001990
#define EMAC_MAILBOX_7 0x0019e0
#define EMAC_MAILBOX_8 0x0019e4
#define EMAC_IDT_TABLE0 0x001b00
#define EMAC_RXMAC_STATC_REG23 0x001bc8
#define EMAC_RXMAC_STATC_REG24 0x001bcc
#define EMAC_TXMAC_STATC_REG25 0x001bd0
#define EMAC_MAILBOX_15 0x001bd4
#define EMAC_MAILBOX_16 0x001bd8
#define EMAC_INT1_MASK 0x001bf0
#define EMAC_INT1_STATUS 0x001bf4
#define EMAC_INT2_MASK 0x001bf8
#define EMAC_INT2_STATUS 0x001bfc
#define EMAC_INT3_MASK 0x001c00
#define EMAC_INT3_STATUS 0x001c04

/* EMAC_CSR register offsets (wrapper control/status region) */
#define EMAC_EMAC_WRAPPER_CSR1 0x000000
#define EMAC_EMAC_WRAPPER_CSR2 0x000004
#define EMAC_EMAC_WRAPPER_CSR3 0x000008
#define EMAC_EMAC_WRAPPER_CSR5 0x000010
#define EMAC_EMAC_WRAPPER_CSR10 0x000024
#define EMAC_EMAC_WRAPPER_CSR18 0x000044
#define EMAC_EMAC_WRAPPER_STATUS 0x000100
#define EMAC_EMAC_WRAPPER_TX_TS_LO 0x000104
#define EMAC_EMAC_WRAPPER_TX_TS_HI 0x000108
#define EMAC_EMAC_WRAPPER_TX_TS_INX 0x00010c

/* EMAC_1588 register offsets (PTP core) */
#define EMAC_P1588_CTRL_REG 0x000048
#define EMAC_P1588_TX_LATENCY 0x0000d4
#define EMAC_P1588_INC_VALUE_2 0x0000d8
#define EMAC_P1588_INC_VALUE_1 0x0000dc
#define EMAC_P1588_NANO_OFFSET_2 0x0000e0
#define EMAC_P1588_NANO_OFFSET_1 0x0000e4
#define EMAC_P1588_SEC_OFFSET_3 0x0000e8
#define EMAC_P1588_SEC_OFFSET_2 0x0000ec
#define EMAC_P1588_SEC_OFFSET_1 0x0000f0
#define EMAC_P1588_REAL_TIME_5 0x0000f4
#define EMAC_P1588_REAL_TIME_4 0x0000f8
#define EMAC_P1588_REAL_TIME_3 0x0000fc
#define EMAC_P1588_REAL_TIME_2 0x000100
#define EMAC_P1588_REAL_TIME_1 0x000104
#define EMAC_P1588_ADJUST_RTC 0x000110
#define EMAC_P1588_PTP_EXPANDED_INT_MASK 0x0003c4
#define EMAC_P1588_PTP_EXPANDED_INT_STATUS 0x0003c8
#define EMAC_P1588_RTC_EXPANDED_CONFIG 0x000400
#define EMAC_P1588_RTC_PRELOADED_5 0x000404
#define EMAC_P1588_RTC_PRELOADED_4 0x000408
#define EMAC_P1588_RTC_PRELOADED_3 0x00040c
#define EMAC_P1588_RTC_PRELOADED_2 0x000410
#define EMAC_P1588_RTC_PRELOADED_1 0x000414
#define EMAC_P1588_GRAND_MASTER_CONFIG_0 0x000800
#define EMAC_P1588_GM_PPS_TIMESTAMP_2 0x000814
#define EMAC_P1588_GM_PPS_TIMESTAMP_1 0x000818
#endif /* __EMAC_REGS_H__ */

View file

@ -0,0 +1,173 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* Qualcomm Technologies, Inc. EMAC RGMII Controller driver.
*/
#include "emac_main.h"
#include "emac_hw.h"
/* RGMII specific macros */
#define EMAC_RGMII_PLL_LOCK_TIMEOUT (HZ / 1000) /* 1ms */
#define EMAC_RGMII_CORE_IE_C 0x2001
#define EMAC_RGMII_PLL_L_VAL 0x14
#define EMAC_RGMII_PHY_MODE 0

/* Bring the RGMII wrapper out of reset: configure the PHY mode and PLL
 * multiplier, sequence the PHY and PLL through reset (each step fenced
 * with wmb() and a short delay), enable the 125 MHz clock, then poll
 * for PLL lock.  Returns 0 on success, -EIO if the PLL never locks
 * within EMAC_RGMII_PLL_LOCK_TIMEOUT.
 */
static int emac_rgmii_init(struct emac_adapter *adpt)
{
	u32 val;
	unsigned long timeout;
	struct emac_hw *hw = &adpt->hw;

	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR1, 0, FREQ_MODE);
	emac_reg_w32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR18,
		     EMAC_RGMII_CORE_IE_C);
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2,
			  RGMII_PHY_MODE_BMSK,
			  (EMAC_RGMII_PHY_MODE << RGMII_PHY_MODE_SHFT));
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, PHY_RESET, 0);
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR3,
			  PLL_L_VAL_5_0_BMSK,
			  (EMAC_RGMII_PLL_L_VAL << PLL_L_VAL_5_0_SHFT));
	/* Reset PHY PLL */
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR3, 0, PLL_RESET);
	/* Ensure PLL is in reset */
	wmb();
	usleep_range(10, 15);
	/* power down analog sections of PLL and ensure the same */
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR3, 0, BYPASSNL);
	/* Ensure power down is complete before setting configuration */
	wmb();
	usleep_range(10, 15);
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, 0, CKEDGE_SEL);
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2,
			  TX_ID_EN_L, RX_ID_EN_L);
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2,
			  HDRIVE_BMSK, (0x0 << HDRIVE_SHFT));
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, WOL_EN, 0);
	/* Reset PHY */
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, 0, PHY_RESET);
	/* Ensure reset is complete before pulling out of reset */
	wmb();
	usleep_range(10, 15);
	/* Pull PHY out of reset */
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR2, PHY_RESET, 0);
	/* Ensure that pulling PHY out of reset is complete before enabling the
	 * enabling
	 */
	wmb();
	usleep_range(1000, 1500);
	/* Pull PHY PLL out of reset */
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR3, PLL_RESET, 0);
	/* Ensure PLL is enabled before enabling the AHB clock*/
	wmb();
	usleep_range(10, 15);
	emac_reg_update32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_CSR5,
			  0, RMII_125_CLK_EN);
	/* Ensure AHB clock enable is written to HW before the loop waiting for
	 * it to complete
	 */
	wmb();
	/* wait for PLL to lock */
	timeout = jiffies + EMAC_RGMII_PLL_LOCK_TIMEOUT;
	do {
		val = emac_reg_r32(hw, EMAC_CSR, EMAC_EMAC_WRAPPER_STATUS);
		if (val & PLL_LOCK_DET)
			break;
		usleep_range(100, 150);
	} while (time_after_eq(timeout, jiffies));
	/* Decide success on the lock bit itself, not elapsed time: the old
	 * time_after() check could report failure even though the PLL had
	 * just locked, whenever the final poll slept past the deadline.
	 */
	if (!(val & PLL_LOCK_DET)) {
		emac_err(adpt, "PHY PLL lock failed\n");
		return -EIO;
	}
	return 0;
}
/* Configure the RGMII interface at probe time. */
static int emac_rgmii_config(struct platform_device *pdev,
			     struct emac_adapter *adpt)
{
	/* For rgmii phy, the mdio lines are dedicated pins */
	return emac_rgmii_init(adpt);
}

/* RGMII has no separate reset step beyond emac_rgmii_init(). */
static void emac_rgmii_reset_nop(struct emac_adapter *adpt)
{
}

/* Link setup/check without an external PHY is not supported on RGMII;
 * both report -EOPNOTSUPP.
 */
static int emac_rgmii_link_setup_no_ephy(struct emac_adapter *adpt)
{
	emac_err(adpt, "error rgmii can't setup phy link without ephy\n");
	return -EOPNOTSUPP;
}

static int emac_rgmii_link_check_no_ephy(struct emac_adapter *adpt,
					 struct phy_device *phydev)
{
	emac_err(adpt, "error rgmii can't check phy link without ephy\n");
	return -EOPNOTSUPP;
}

/* No power up/down sequencing is needed for RGMII. */
static int emac_rgmii_up_nop(struct emac_adapter *adpt)
{
	return 0;
}

static void emac_rgmii_down_nop(struct emac_adapter *adpt)
{
}
/* Match the TX clock rate to the negotiated link speed:
 * 125 MHz for 1 Gb/s, 25 MHz for 100 Mb/s, 2.5 MHz for 10 Mb/s.
 * Unknown speeds are logged and the clock is left unchanged.
 */
static void emac_rgmii_tx_clk_set_rate(struct emac_adapter *adpt)
{
	struct phy_device *phydev = adpt->phydev;

	switch (phydev->speed) {
	case SPEED_1000:
		clk_set_rate(adpt->clk[EMAC_CLK_TX].clk, EMC_CLK_RATE_125MHZ);
		break;
	case SPEED_100:
		clk_set_rate(adpt->clk[EMAC_CLK_TX].clk, EMC_CLK_RATE_25MHZ);
		break;
	case SPEED_10:
		clk_set_rate(adpt->clk[EMAC_CLK_TX].clk, EMC_CLK_RATE_2_5MHZ);
		break;
	default:
		emac_err(adpt, "error tx clk set rate because of unknown speed\n");
	}
}
/* RGMII needs no periodic maintenance work. */
static void emac_rgmii_periodic_nop(struct emac_adapter *adpt)
{
}

/* PHY ops vtable for RGMII-connected EMACs; most hooks are no-ops or
 * -EOPNOTSUPP stubs (see above), the real work is in config and
 * tx_clk_set_rate.
 */
struct emac_phy_ops emac_rgmii_ops = {
	.config			= emac_rgmii_config,
	.up			= emac_rgmii_up_nop,
	.down			= emac_rgmii_down_nop,
	.reset			= emac_rgmii_reset_nop,
	.link_setup_no_ephy	= emac_rgmii_link_setup_no_ephy,
	.link_check_no_ephy	= emac_rgmii_link_check_no_ephy,
	.tx_clk_set_rate	= emac_rgmii_tx_clk_set_rate,
	.periodic_task		= emac_rgmii_periodic_nop,
};

View file

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _EMAC_RGMII_H_
#define _EMAC_RGMII_H_

/* PHY ops vtable implemented by the RGMII glue (emac_rgmii.c). */
extern struct emac_phy_ops emac_rgmii_ops;
#endif /*_EMAC_RGMII_H_*/

View file

@ -0,0 +1,852 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* Qualcomm Technologies, Inc. EMAC SGMII Controller driver.
*/
#include <linux/iopoll.h>
#include <linux/acpi.h>
#include <linux/of_device.h>
#include "emac_sgmii.h"
#include "emac_hw.h"
#define PCS_MAX_REG_CNT 10
#define PLL_MAX_REG_CNT 18

/* Stream a register-programming list to @base: write each
 * {offset, val} pair in order until the END_MARKER sentinel entry.
 */
void emac_reg_write_all(void __iomem *base, const struct emac_reg_write *itr)
{
	while (itr->offset != END_MARKER) {
		writel_relaxed(itr->val, base + itr->offset);
		itr++;
	}
}
/* Per-platform PCS (physical coding sublayer) programming sequences,
 * indexed by the EMAC_PHY_MAP_* id.  Each sequence is END_MARKER-terminated
 * and applied with emac_reg_write_all() during SGMII initialization.
 */
static const struct emac_reg_write
physical_coding_sublayer_programming[][PCS_MAX_REG_CNT] = {
	/* EMAC_PHY_MAP_DEFAULT */
	{
		{EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
		{EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
		{EMAC_SGMII_PHY_CMN_PWR_CTRL,
			BIAS_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN
			| PLL_RXCLK_EN},
		{EMAC_SGMII_PHY_TX_PWR_CTRL, L0_TX_EN | L0_CLKBUF_EN
			| L0_TRAN_BIAS_EN},
		{EMAC_SGMII_PHY_RX_PWR_CTRL,
			L0_RX_SIGDET_EN | L0_RX_TERM_MODE(1) | L0_RX_I_EN},
		{EMAC_SGMII_PHY_CMN_PWR_CTRL,
			BIAS_EN | PLL_EN | SYSCLK_EN | CLKBUF_L_EN
			| PLL_TXCLK_EN | PLL_RXCLK_EN},
		{EMAC_SGMII_PHY_LANE_CTRL1,
			L0_RX_EQUALIZE_ENABLE | L0_RESET_TSYNC_EN
			| L0_DRV_LVL(15)},
		{END_MARKER, END_MARKER},
	},
	/* EMAC_PHY_MAP_MDM9607: same as default plus VCO-tail and PLL
	 * control tuning for the MDM9607 SerDes.
	 */
	{
		{EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
		{EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
		{EMAC_SGMII_PHY_CMN_PWR_CTRL,
			BIAS_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN
			| PLL_RXCLK_EN},
		{EMAC_SGMII_PHY_TX_PWR_CTRL, L0_TX_EN | L0_CLKBUF_EN
			| L0_TRAN_BIAS_EN},
		{EMAC_SGMII_PHY_RX_PWR_CTRL,
			L0_RX_SIGDET_EN | L0_RX_TERM_MODE(1) | L0_RX_I_EN},
		{EMAC_SGMII_PHY_CMN_PWR_CTRL,
			BIAS_EN | PLL_EN | SYSCLK_EN | CLKBUF_L_EN
			| PLL_TXCLK_EN | PLL_RXCLK_EN},
		{EMAC_QSERDES_COM_PLL_VCOTAIL_EN, PLL_VCO_TAIL_MUX |
			PLL_VCO_TAIL(124) | PLL_EN_VCOTAIL_EN},
		{EMAC_QSERDES_COM_PLL_CNTRL, OCP_EN | PLL_DIV_FFEN
			| PLL_DIV_ORD},
		{EMAC_SGMII_PHY_LANE_CTRL1,
			L0_RX_EQUALIZE_ENABLE | L0_RESET_TSYNC_EN
			| L0_DRV_LVL(15)},
		{END_MARKER, END_MARKER}
	},
	/* EMAC_PHY_MAP_V2: v2 PHYs need only power-up and equalizer enable;
	 * the SerDes proper is programmed via sgmii_v2_laned.
	 */
	{
		{EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B},
		{EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)},
		{EMAC_SGMII_PHY_TX_PWR_CTRL, 0},
		{EMAC_SGMII_PHY_LANE_CTRL1, L0_RX_EQUALIZE_ENABLE},
		{END_MARKER, END_MARKER}
	}
};
/* Select the CMOS system clock source and AC-couple it (v1 SerDes). */
static const struct emac_reg_write sysclk_refclk_setting[] = {
	{EMAC_QSERDES_COM_SYSCLK_EN_SEL, SYSCLK_SEL_CMOS},
	{EMAC_QSERDES_COM_SYS_CLK_CTRL, SYSCLK_CM | SYSCLK_AC_COUPLE},
	{END_MARKER, END_MARKER},
};
/* SerDes PLL configuration tables, indexed by EMAC_PHY_MAP_* id and
 * END_MARKER-terminated.  The MDM9607 variant differs from the default in
 * the charge-pump setting (PLL_IPSETI) and adds trim-search/bandgap tuning.
 */
static const struct emac_reg_write pll_setting[][PLL_MAX_REG_CNT] = {
	/* EMAC_PHY_MAP_DEFAULT */
	{
		{EMAC_QSERDES_COM_PLL_IP_SETI, PLL_IPSETI(1)},
		{EMAC_QSERDES_COM_PLL_CP_SETI, PLL_CPSETI(59)},
		{EMAC_QSERDES_COM_PLL_IP_SETP, PLL_IPSETP(10)},
		{EMAC_QSERDES_COM_PLL_CP_SETP, PLL_CPSETP(9)},
		{EMAC_QSERDES_COM_PLL_CRCTRL, PLL_RCTRL(15) | PLL_CCTRL(11)},
		{EMAC_QSERDES_COM_PLL_CNTRL, OCP_EN | PLL_DIV_FFEN
			| PLL_DIV_ORD},
		{EMAC_QSERDES_COM_DEC_START1, DEC_START1_MUX | DEC_START1(2)},
		{EMAC_QSERDES_COM_DEC_START2, DEC_START2_MUX | DEC_START2},
		{EMAC_QSERDES_COM_DIV_FRAC_START1,
			DIV_FRAC_START_MUX | DIV_FRAC_START(85)},
		{EMAC_QSERDES_COM_DIV_FRAC_START2,
			DIV_FRAC_START_MUX | DIV_FRAC_START(42)},
		{EMAC_QSERDES_COM_DIV_FRAC_START3,
			DIV_FRAC_START3_MUX | DIV_FRAC_START3(3)},
		{EMAC_QSERDES_COM_PLLLOCK_CMP1, PLLLOCK_CMP(43)},
		{EMAC_QSERDES_COM_PLLLOCK_CMP2, PLLLOCK_CMP(104)},
		{EMAC_QSERDES_COM_PLLLOCK_CMP3, PLLLOCK_CMP(0)},
		{EMAC_QSERDES_COM_PLLLOCK_CMP_EN, PLLLOCK_CMP_EN},
		{EMAC_QSERDES_COM_RESETSM_CNTRL, FRQ_TUNE_MODE},
		{END_MARKER, END_MARKER}
	},
	/* EMAC_PHY_MAP_MDM9607 */
	{
		{EMAC_QSERDES_COM_PLL_IP_SETI, PLL_IPSETI(3)},
		{EMAC_QSERDES_COM_PLL_CP_SETI, PLL_CPSETI(59)},
		{EMAC_QSERDES_COM_PLL_IP_SETP, PLL_IPSETP(10)},
		{EMAC_QSERDES_COM_PLL_CP_SETP, PLL_CPSETP(9)},
		{EMAC_QSERDES_COM_PLL_CRCTRL, PLL_RCTRL(15) | PLL_CCTRL(11)},
		{EMAC_QSERDES_COM_DEC_START1, DEC_START1_MUX | DEC_START1(2)},
		{EMAC_QSERDES_COM_DEC_START2, DEC_START2_MUX | DEC_START2},
		{EMAC_QSERDES_COM_DIV_FRAC_START1,
			DIV_FRAC_START_MUX | DIV_FRAC_START(85)},
		{EMAC_QSERDES_COM_DIV_FRAC_START2,
			DIV_FRAC_START_MUX | DIV_FRAC_START(42)},
		{EMAC_QSERDES_COM_DIV_FRAC_START3,
			DIV_FRAC_START3_MUX | DIV_FRAC_START3(3)},
		{EMAC_QSERDES_COM_PLLLOCK_CMP1, PLLLOCK_CMP(43)},
		{EMAC_QSERDES_COM_PLLLOCK_CMP2, PLLLOCK_CMP(104)},
		{EMAC_QSERDES_COM_PLLLOCK_CMP3, PLLLOCK_CMP(0)},
		{EMAC_QSERDES_COM_PLLLOCK_CMP_EN, PLLLOCK_CMP_EN},
		{EMAC_QSERDES_COM_RESETSM_CNTRL, FRQ_TUNE_MODE},
		{EMAC_QSERDES_COM_RES_TRIM_SEARCH, RESTRIM_SEARCH(0)},
		{EMAC_QSERDES_COM_BGTC, BGTC(7)},
		{END_MARKER, END_MARKER},
	}
};
/* Clock/data-recovery loop settings for both RX CDR control registers. */
static const struct emac_reg_write cdr_setting[] = {
	{EMAC_QSERDES_RX_CDR_CONTROL,
		SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(2)},
	{EMAC_QSERDES_RX_CDR_CONTROL2,
		SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(4)},
	{END_MARKER, END_MARKER},
};
/* v1 SerDes TX driver level / emphasis and RX equalizer gain settings. */
static const struct emac_reg_write tx_rx_setting[] = {
	{EMAC_QSERDES_TX_BIST_MODE_LANENO, 0},
	{EMAC_QSERDES_TX_TX_DRV_LVL, TX_DRV_LVL_MUX | TX_DRV_LVL(15)},
	{EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN, EMP_EN_MUX | EMP_EN},
	{EMAC_QSERDES_TX_TX_EMP_POST1_LVL,
		TX_EMP_POST1_LVL_MUX | TX_EMP_POST1_LVL(1)},
	{EMAC_QSERDES_RX_RX_EQ_GAIN12, RX_EQ_GAIN2(15) | RX_EQ_GAIN1(15)},
	{EMAC_QSERDES_TX_LANE_MODE, LANE_MODE(8)},
	{END_MARKER, END_MARKER}
};
/* v2 per-lane digital block programming (applied to sgmii->digital):
 * CDR gains, TX driver/margining, RX signal-detect and lane/band modes.
 */
static const struct emac_reg_write sgmii_v2_laned[] = {
	/* CDR Settings */
	{EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0,
		UCDR_STEP_BY_TWO_MODE0 | UCDR_XO_GAIN_MODE(10)},
	{EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_XO_GAIN_MODE(0)},
	{EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)},
	/* TX/RX Settings */
	{EMAC_SGMII_LN_RX_EN_SIGNAL, SIGDET_LP_BYP_PS4 | SIGDET_EN_PS0_TO_PS2},
	{EMAC_SGMII_LN_DRVR_CTRL0, TXVAL_VALID_INIT | KR_PCIGEN3_MODE},
	{EMAC_SGMII_LN_DRVR_TAP_EN, MAIN_EN},
	{EMAC_SGMII_LN_TX_MARGINING, TX_MARGINING_MUX | TX_MARGINING(25)},
	{EMAC_SGMII_LN_TX_PRE, TX_PRE_MUX},
	{EMAC_SGMII_LN_TX_POST, TX_POST_MUX},
	{EMAC_SGMII_LN_CML_CTRL_MODE0,
		CML_GEAR_MODE(1) | CML2CMOS_IBOOST_MODE(1)},
	{EMAC_SGMII_LN_MIXER_CTRL_MODE0,
		MIXER_LOADB_MODE(12) | MIXER_DATARATE_MODE(1)},
	{EMAC_SGMII_LN_VGA_INITVAL, VGA_THRESH_DFE(31)},
	{EMAC_SGMII_LN_SIGDET_ENABLES,
		SIGDET_LP_BYP_PS0_TO_PS2 | SIGDET_FLT_BYP},
	{EMAC_SGMII_LN_SIGDET_CNTRL, SIGDET_LVL(8)},
	{EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL, SIGDET_DEGLITCH_CTRL(4)},
	{EMAC_SGMII_LN_RX_MISC_CNTRL0, 0},
	{EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV,
		DRVR_LOGIC_CLK_EN | DRVR_LOGIC_CLK_DIV(4)},
	{EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)},
	{EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(2)},
	{EMAC_SGMII_LN_RX_BAND, BAND_MODE0(3)},
	{EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)},
	{EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(3)},
	{EMAC_SGMII_LN_RSM_CONFIG, BYPASS_RSM_SAMP_CAL | BYPASS_RSM_DLL_CAL},
	{END_MARKER, END_MARKER}
};
/* Pulse the SGMII PHY reset bit in the EMAC wrapper CSR.
 *
 * Asserts PHY_RESET, waits 50 ms for the hardware to latch it, then
 * releases it and waits another 50 ms.  The wmb()s order each CSR write
 * against the following delay.  Callers re-run sgmii->initialize()
 * afterwards to reprogram the PHY.
 *
 * Cleanup: the original wrote ((val & ~PHY_RESET) | PHY_RESET), where the
 * clear is immediately overridden by the set — simplified to val | PHY_RESET
 * (bit-identical result).
 */
void emac_sgmii_reset_prepare(struct emac_adapter *adpt)
{
	struct emac_sgmii *sgmii = adpt->phy.private;
	u32 val;

	/* Assert PHY reset */
	val = readl_relaxed(sgmii->base + EMAC_EMAC_WRAPPER_CSR2);
	writel_relaxed(val | PHY_RESET,
		       sgmii->base + EMAC_EMAC_WRAPPER_CSR2);
	/* Ensure phy-reset command is written to HW before the release cmd */
	wmb();
	msleep(50);

	/* Release PHY reset */
	val = readl_relaxed(sgmii->base + EMAC_EMAC_WRAPPER_CSR2);
	writel_relaxed(val & ~PHY_RESET,
		       sgmii->base + EMAC_EMAC_WRAPPER_CSR2);
	/* Ensure phy-reset release command is written to HW before initializing
	 * SGMII
	 */
	wmb();
	msleep(50);
}
/* Full SGMII recovery: drop the high-speed clock to 19.2 MHz, pulse the
 * PHY reset, re-run the platform-specific initialize callback, then
 * restore the 125 MHz clock.  Initialization failure is logged only.
 */
static void emac_sgmii_reset(struct emac_adapter *adpt)
{
	struct emac_sgmii *sgmii = adpt->phy.private;
	int ret;

	emac_clk_set_rate(adpt, EMAC_CLK_HIGH_SPEED, EMC_CLK_RATE_19_2MHZ);
	emac_sgmii_reset_prepare(adpt);

	ret = sgmii->initialize(adpt);
	if (ret)
		emac_err(adpt,
			 "could not reinitialize internal PHY (error=%i)\n",
			 ret);

	emac_clk_set_rate(adpt, EMAC_CLK_HIGH_SPEED, EMC_CLK_RATE_125MHZ);
}
/* LINK */
/* Program the SGMII autoneg/forced-speed configuration.
 *
 * Takes autoneg/speed/duplex from the attached external phydev when one
 * exists, otherwise defaults to autoneg enabled.  With autoneg, clears the
 * forced-config bits and sets AN_ENABLE; without it, writes the forced
 * speed/duplex to SPEED_CFG1 and clears AN_ENABLE.
 *
 * Returns 0, or -EINVAL for an unsupported forced speed.
 */
int emac_sgmii_link_init(struct emac_adapter *adpt)
{
	struct phy_device *phydev = adpt->phydev;
	struct emac_sgmii *sgmii = adpt->phy.private;
	u32 val;
	int autoneg, speed, duplex;

	/* fall back to autoneg defaults when there is no external PHY */
	autoneg = (adpt->phydev) ? phydev->autoneg : AUTONEG_ENABLE;
	speed = (adpt->phydev) ? phydev->speed : SPEED_UNKNOWN;
	duplex = (adpt->phydev) ? phydev->duplex : DUPLEX_UNKNOWN;

	val = readl_relaxed(sgmii->base + EMAC_SGMII_PHY_AUTONEG_CFG2);

	if (autoneg == AUTONEG_ENABLE) {
		val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG);
		val |= AN_ENABLE;
		writel_relaxed(val,
			       sgmii->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
	} else {
		u32 speed_cfg = 0;

		switch (speed) {
		case SPEED_10:
			speed_cfg = SPDMODE_10;
			break;
		case SPEED_100:
			speed_cfg = SPDMODE_100;
			break;
		case SPEED_1000:
			speed_cfg = SPDMODE_1000;
			break;
		default:
			return -EINVAL;
		}
		if (duplex == DUPLEX_FULL)
			speed_cfg |= DUPLEX_MODE;
		val &= ~AN_ENABLE;
		writel_relaxed(speed_cfg,
			       sgmii->base + EMAC_SGMII_PHY_SPEED_CFG1);
		writel_relaxed(val, sgmii->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
	}
	/* Ensure Auto-Neg setting are written to HW before leaving */
	wmb();
	return 0;
}
/* Clear the SGMII interrupt status bits in @irq_bits.
 *
 * Issues the per-bit clear plus the global-clear command, then polls the
 * status register (up to SGMII_PHY_IRQ_CLR_WAIT_TIME us) until the bits
 * drop, before finalizing by zeroing the command/clear registers.
 *
 * Returns 0 on success, -EIO if the hardware never deasserts the bits.
 */
int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
{
	struct emac_sgmii *sgmii = adpt->phy.private;
	u32 status;

	writel_relaxed(irq_bits, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR);
	writel_relaxed(IRQ_GLOBAL_CLEAR, sgmii->base + EMAC_SGMII_PHY_IRQ_CMD);
	/* Ensure interrupt clear command is written to HW */
	wmb();

	/* After set the IRQ_GLOBAL_CLEAR bit, the status clearing must
	 * be confirmed before clearing the bits in other registers.
	 * It takes a few cycles for hw to clear the interrupt status.
	 */
	if (readl_poll_timeout_atomic(sgmii->base +
				      EMAC_SGMII_PHY_INTERRUPT_STATUS,
				      status, !(status & irq_bits), 1,
				      SGMII_PHY_IRQ_CLR_WAIT_TIME)) {
		emac_err(adpt,
			 "error: failed clear SGMII irq: status:0x%x bits:0x%x\n",
			 status, irq_bits);
		return -EIO;
	}

	/* Finalize clearing procedure */
	writel_relaxed(0, sgmii->base + EMAC_SGMII_PHY_IRQ_CMD);
	writel_relaxed(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR);
	/* Ensure that clearing procedure finalization is written to HW */
	wmb();

	return 0;
}
/* No-op external-PHY init for configurations without an external PHY. */
int emac_sgmii_init_ephy_nop(struct emac_adapter *adpt)
{
	return 0;
}
/* Decode the autoneg TX_CONFIG word resolved by the SGMII block and
 * record link/speed/duplex into @phydev.  Always returns 0; an absent
 * TXCFG_LINK bit reports link-down with unknown speed/duplex.
 */
int emac_sgmii_autoneg_check(struct emac_adapter *adpt,
			     struct phy_device *phydev)
{
	struct emac_sgmii *sgmii = adpt->phy.private;
	u32 an_lo, an_hi, txcfg;

	/* TX_CONFIG is split across two 8-bit status registers */
	an_lo = readl_relaxed(sgmii->base + EMAC_SGMII_PHY_AUTONEG0_STATUS);
	an_hi = readl_relaxed(sgmii->base + EMAC_SGMII_PHY_AUTONEG1_STATUS);
	txcfg = ((an_hi & 0xff) << 8) | (an_lo & 0xff);

	if (!(txcfg & TXCFG_LINK)) {
		phydev->link = false;
		phydev->speed = SPEED_UNKNOWN;
		phydev->duplex = DUPLEX_UNKNOWN;
		return 0;
	}

	phydev->link = true;

	switch (txcfg & TXCFG_MODE_BMSK) {
	case TXCFG_1000_FULL:
		phydev->speed = SPEED_1000;
		phydev->duplex = DUPLEX_FULL;
		break;
	case TXCFG_100_FULL:
		phydev->speed = SPEED_100;
		phydev->duplex = DUPLEX_FULL;
		break;
	case TXCFG_100_HALF:
		phydev->speed = SPEED_100;
		phydev->duplex = DUPLEX_HALF;
		break;
	case TXCFG_10_FULL:
		phydev->speed = SPEED_10;
		phydev->duplex = DUPLEX_FULL;
		break;
	case TXCFG_10_HALF:
		phydev->speed = SPEED_10;
		phydev->duplex = DUPLEX_HALF;
		break;
	default:
		phydev->speed = SPEED_UNKNOWN;
		phydev->duplex = DUPLEX_UNKNOWN;
		break;
	}

	return 0;
}
/* Report link state when no external PHY is present.
 *
 * If autoneg is enabled, defer to emac_sgmii_autoneg_check(); otherwise
 * decode the forced speed/duplex from SPEED_CFG1 and report link up
 * (unrecognized combinations yield unknown speed/duplex).  Returns 0.
 */
int emac_sgmii_link_check_no_ephy(struct emac_adapter *adpt,
				  struct phy_device *phydev)
{
	struct emac_sgmii *sgmii = adpt->phy.private;
	u32 val;

	val = readl_relaxed(sgmii->base + EMAC_SGMII_PHY_AUTONEG_CFG2);

	if (val & AN_ENABLE)
		return emac_sgmii_autoneg_check(adpt, phydev);

	val = readl_relaxed(sgmii->base + EMAC_SGMII_PHY_SPEED_CFG1);
	val &= DUPLEX_MODE | SPDMODE_BMSK;

	switch (val) {
	case DUPLEX_MODE | SPDMODE_1000:
		phydev->speed = SPEED_1000;
		phydev->duplex = DUPLEX_FULL;
		break;
	case DUPLEX_MODE | SPDMODE_100:
		phydev->speed = SPEED_100;
		phydev->duplex = DUPLEX_FULL;
		break;
	case SPDMODE_100:
		phydev->speed = SPEED_100;
		phydev->duplex = DUPLEX_HALF;
		break;
	case DUPLEX_MODE | SPDMODE_10:
		phydev->speed = SPEED_10;
		phydev->duplex = DUPLEX_FULL;
		break;
	case SPDMODE_10:
		phydev->speed = SPEED_10;
		phydev->duplex = DUPLEX_HALF;
		break;
	default:
		phydev->speed = SPEED_UNKNOWN;
		phydev->duplex = DUPLEX_UNKNOWN;
		break;
	}
	phydev->link = true;
	return 0;
}
/* SGMII interrupt handler.
 *
 * Drains the masked status bits in a loop: decode errors schedule an
 * SGMII check task, autoneg-related bits trigger a link-state check, and
 * a failed irq-clear falls back to scheduling a full reinit.  Loops until
 * the status register reads clean.
 */
irqreturn_t emac_sgmii_isr(int _irq, void *data)
{
	struct emac_adapter *adpt = data;
	struct emac_sgmii *sgmii = adpt->phy.private;
	u32 status;

	emac_dbg(adpt, intr, adpt->netdev, "receive sgmii interrupt\n");

	do {
		status = readl_relaxed(sgmii->base +
				       EMAC_SGMII_PHY_INTERRUPT_STATUS) &
				       SGMII_ISR_MASK;
		if (!status)
			break;

		if (status & SGMII_PHY_INTERRUPT_ERR) {
			SET_FLAG(adpt, ADPT_TASK_CHK_SGMII_REQ);
			/* don't schedule work on an adapter going down */
			if (!TEST_FLAG(adpt, ADPT_STATE_DOWN))
				emac_task_schedule(adpt);
		}

		if (status & SGMII_ISR_AN_MASK)
			emac_check_lsc(adpt);

		if (emac_sgmii_irq_clear(adpt, status) != 0) {
			/* reset */
			SET_FLAG(adpt, ADPT_TASK_REINIT_REQ);
			emac_task_schedule(adpt);
			break;
		}
	} while (1);

	return IRQ_HANDLED;
}
/* Bring up the internal SGMII PHY interrupt path.
 *
 * Registers emac_sgmii_isr() on the SGMII IRQ, then unmasks the SGMII
 * interrupts.  Returns 0 on success or the request_irq() error code.
 *
 * Fix: the original unmasked the SGMII interrupts even when request_irq()
 * failed, enabling interrupts that had no handler attached; bail out
 * before touching the mask register instead.
 */
int emac_sgmii_up(struct emac_adapter *adpt)
{
	struct emac_sgmii *sgmii = adpt->phy.private;
	int ret;

	ret = request_irq(sgmii->irq, emac_sgmii_isr, IRQF_TRIGGER_RISING,
			  "sgmii_irq", adpt);
	if (ret) {
		emac_err(adpt,
			 "error:%d on request_irq(%d:sgmii_irq)\n", ret,
			 sgmii->irq);
		return ret;
	}

	/* enable sgmii irq */
	writel_relaxed(SGMII_ISR_MASK,
		       sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);

	return 0;
}
/* Tear down the SGMII interrupt path: mask all SGMII interrupts, wait for
 * any in-flight handler to finish, then release the IRQ line.
 */
void emac_sgmii_down(struct emac_adapter *adpt)
{
	struct emac_sgmii *sgmii = adpt->phy.private;

	writel_relaxed(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
	synchronize_irq(sgmii->irq);
	free_irq(sgmii->irq, adpt);
}
/* Reconfigure link parameters when there is no external PHY.
 * Returns the initialize callback's result.
 */
int emac_sgmii_link_setup_no_ephy(struct emac_adapter *adpt)
{
	struct emac_sgmii *sgmii = adpt->phy.private;

	/* The AN_ENABLE and SPEED_CFG can't change on fly. The SGMII_PHY has
	 * to be re-initialized.
	 */
	emac_sgmii_reset_prepare(adpt);
	return sgmii->initialize(adpt);
}
/* No-op: SGMII mode does not adjust the TX clock rate per link speed. */
void emac_sgmii_tx_clk_set_rate_nop(struct emac_adapter *adpt)
{
}
/* Check SGMII for error */
/* Periodic task body: when ADPT_TASK_CHK_SGMII_REQ is pending, verify the
 * SGMII CDR lock bit (bit 6 of RX_CHK_STATUS) and log if it is lost.
 * Spins on ADPT_STATE_RESETTING to serialize against a concurrent reset.
 */
void emac_sgmii_periodic_check(struct emac_adapter *adpt)
{
	struct emac_sgmii *sgmii = adpt->phy.private;

	if (!TEST_FLAG(adpt, ADPT_TASK_CHK_SGMII_REQ))
		return;
	CLR_FLAG(adpt, ADPT_TASK_CHK_SGMII_REQ);

	/* ensure that no reset is in progress while link task is running */
	while (TEST_N_SET_FLAG(adpt, ADPT_STATE_RESETTING))
		msleep(20); /* Reset might take few 10s of ms */

	if (TEST_FLAG(adpt, ADPT_STATE_DOWN))
		goto sgmii_task_done;

	/* bit 6 set means the CDR is locked; nothing to report */
	if (readl_relaxed(sgmii->base + EMAC_SGMII_PHY_RX_CHK_STATUS) & 0x40)
		goto sgmii_task_done;

	emac_err(adpt, "SGMII CDR not locked\n");

sgmii_task_done:
	CLR_FLAG(adpt, ADPT_STATE_RESETTING);
}
/* Initialize the v1.0 (fsm9900) SGMII PHY: program link config, the
 * default PCS and PLL/CDR/TX-RX tables, start the SerDes and poll for
 * READY, then mask and clear the SGMII interrupts.
 * Returns 0, a link-init error, or -EIO if the SerDes never comes ready.
 */
static int emac_sgmii_init_v1_0(struct emac_adapter *adpt)
{
	struct emac_phy *phy = &adpt->phy;
	struct emac_sgmii *sgmii = phy->private;
	unsigned int i;
	int ret;

	ret = emac_sgmii_link_init(adpt);
	if (ret)
		return ret;

	emac_reg_write_all(sgmii->base,
			   (const struct emac_reg_write *)
			   &physical_coding_sublayer_programming[EMAC_PHY_MAP_DEFAULT]);
	/* Ensure Rx/Tx lanes power configuration is written to hw before
	 * configuring the SerDes engine's clocks
	 */
	wmb();
	emac_reg_write_all(sgmii->base, sysclk_refclk_setting);
	emac_reg_write_all(sgmii->base,
			   (const struct emac_reg_write *)
			   &pll_setting[EMAC_PHY_MAP_DEFAULT]);
	emac_reg_write_all(sgmii->base, cdr_setting);
	emac_reg_write_all(sgmii->base, tx_rx_setting);
	/* Ensure SerDes engine configuration is written to hw before powering
	 * it up
	 */
	wmb();
	writel_relaxed(SERDES_START, sgmii->base + EMAC_SGMII_PHY_SERDES_START);
	/* Ensure Rx/Tx SerDes engine power-up command is written to HW */
	wmb();
	for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
		if (readl_relaxed(sgmii->base + EMAC_QSERDES_COM_RESET_SM)
		    & READY)
			break;
		usleep_range(100, 200);
	}
	if (i == SERDES_START_WAIT_TIMES) {
		emac_err(adpt, "serdes failed to start\n");
		return -EIO;
	}
	/* Mask out all the SGMII Interrupt */
	writel_relaxed(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
	/* Ensure SGMII interrupts are masked out before clearing them */
	wmb();
	emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);

	return 0;
}
/* Initialize the v1.1 (mdm9607) SGMII PHY.  Identical flow to v1.0 but
 * uses the MDM9607 PCS/PLL tables.
 * Returns 0, a link-init error, or -EIO if the SerDes never comes ready.
 */
static int emac_sgmii_init_v1_1(struct emac_adapter *adpt)
{
	struct emac_phy *phy = &adpt->phy;
	struct emac_sgmii *sgmii = phy->private;
	unsigned int i;
	int ret;

	ret = emac_sgmii_link_init(adpt);
	if (ret)
		return ret;

	emac_reg_write_all(sgmii->base,
			   (const struct emac_reg_write *)
			   &physical_coding_sublayer_programming[EMAC_PHY_MAP_MDM9607]);
	/* Ensure Rx/Tx lanes power configuration is written to hw before
	 * configuring the SerDes engine's clocks
	 */
	wmb();
	emac_reg_write_all(sgmii->base, sysclk_refclk_setting);
	emac_reg_write_all(sgmii->base,
			   (const struct emac_reg_write *)
			   &pll_setting[EMAC_PHY_MAP_MDM9607]);
	emac_reg_write_all(sgmii->base, cdr_setting);
	emac_reg_write_all(sgmii->base, tx_rx_setting);
	/* Ensure SerDes engine configuration is written to hw before powering
	 * it up
	 */
	wmb();
	/* Power up the Ser/Des engine */
	writel_relaxed(SERDES_START, sgmii->base + EMAC_SGMII_PHY_SERDES_START);
	/* Ensure Rx/Tx SerDes engine power-up command is written to HW */
	wmb();
	for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
		if (readl_relaxed(sgmii->base + EMAC_QSERDES_COM_RESET_SM)
		    & READY)
			break;
		usleep_range(100, 200);
	}
	if (i == SERDES_START_WAIT_TIMES) {
		emac_err(adpt, "serdes failed to start\n");
		return -EIO;
	}
	/* Mask out all the SGMII Interrupt */
	writel_relaxed(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
	/* Ensure SGMII interrupts are masked out before clearing them */
	wmb();
	emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);

	return 0;
}
/* Initialize the v2 (qdf2432) SGMII PHY: program link config, the v2 PCS
 * table and the per-lane digital block, release the lane state machine,
 * poll for c_ready, then disable loopbacks and mask/clear interrupts.
 * Returns 0, a link-init error, or -EIO on lane-ready timeout.
 */
static int emac_sgmii_init_v2(struct emac_adapter *adpt)
{
	struct emac_phy *phy = &adpt->phy;
	struct emac_sgmii *sgmii = phy->private;
	void __iomem *phy_regs = sgmii->base;
	void __iomem *laned = sgmii->digital;
	unsigned int i;
	u32 lnstatus;
	int ret;

	ret = emac_sgmii_link_init(adpt);
	if (ret)
		return ret;

	/* PCS lane-x init */
	emac_reg_write_all(sgmii->base,
			   (const struct emac_reg_write *)
			   &physical_coding_sublayer_programming[EMAC_PHY_MAP_V2]);
	/* Ensure Rx/Tx lanes power configuration is written to hw before
	 * configuring the SerDes engine's clocks
	 */
	wmb();

	/* SGMII lane-x init */
	emac_reg_write_all(sgmii->digital, sgmii_v2_laned);

	/* Power up PCS and start reset lane state machine */
	writel_relaxed(0, phy_regs + EMAC_SGMII_PHY_RESET_CTRL);
	writel_relaxed(1, laned + SGMII_LN_RSM_START);
	wmb(); /* ensure power up is written before checking lane status */

	/* Wait for c_ready assertion */
	for (i = 0; i < SERDES_START_WAIT_TIMES; i++) {
		lnstatus = readl_relaxed(phy_regs + SGMII_PHY_LN_LANE_STATUS);
		rmb(); /* ensure status read is complete before testing it */
		if (lnstatus & BIT(1))
			break;
		usleep_range(100, 200);
	}

	if (i == SERDES_START_WAIT_TIMES) {
		emac_err(adpt, "SGMII failed to start\n");
		return -EIO;
	}

	/* Disable digital and SERDES loopback */
	writel_relaxed(0, phy_regs + SGMII_PHY_LN_BIST_GEN0);
	writel_relaxed(0, phy_regs + SGMII_PHY_LN_BIST_GEN2);
	writel_relaxed(0, phy_regs + SGMII_PHY_LN_CDR_CTRL1);

	/* Mask out all the SGMII Interrupt */
	writel_relaxed(0, phy_regs + EMAC_SGMII_PHY_INTERRUPT_MASK);
	wmb(); /* ensure writes are flushed to hw */
	emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);

	return 0;
}
/* device_find_child() callback: match an ACPI child device against the
 * known SGMII HIDs and, on a hit, store its initialize function through
 * @data (an emac_sgmii_initialize pointer).  Returns nonzero on match.
 */
static int emac_sgmii_acpi_match(struct device *dev, void *data)
{
	static const struct acpi_device_id match_table[] = {
		{
			.id = "QCOM8071",
			.driver_data = (kernel_ulong_t)emac_sgmii_init_v2,
		},
		{}
	};
	const struct acpi_device_id *id = acpi_match_device(match_table, dev);
	emac_sgmii_initialize *initialize = data;

	if (id)
		*initialize = (emac_sgmii_initialize)id->driver_data;

	return !!id;
}
/* Devicetree match table mapping internal-phy compatibles to their
 * platform-specific initialize callbacks (stored in .data).
 */
static const struct of_device_id emac_sgmii_dt_match[] = {
	{
		.compatible = "qcom,fsm9900-emac-sgmii",
		.data = emac_sgmii_init_v1_0,
	},
	{
		.compatible = "qcom,qdf2432-emac-sgmii",
		.data = emac_sgmii_init_v2,
	},
	{
		.compatible = "qcom,mdm9607-emac-sgmii",
		.data = emac_sgmii_init_v1_1,
	},
	{}
};
/* Locate, map and initialize the internal SGMII PHY for @adpt.
 *
 * Resolves the internal-phy device via ACPI child lookup or the
 * "internal-phy" DT phandle, maps its "emac_sgmii" (and optional v2
 * "emac_digital") MMIO regions, records the SGMII IRQ and runs the
 * platform initialize callback.  Returns 0 or a negative errno.
 *
 * Fixes vs. original:
 *  - ioremap() returns NULL on failure, not an ERR_PTR; the IS_ERR()
 *    check could never fire.  Check for NULL instead.
 *  - devm_ioremap_resource() returns an ERR_PTR and never NULL; the
 *    "!sgmii->digital" check could never fire.  Use IS_ERR()/PTR_ERR().
 *  - The error path manually iounmap()ed the devm-managed digital
 *    mapping, which devm would unmap again (double unmap).  Dropped.
 */
int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
{
	struct platform_device *sgmii_pdev = NULL;
	struct emac_sgmii *sgmii;
	struct resource *res;
	int ret = 0;

	sgmii = devm_kzalloc(&pdev->dev, sizeof(*sgmii), GFP_KERNEL);
	if (!sgmii)
		return -ENOMEM;

	if (ACPI_COMPANION(&pdev->dev)) {
		struct device *dev;

		dev = device_find_child(&pdev->dev, &sgmii->initialize,
					emac_sgmii_acpi_match);
		if (!dev) {
			emac_err(adpt, "cannot find internal phy node\n");
			return -ENODEV;
		}
		sgmii_pdev = to_platform_device(dev);
	} else {
		const struct of_device_id *match;
		struct device_node *np;

		np = of_parse_phandle(pdev->dev.of_node, "internal-phy", 0);
		if (!np) {
			emac_err(adpt, "missing internal-phy property\n");
			return -ENODEV;
		}
		sgmii_pdev = of_find_device_by_node(np);
		if (!sgmii_pdev) {
			emac_err(adpt, "invalid internal-phy property\n");
			return -ENODEV;
		}
		match = of_match_device(emac_sgmii_dt_match, &sgmii_pdev->dev);
		if (!match) {
			emac_err(adpt, "unrecognized internal phy node\n");
			ret = -ENODEV;
			goto error_put_device;
		}
		sgmii->initialize = (emac_sgmii_initialize)match->data;
	}

	/* Base address is the first address */
	res = platform_get_resource_byname(sgmii_pdev, IORESOURCE_MEM,
					   "emac_sgmii");
	if (!res) {
		emac_err(adpt,
			 "error platform_get_resource_byname(emac_sgmii)\n");
		ret = -EINVAL;
		goto error_put_device;
	}

	sgmii->base = ioremap(res->start, resource_size(res));
	if (!sgmii->base) {
		emac_err(adpt,
			 "error remapping emac_sgmii (start:0x%lx size:0x%lx)\n",
			 (ulong)res->start, (ulong)resource_size(res));
		ret = -ENOMEM;
		goto error_put_device;
	}

	/* v2 SGMII has a per-lane digital, so parse it if it exists */
	res = platform_get_resource_byname(sgmii_pdev, IORESOURCE_MEM,
					   "emac_digital");
	if (res) {
		sgmii->digital = devm_ioremap_resource(&sgmii_pdev->dev, res);
		if (IS_ERR(sgmii->digital)) {
			ret = PTR_ERR(sgmii->digital);
			sgmii->digital = NULL;
			goto error_unmap_base;
		}
	}

	ret = platform_get_irq_byname(sgmii_pdev, "emac_sgmii_irq");
	if (ret < 0)
		goto error;
	sgmii->irq = ret;

	adpt->phy.private = sgmii;

	ret = sgmii->initialize(adpt);
	if (ret)
		goto error;

	/* We've remapped the addresses, so we don't need the device any
	 * more. of_find_device_by_node() says we should release it.
	 */
	put_device(&sgmii_pdev->dev);

	return 0;

error:
	/* sgmii->digital is devm-managed; no manual unmap needed here */
error_unmap_base:
	iounmap(sgmii->base);
error_put_device:
	put_device(&sgmii_pdev->dev);

	return ret;
}
/* PHY operations vector exported for SGMII-mode adapters. */
struct emac_phy_ops emac_sgmii_ops = {
	.config			= emac_sgmii_config,
	.up			= emac_sgmii_up,
	.down			= emac_sgmii_down,
	.reset			= emac_sgmii_reset,
	.link_setup_no_ephy	= emac_sgmii_link_setup_no_ephy,
	.link_check_no_ephy	= emac_sgmii_link_check_no_ephy,
	.tx_clk_set_rate	= emac_sgmii_tx_clk_set_rate_nop,
	.periodic_task		= emac_sgmii_periodic_check,
};

View file

@ -0,0 +1,398 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _EMAC_SGMII_H_
#define _EMAC_SGMII_H_
#include "emac_main.h"
/* EMAC_QSERDES register offsets */
#define EMAC_QSERDES_COM_SYS_CLK_CTRL 0x000000
#define EMAC_QSERDES_COM_PLL_VCOTAIL_EN 0x000004
#define EMAC_QSERDES_COM_PLL_CNTRL 0x000014
#define EMAC_QSERDES_COM_PLL_IP_SETI 0x000018
#define EMAC_QSERDES_COM_PLL_CP_SETI 0x000024
#define EMAC_QSERDES_COM_PLL_IP_SETP 0x000028
#define EMAC_QSERDES_COM_PLL_CP_SETP 0x00002c
#define EMAC_QSERDES_COM_SYSCLK_EN_SEL 0x000038
#define EMAC_QSERDES_COM_RESETSM_CNTRL 0x000040
#define EMAC_QSERDES_COM_PLLLOCK_CMP1 0x000044
#define EMAC_QSERDES_COM_PLLLOCK_CMP2 0x000048
#define EMAC_QSERDES_COM_PLLLOCK_CMP3 0x00004c
#define EMAC_QSERDES_COM_PLLLOCK_CMP_EN 0x000050
#define EMAC_QSERDES_COM_BGTC 0x000058
#define EMAC_QSERDES_COM_DEC_START1 0x000064
#define EMAC_QSERDES_COM_RES_TRIM_SEARCH 0x000088
#define EMAC_QSERDES_COM_DIV_FRAC_START1 0x000098
#define EMAC_QSERDES_COM_DIV_FRAC_START2 0x00009c
#define EMAC_QSERDES_COM_DIV_FRAC_START3 0x0000a0
#define EMAC_QSERDES_COM_DEC_START2 0x0000a4
#define EMAC_QSERDES_COM_PLL_CRCTRL 0x0000ac
#define EMAC_QSERDES_COM_RESET_SM 0x0000bc
#define EMAC_QSERDES_TX_BIST_MODE_LANENO 0x000100
#define EMAC_QSERDES_TX_TX_EMP_POST1_LVL 0x000108
#define EMAC_QSERDES_TX_TX_DRV_LVL 0x00010c
#define EMAC_QSERDES_TX_LANE_MODE 0x000150
#define EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN 0x000170
#define EMAC_QSERDES_RX_CDR_CONTROL 0x000200
#define EMAC_QSERDES_RX_CDR_CONTROL2 0x000210
#define EMAC_QSERDES_RX_RX_EQ_GAIN12 0x000230
/* EMAC_SGMII register offsets */
#define EMAC_SGMII_PHY_SERDES_START 0x000300
#define EMAC_SGMII_PHY_CMN_PWR_CTRL 0x000304
#define EMAC_SGMII_PHY_RX_PWR_CTRL 0x000308
#define EMAC_SGMII_PHY_TX_PWR_CTRL 0x00030C
#define EMAC_SGMII_PHY_LANE_CTRL1 0x000318
#define EMAC_SGMII_PHY_AUTONEG_CFG2 0x000348
#define EMAC_SGMII_PHY_CDR_CTRL0 0x000358
#define EMAC_SGMII_PHY_SPEED_CFG1 0x000374
#define EMAC_SGMII_PHY_POW_DWN_CTRL0 0x000380
#define EMAC_SGMII_PHY_RESET_CTRL 0x0003a8
#define EMAC_SGMII_PHY_IRQ_CMD 0x0003ac
#define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x0003b0
#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x0003b4
#define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x0003b8
#define EMAC_SGMII_PHY_RX_CHK_STATUS 0x0003d4
#define EMAC_SGMII_PHY_AUTONEG0_STATUS 0x0003e0
#define EMAC_SGMII_PHY_AUTONEG1_STATUS 0x0003e4
/* EMAC_QSERDES_COM_PLL_IP_SETI */
#define PLL_IPSETI(x) ((x) & 0x3f)
/* EMAC_QSERDES_COM_PLL_CP_SETI */
#define PLL_CPSETI(x) ((x) & 0xff)
/* EMAC_QSERDES_COM_PLL_IP_SETP */
#define PLL_IPSETP(x) ((x) & 0x3f)
/* EMAC_QSERDES_COM_PLL_CP_SETP */
#define PLL_CPSETP(x) ((x) & 0x1f)
/* EMAC_QSERDES_COM_PLL_CRCTRL */
#define PLL_RCTRL(x) (((x) & 0xf) << 4)
#define PLL_CCTRL(x) ((x) & 0xf)
/* SGMII v2 PHY registers per lane */
#define EMAC_SGMII_PHY_LN_OFFSET 0x0400
/* SGMII v2 digital lane registers */
#define EMAC_SGMII_LN_DRVR_CTRL0 0x00C
#define EMAC_SGMII_LN_DRVR_TAP_EN 0x018
#define EMAC_SGMII_LN_TX_MARGINING 0x01C
#define EMAC_SGMII_LN_TX_PRE 0x020
#define EMAC_SGMII_LN_TX_POST 0x024
#define EMAC_SGMII_LN_TX_BAND_MODE 0x060
#define EMAC_SGMII_LN_LANE_MODE 0x064
#define EMAC_SGMII_LN_PARALLEL_RATE 0x078
#define EMAC_SGMII_LN_CML_CTRL_MODE0 0x0B8
#define EMAC_SGMII_LN_MIXER_CTRL_MODE0 0x0D0
#define EMAC_SGMII_LN_VGA_INITVAL 0x134
#define EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0 0x17C
#define EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0 0x188
#define EMAC_SGMII_LN_UCDR_SO_CONFIG 0x194
#define EMAC_SGMII_LN_RX_BAND 0x19C
#define EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0 0x1B8
#define EMAC_SGMII_LN_RSM_CONFIG 0x1F0
#define EMAC_SGMII_LN_SIGDET_ENABLES 0x224
#define EMAC_SGMII_LN_SIGDET_CNTRL 0x228
#define EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL 0x22C
#define EMAC_SGMII_LN_RX_EN_SIGNAL 0x2A0
#define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x2AC
#define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x2BC
/* SGMII v2 per lane registers */
#define SGMII_LN_RSM_START 0x029C
/* SGMII v2 PHY common registers */
#define SGMII_PHY_CMN_CTRL 0x0408
#define SGMII_PHY_CMN_RESET_CTRL 0x0410
/* SGMII v2 PHY registers per lane */
#define SGMII_PHY_LN_OFFSET 0x0400
#define SGMII_PHY_LN_LANE_STATUS 0x00DC
#define SGMII_PHY_LN_BIST_GEN0 0x008C
#define SGMII_PHY_LN_BIST_GEN1 0x0090
#define SGMII_PHY_LN_BIST_GEN2 0x0094
#define SGMII_PHY_LN_BIST_GEN3 0x0098
#define SGMII_PHY_LN_CDR_CTRL1 0x005C
/* SGMII v2 digital lane register values */
#define UCDR_STEP_BY_TWO_MODE0 BIT(7)
#define UCDR_XO_GAIN_MODE(x) ((x) & 0x7f)
#define UCDR_ENABLE BIT(6)
#define UCDR_SO_SATURATION(x) ((x) & 0x3f)
#define SIGDET_LP_BYP_PS4 BIT(7)
#define SIGDET_EN_PS0_TO_PS2 BIT(6)
#define EN_ACCOUPLEVCM_SW_MUX BIT(5)
#define EN_ACCOUPLEVCM_SW BIT(4)
#define RX_SYNC_EN BIT(3)
#define RXTERM_HIGHZ_PS5 BIT(2)
#define SIGDET_EN_PS3 BIT(1)
#define EN_ACCOUPLE_VCM_PS3 BIT(0)
#define UFS_MODE BIT(5)
#define TXVAL_VALID_INIT BIT(4)
#define TXVAL_VALID_MUX BIT(3)
#define TXVAL_VALID BIT(2)
#define USB3P1_MODE BIT(1)
#define KR_PCIGEN3_MODE BIT(0)
#define PRE_EN BIT(3)
#define POST_EN BIT(2)
#define MAIN_EN_MUX BIT(1)
#define MAIN_EN BIT(0)
#define TX_MARGINING_MUX BIT(6)
#define TX_MARGINING(x) ((x) & 0x3f)
#define TX_PRE_MUX BIT(6)
#define TX_PRE(x) ((x) & 0x3f)
#define TX_POST_MUX BIT(6)
#define TX_POST(x) ((x) & 0x3f)
#define CML_GEAR_MODE(x) (((x) & 7) << 3)
#define CML2CMOS_IBOOST_MODE(x) ((x) & 7)
#define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2)
#define MIXER_DATARATE_MODE(x) ((x) & 3)
#define VGA_THRESH_DFE(x) ((x) & 0x3f)
#define SIGDET_LP_BYP_PS0_TO_PS2 BIT(5)
#define SIGDET_LP_BYP_MUX BIT(4)
#define SIGDET_LP_BYP BIT(3)
#define SIGDET_EN_MUX BIT(2)
#define SIGDET_EN BIT(1)
#define SIGDET_FLT_BYP BIT(0)
#define SIGDET_LVL(x) (((x) & 0xf) << 4)
#define SIGDET_BW_CTRL(x) ((x) & 0xf)
#define SIGDET_DEGLITCH_CTRL(x) (((x) & 0xf) << 1)
#define SIGDET_DEGLITCH_BYP BIT(0)
#define INVERT_PCS_RX_CLK BIT(7)
#define PWM_EN BIT(6)
#define RXBIAS_SEL(x) (((x) & 0x3) << 4)
#define EBDAC_SIGN BIT(3)
#define EDAC_SIGN BIT(2)
#define EN_AUXTAP1SIGN_INVERT BIT(1)
#define EN_DAC_CHOPPING BIT(0)
#define DRVR_LOGIC_CLK_EN BIT(4)
#define DRVR_LOGIC_CLK_DIV(x) ((x) & 0xf)
#define PARALLEL_RATE_MODE2(x) (((x) & 0x3) << 4)
#define PARALLEL_RATE_MODE1(x) (((x) & 0x3) << 2)
#define PARALLEL_RATE_MODE0(x) ((x) & 0x3)
#define BAND_MODE2(x) (((x) & 0x3) << 4)
#define BAND_MODE1(x) (((x) & 0x3) << 2)
#define BAND_MODE0(x) ((x) & 0x3)
#define LANE_SYNC_MODE BIT(5)
#define LANE_MODE(x) ((x) & 0x1f)
#define CDR_PD_SEL_MODE0(x) (((x) & 0x3) << 5)
#define EN_DLL_MODE0 BIT(4)
#define EN_IQ_DCC_MODE0 BIT(3)
#define EN_IQCAL_MODE0 BIT(2)
#define EN_QPATH_MODE0 BIT(1)
#define EN_EPATH_MODE0 BIT(0)
#define FORCE_TSYNC_ACK BIT(7)
#define FORCE_CMN_ACK BIT(6)
#define FORCE_CMN_READY BIT(5)
#define EN_RCLK_DEGLITCH BIT(4)
#define BYPASS_RSM_CDR_RESET BIT(3)
#define BYPASS_RSM_TSYNC BIT(2)
#define BYPASS_RSM_SAMP_CAL BIT(1)
#define BYPASS_RSM_DLL_CAL BIT(0)
/* EMAC_QSERDES_COM_SYS_CLK_CTRL */
#define SYSCLK_CM BIT(4)
#define SYSCLK_AC_COUPLE BIT(3)
/* EMAC_QSERDES_COM_PLL_VCOTAIL_EN */
#define PLL_VCO_TAIL_MUX BIT(7)
#define PLL_VCO_TAIL(x) ((x) & 0x7c)
#define PLL_EN_VCOTAIL_EN BIT(0)
/* EMAC_QSERDES_COM_PLL_CNTRL */
#define OCP_EN BIT(5)
#define PLL_DIV_FFEN BIT(2)
#define PLL_DIV_ORD BIT(1)
/* EMAC_QSERDES_COM_SYSCLK_EN_SEL */
#define SYSCLK_SEL_CMOS BIT(3)
/* EMAC_QSERDES_COM_RES_TRIM_SEARCH */
#define RESTRIM_SEARCH(x) ((x) & 0xff)
/* EMAC_QSERDES_COM_BGTC */
#define BGTC(x) ((x) & 0x1f)
/* EMAC_QSERDES_COM_RESETSM_CNTRL */
#define FRQ_TUNE_MODE BIT(4)
/* EMAC_QSERDES_COM_PLLLOCK_CMP_EN */
#define PLLLOCK_CMP_EN BIT(0)
/* EMAC_QSERDES_COM_DEC_START1 */
#define DEC_START1_MUX BIT(7)
#define DEC_START1(x) ((x) & 0x7f)
/* EMAC_QSERDES_COM_DIV_FRAC_START1 * EMAC_QSERDES_COM_DIV_FRAC_START2 */
#define DIV_FRAC_START_MUX BIT(7)
#define DIV_FRAC_START(x) ((x) & 0x7f)
/* EMAC_QSERDES_COM_DIV_FRAC_START3 */
#define DIV_FRAC_START3_MUX BIT(4)
#define DIV_FRAC_START3(x) ((x) & 0xf)
/* EMAC_QSERDES_COM_DEC_START2 */
#define DEC_START2_MUX BIT(1)
#define DEC_START2 BIT(0)
/* EMAC_QSERDES_COM_RESET_SM */
#define READY BIT(5)
/* EMAC_QSERDES_TX_TX_EMP_POST1_LVL */
#define TX_EMP_POST1_LVL_MUX BIT(5)
#define TX_EMP_POST1_LVL(x) ((x) & 0x1f)
#define TX_EMP_POST1_LVL_BMSK 0x1f
#define TX_EMP_POST1_LVL_SHFT 0
/* EMAC_QSERDES_TX_TX_DRV_LVL */
#define TX_DRV_LVL_MUX BIT(4)
#define TX_DRV_LVL(x) ((x) & 0xf)
/* EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN */
#define EMP_EN_MUX BIT(1)
#define EMP_EN BIT(0)
/* EMAC_QSERDES_RX_CDR_CONTROL & EMAC_QSERDES_RX_CDR_CONTROL2 */
#define HBW_PD_EN BIT(7)
#define SECONDORDERENABLE BIT(6)
#define FIRSTORDER_THRESH(x) (((x) & 0x7) << 3)
#define SECONDORDERGAIN(x) ((x) & 0x7)
/* EMAC_QSERDES_RX_RX_EQ_GAIN12 */
#define RX_EQ_GAIN2(x) (((x) & 0xf) << 4)
#define RX_EQ_GAIN1(x) ((x) & 0xf)
/* EMAC_SGMII_PHY_SERDES_START */
#define SERDES_START BIT(0)
/* EMAC_SGMII_PHY_CMN_PWR_CTRL */
#define BIAS_EN BIT(6)
#define PLL_EN BIT(5)
#define SYSCLK_EN BIT(4)
#define CLKBUF_L_EN BIT(3)
#define PLL_TXCLK_EN BIT(1)
#define PLL_RXCLK_EN BIT(0)
/* EMAC_SGMII_PHY_RX_PWR_CTRL */
#define L0_RX_SIGDET_EN BIT(7)
#define L0_RX_TERM_MODE(x) (((x) & 3) << 4)
#define L0_RX_I_EN BIT(1)
/* EMAC_SGMII_PHY_TX_PWR_CTRL */
#define L0_TX_EN BIT(5)
#define L0_CLKBUF_EN BIT(4)
#define L0_TRAN_BIAS_EN BIT(1)
/* EMAC_SGMII_PHY_LANE_CTRL1 */
#define L0_RX_EQUALIZE_ENABLE BIT(6)
#define L0_RESET_TSYNC_EN BIT(4)
#define L0_DRV_LVL(x) ((x) & 0xf)
/* EMAC_SGMII_PHY_AUTONEG_CFG2 */
#define FORCE_AN_TX_CFG BIT(5)
#define FORCE_AN_RX_CFG BIT(4)
#define AN_ENABLE BIT(0)
/* EMAC_SGMII_PHY_SPEED_CFG1 */
#define DUPLEX_MODE BIT(4)
#define SPDMODE_1000 BIT(1)
#define SPDMODE_100 BIT(0)
#define SPDMODE_10 0
#define SPDMODE_BMSK 3
#define SPDMODE_SHFT 0
/* EMAC_SGMII_PHY_POW_DWN_CTRL0 */
#define PWRDN_B BIT(0)
#define CDR_MAX_CNT(x) ((x) & 0xff)
/* EMAC_QSERDES_TX_BIST_MODE_LANENO */
#define BIST_LANE_NUMBER(x) (((x) & 3) << 5)
#define BISTMODE(x) ((x) & 0x1f)
/* EMAC_QSERDES_COM_PLLLOCK_CMPx */
#define PLLLOCK_CMP(x) ((x) & 0xff)
/* EMAC_SGMII_PHY_RESET_CTRL */
#define PHY_SW_RESET BIT(0)
/* EMAC_SGMII_PHY_IRQ_CMD */
#define IRQ_GLOBAL_CLEAR BIT(0)
/* EMAC_SGMII_PHY_INTERRUPT_MASK */
#define DECODE_CODE_ERR BIT(7)
#define DECODE_DISP_ERR BIT(6)
#define PLL_UNLOCK BIT(5)
#define AN_ILLEGAL_TERM BIT(4)
#define SYNC_FAIL BIT(3)
#define AN_START BIT(2)
#define AN_END BIT(1)
#define AN_REQUEST BIT(0)
#define SGMII_PHY_IRQ_CLR_WAIT_TIME 10
#define SGMII_PHY_INTERRUPT_ERR (\
DECODE_CODE_ERR |\
DECODE_DISP_ERR)
#define SGMII_ISR_AN_MASK (\
AN_REQUEST |\
AN_START |\
AN_END |\
AN_ILLEGAL_TERM |\
PLL_UNLOCK |\
SYNC_FAIL)
#define SGMII_ISR_MASK (\
SGMII_PHY_INTERRUPT_ERR |\
SGMII_ISR_AN_MASK)
/* SGMII TX_CONFIG */
#define TXCFG_LINK 0x8000
#define TXCFG_MODE_BMSK 0x1c00
#define TXCFG_1000_FULL 0x1800
#define TXCFG_100_FULL 0x1400
#define TXCFG_100_HALF 0x0400
#define TXCFG_10_FULL 0x1000
#define TXCFG_10_HALF 0x0000
#define SERDES_START_WAIT_TIMES 100
/* One register/value pair for table-driven SERDES/PHY programming.
 * NOTE(review): END_MARKER is presumably used as an end-of-table
 * sentinel in the @offset field — confirm against the table walkers.
 */
struct emac_reg_write {
	ulong offset;		/* register offset within the block */
#define END_MARKER 0xffffffff	/* sentinel offset terminating a table */
	u32 val;		/* value to program at @offset */
};
/* Per-variant SGMII PHY initialization hook. */
typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt);

/**
 * struct emac_sgmii - internal SGMII PHY
 * @base: base address
 * @digital: per-lane digital block
 * @irq: interrupt number
 * @initialize: initialization function
 */
struct emac_sgmii {
	void __iomem *base;
	void __iomem *digital;
	int irq;
	emac_sgmii_initialize initialize;
};
extern struct emac_phy_ops emac_sgmii_ops;
#endif /*_EMAC_SGMII_H_*/

View file

@ -168,6 +168,8 @@
#define DWMAC4_PCS_BASE 0x000000e0
#define RGMII_CONFIG_10M_CLK_DVD GENMASK(18, 10)
static int phytype = -1;
static int boardtype = -1;
void *ipc_emac_log_ctxt;
struct emac_emb_smmu_cb_ctx emac_emb_smmu_ctx = {0};
@ -177,6 +179,14 @@ struct plat_stmmacenet_data *plat_dat;
struct qcom_ethqos *pethqos;
#ifdef MODULE
static char *board;
module_param(board, charp, 0660);
MODULE_PARM_DESC(board, "board type of the device");
static char *enet;
module_param(enet, charp, 0660);
MODULE_PARM_DESC(enet, "enet value for the phy connection");
static char *eipv4;
module_param(eipv4, charp, 0660);
MODULE_PARM_DESC(eipv4, "ipv4 value from ethernet partition");
@ -203,6 +213,28 @@ static unsigned char dev_addr[ETH_ALEN] = {
0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7};
static struct ip_params pparams = {"", "", "", ""};
/* Map the "board=" boot/module parameter string onto a board id.
 * Returns 0 on a recognized board name, -1 otherwise.
 */
static int set_board_type(char *board_params)
{
	if (!strcmp(board_params, "Air")) {
		boardtype = AIR_BOARD;
		return 0;
	}

	if (!strcmp(board_params, "Star")) {
		boardtype = STAR_BOARD;
		return 0;
	}

	return -1;
}
/* Map the "enet=" boot/module parameter string onto a PHY type.
 * "1"/"2" select the 1G PHY, "3"/"6" the 2.5G PHY.
 * Returns 0 on a recognized value, -1 otherwise.
 */
static int set_phy_type(char *enet_params)
{
	if (!strcmp(enet_params, "1") || !strcmp(enet_params, "2")) {
		phytype = PHY_1G;
		return 0;
	}

	if (!strcmp(enet_params, "3") || !strcmp(enet_params, "6")) {
		phytype = PHY_25G;
		return 0;
	}

	return -1;
}
static int set_early_ethernet_ipv4(char *ipv4_addr_in)
{
int ret = 1;
@ -278,6 +310,11 @@ fail:
}
#ifndef MODULE
__setup("board=", set_board_type);
__setup("enet=", set_phy_type);
static int __init set_early_ethernet_ipv4_static(char *ipv4_addr_in)
{
int ret = 1;
@ -338,8 +375,11 @@ static int qcom_ethqos_add_ipaddr(struct ip_params *ip_info,
} else {
ETHQOSINFO("Assigned IPv4 address: %s\r\n",
ip_info->ipv4_addr_str);
#if (IS_ENABLED(CONFIG_BOOTMARKER_PROXY))
bootmarker_place_marker("M - Etherent Assigned IPv4 address");
#else
ETHQOSINFO("M - Etherent Assigned IPv4 address\n");
#endif
}
return res;
}
@ -385,8 +425,11 @@ static int qcom_ethqos_add_ipv6addr(struct ip_params *ip_info,
} else {
ETHQOSDBG("Assigned IPv6 address: %s\r\n",
ip_info->ipv6_addr_str);
#if (IS_ENABLED(CONFIG_BOOTMARKER_PROXY))
bootmarker_place_marker("M - Ethernet Assigned IPv6 address");
#else
ETHQOSINFO("M - Ethernet Assigned IPv6 address\n");
#endif
}
return ret;
}
@ -2139,6 +2182,18 @@ static int ethqos_set_early_eth_param(struct stmmac_priv *priv,
return 0;
}
/* Gate the SGMII reference and PHY auxiliary clocks, if they were
 * acquired.  Used on the probe error path (and wherever the PHY side
 * must be fully unclocked); NULL clock pointers are skipped.
 */
static void qcom_ethqos_disable_phy_clks(struct qcom_ethqos *ethqos)
{
	ETHQOSINFO("Enter\n");

	if (ethqos->phyaux_clk)
		clk_disable_unprepare(ethqos->phyaux_clk);

	if (ethqos->sgmiref_clk)
		clk_disable_unprepare(ethqos->sgmiref_clk);

	ETHQOSINFO("Exit\n");
}
static void qcom_ethqos_request_phy_wol(void *plat_n)
{
struct plat_stmmacenet_data *plat = plat_n;
@ -2217,10 +2272,19 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ETHQOSERR("Error creating logging context for emac\n");
else
ETHQOSDBG("IPC logging has been enabled for emac\n");
#if (IS_ENABLED(CONFIG_BOOTMARKER_PROXY))
bootmarker_place_marker("M - Ethernet probe start");
#else
ETHQOSINFO("M - Ethernet probe start\n");
#endif
#ifdef MODULE
if (enet)
ret = set_phy_type(enet);
if (board)
ret = set_board_type(board);
if (eipv4)
ret = set_early_ethernet_ipv4(eipv4);
@ -2362,7 +2426,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
}
ETHQOSDBG("gdsc-off-on-suspend = %d\n",
ethqos->gdsc_off_on_suspend);
plat_dat->phy_type = phytype;
plat_dat->board_type = boardtype;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
goto err_clk;
@ -2412,8 +2477,11 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos_set_early_eth_param(priv, ethqos);
}
atomic_set(&priv->plat->phy_clks_suspended, 0);
#if (IS_ENABLED(CONFIG_BOOTMARKER_PROXY))
bootmarker_place_marker("M - Ethernet probe end");
#else
ETHQOSINFO("M - Ethernet probe end\n");
#endif
return ret;
err_clk:
@ -2421,7 +2489,11 @@ err_clk:
err_mem:
stmmac_remove_config_dt(pdev, plat_dat);
if (ethqos) {
ethqos->driver_load_fail = true;
qcom_ethqos_disable_phy_clks(ethqos);
ethqos_disable_regulators(ethqos);
}
return ret;
}
@ -2489,13 +2561,18 @@ static int qcom_ethqos_suspend(struct device *dev)
return 0;
}
if (pm_suspend_target_state == PM_SUSPEND_MEM)
return qcom_ethqos_hib_freeze(dev);
ethqos = get_stmmac_bsp_priv(dev);
if (!ethqos)
return -ENODEV;
if (ethqos->driver_load_fail) {
ETHQOSINFO("driver load failed\n");
return 0;
}
if (pm_suspend_target_state == PM_SUSPEND_MEM)
return qcom_ethqos_hib_freeze(dev);
ndev = dev_get_drvdata(dev);
if (!ndev)
return -EINVAL;
@ -2527,14 +2604,19 @@ static int qcom_ethqos_resume(struct device *dev)
if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded"))
return 0;
if (pm_suspend_target_state == PM_SUSPEND_MEM)
return qcom_ethqos_hib_restore(dev);
ethqos = get_stmmac_bsp_priv(dev);
if (!ethqos)
return -ENODEV;
if (ethqos->driver_load_fail) {
ETHQOSINFO("driver load failed\n");
return 0;
}
if (pm_suspend_target_state == PM_SUSPEND_MEM)
return qcom_ethqos_hib_restore(dev);
if (ethqos->gdsc_off_on_suspend) {
ret = regulator_enable(ethqos->gdsc_emac);
if (ret)

View file

@ -177,6 +177,7 @@ struct qcom_ethqos {
struct delayed_work ipv4_addr_assign_wq;
struct delayed_work ipv6_addr_assign_wq;
bool early_eth_enabled;
bool driver_load_fail;
/* Key Performance Indicators */
bool print_kpi;

View file

@ -23,6 +23,7 @@
#include <linux/reset.h>
#include <net/page_pool.h>
#include <uapi/linux/bpf.h>
#include <linux/bootmarker_kernel.h>
struct stmmac_resources {
void __iomem *addr;
@ -55,6 +56,11 @@ struct stmmac_tx_info {
#define STMMAC_TBS_AVAIL BIT(0)
#define STMMAC_TBS_EN BIT(1)
#define AIR_BOARD 1
#define STAR_BOARD 2
#define PHY_1G 1
#define PHY_25G 2
/* Frequently used values are kept adjacent for cache effect */
struct stmmac_tx_queue {
u32 tx_count_frames;

View file

@ -1229,7 +1229,11 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_fpe_link_state_handle(priv, true);
if (!priv->boot_kpi) {
#if (IS_ENABLED(CONFIG_BOOTMARKER_PROXY))
bootmarker_place_marker("M - Ethernet is Ready.Link is UP");
#else
pr_info("M - Ethernet is Ready.Link is UP\n");
#endif
priv->boot_kpi = true;
}
}
@ -2854,7 +2858,11 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
priv->xstats.txq_stats[queue].tx_pkt_n++;
if (priv->dev->stats.tx_packets == 1)
#if (IS_ENABLED(CONFIG_BOOTMARKER_PROXY))
bootmarker_place_marker("M - Ethernet first pkt xmit");
#else
pr_info("M - Ethernet first packet transmitted\n");
#endif
}
if (skb)
stmmac_get_tx_hwtstamp(priv, p, skb);
@ -7409,8 +7417,13 @@ int stmmac_dvr_probe(struct device *device,
u32 rxq;
int i, ret = 0;
ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
if (of_property_read_bool(device->of_node, "virtio-mdio"))
ndev = alloc_netdev_mqs(sizeof(struct stmmac_priv), "eth2", NET_NAME_ENUM,
ether_setup, MTL_MAX_TX_QUEUES, MTL_MAX_TX_QUEUES);
else
ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
MTL_MAX_TX_QUEUES, MTL_MAX_TX_QUEUES);
if (!ndev)
return -ENOMEM;

View file

@ -433,6 +433,76 @@ int stmmac_xpcs_setup(struct mii_bus *bus)
return 0;
}
/**
* stmmac_get_phy_addr
* @priv: net device structure
* @new_bus: points to the mii_bus structure
* Description: it finds the PHY address from board and phy_type
*/
int stmmac_get_phy_addr(struct stmmac_priv *priv, struct mii_bus *new_bus,
struct net_device *ndev)
{
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct device_node *np = priv->device->of_node;
unsigned int phyaddr;
int err = 0;
init_completion(&priv->plat->mdio_op);
new_bus->reset = &stmmac_mdio_reset;
new_bus->priv = ndev;
if (priv->plat->phy_type != -1) {
if (priv->plat->phy_type == PHY_1G) {
err = of_property_read_u32(np, "emac-1g-phy-addr", &phyaddr);
new_bus->read = &virtio_mdio_read;
new_bus->write = &virtio_mdio_write;
} else {
new_bus->read = &virtio_mdio_read_c45_indirect;
new_bus->write = &virtio_mdio_write_c45_indirect;
new_bus->probe_capabilities = MDIOBUS_C22_C45;
if (priv->plat->phy_type == PHY_25G &&
priv->plat->board_type == STAR_BOARD) {
err = of_property_read_u32(np,
"emac-star-cl45-phy-addr", &phyaddr);
} else {
err = of_property_read_u32(np,
"emac-air-cl45-phy-addr", &phyaddr);
}
}
} else {
err = of_property_read_u32(np, "emac-1g-phy-addr", &phyaddr);
if (err) {
new_bus->phy_mask = mdio_bus_data->phy_mask;
return -1;
}
new_bus->read = &virtio_mdio_read;
new_bus->write = &virtio_mdio_write;
/* Do MDIO reset before the bus->read call */
err = new_bus->reset(new_bus);
if (err) {
new_bus->phy_mask = ~(1 << phyaddr);
return phyaddr;
}
/* 1G phy check */
err = new_bus->read(new_bus, phyaddr, MII_BMSR);
if (err == -EBUSY || err == 0xffff) {
/* 2.5 G PHY case */
new_bus->read = &virtio_mdio_read_c45_indirect;
new_bus->write = &virtio_mdio_write_c45_indirect;
new_bus->probe_capabilities = MDIOBUS_C22_C45;
err = of_property_read_u32(np,
"emac-air-cl45-phy-addr", &phyaddr);
/* Board Type check */
err = new_bus->read(new_bus, phyaddr, MII_BMSR);
if (err == -EBUSY || !err || err == 0xffff)
err = of_property_read_u32(np,
"emac-star-cl45-phy-addr", &phyaddr);
}
}
new_bus->phy_mask = ~(1 << phyaddr);
return phyaddr;
}
/**
* stmmac_mdio_register
* @ndev: net device structure
@ -474,10 +544,9 @@ int stmmac_mdio_register(struct net_device *ndev)
err = of_property_read_bool(np, "virtio-mdio");
if (err) {
new_bus->read = &virtio_mdio_read;
new_bus->write = &virtio_mdio_write;
init_completion(&priv->plat->mdio_op);
phyaddr = stmmac_get_phy_addr(priv, new_bus, ndev);
max_addr = PHY_MAX_ADDR;
skip_phy_detect = 1;
} else if (priv->plat->has_xgmac) {
new_bus->read = &stmmac_xgmac2_mdio_read;
new_bus->write = &stmmac_xgmac2_mdio_write;
@ -501,24 +570,6 @@ int stmmac_mdio_register(struct net_device *ndev)
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
new_bus->name, priv->plat->bus_id);
new_bus->priv = ndev;
err = of_property_read_u32(np, "emac-phy-addr", &phyaddr);
if (err) {
new_bus->phy_mask = mdio_bus_data->phy_mask;
} else {
err = new_bus->read(new_bus, phyaddr, MII_BMSR);
if (err == -EBUSY || !err || err == 0xffff) {
err = of_property_read_u32(np, "emac-cl45-phy-addr", &phyaddr);
new_bus->phy_mask = ~(1 << phyaddr);
skip_phy_detect = 1;
new_bus->read = &virtio_mdio_read_c45_indirect;
new_bus->write = &virtio_mdio_write_c45_indirect;
new_bus->probe_capabilities = MDIOBUS_C22_C45;
} else {
new_bus->phy_mask = ~(1 << phyaddr);
skip_phy_detect = 1;
}
}
new_bus->parent = priv->device;
err = of_mdiobus_register(new_bus, mdio_node);

View file

@ -367,6 +367,15 @@ config XILINX_GMII2RGMII
the Reduced Gigabit Media Independent Interface(RGMII) between
Ethernet physical media devices and the Gigabit Ethernet controller.
config QCA8337_SWITCH
tristate "Drivers for QTI Atheros QCA8337 switch"
help
	  This enables support for the QTI Atheros QCA8337 Ethernet
	  switch. This driver supports switch functionality over the
	  SGMII interface. It adds the downstream qca8337 driver and
	  supports the emac driver.
endif # PHYLIB
config MICREL_KS8995MA

View file

@ -88,3 +88,4 @@ obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_TERANETICS_PHY) += teranetics.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o
obj-$(CONFIG_QCA8337_SWITCH) += qca8337.o

593
drivers/net/phy/qca8337.c Normal file
View file

@ -0,0 +1,593 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014, 2015, 2017, The Linux Foundation. All rights reserved.
* Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
* Copyright (c) 2016 John Crispin john@phrozen.org
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Author: Matus Ujhelyi <ujhelyi.m@gmail.com>
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* QCA8337 Switch driver
*/
#include <linux/phy.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/qca8337.h>
/* Decompose a 32-bit switch register address into the coordinates of
 * the QCA8337 indirect MDIO access scheme: a 16-bit word offset (*r1),
 * a low phy-address component (*r2) and the page select (*page).
 */
static inline void split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{
	u32 word = regaddr >> 1;

	*r1 = word & 0x1e;
	*r2 = (word >> 5) & 0x7;
	*page = (word >> 8) & 0x1ff;
}
/**
 * qca8337_read() - read a 32-bit switch register over MDIO
 * @priv: switch context
 * @reg: switch register address
 *
 * The QCA8337 register file is reached through 16-bit MDIO cycles: the
 * page register at phy address 0x18 selects the upper address bits,
 * then the low/high halves are read from phy address 0x10|r2 at word
 * offsets r1 and r1+1.  Serialized via the MDIO bus lock.
 *
 * Return: the 32-bit register value.
 */
u32 qca8337_read(struct qca8337_priv *priv, u32 reg)
{
	struct phy_device *phy = priv->phy;
	struct mii_bus *bus = phy->mdio.bus;
	u16 r1, r2, page;
	u16 lo, hi;

	mutex_lock(&bus->mdio_lock);
	split_addr(reg, &r1, &r2, &page);
	bus->write(bus, 0x18, 0, page);
	usleep_range(1000, 2000); /* wait for the page switch to propagate */
	lo = bus->read(bus, 0x10 | r2, r1);
	hi = bus->read(bus, 0x10 | r2, r1 + 1);
	mutex_unlock(&bus->mdio_lock);

	return (hi << 16) | lo;
}
EXPORT_SYMBOL_GPL(qca8337_read);
/**
 * qca8337_write() - write a 32-bit switch register over MDIO
 * @priv: switch context
 * @reg: switch register address
 * @val: value to write
 *
 * Counterpart of qca8337_read(): selects the page (r3 is the page
 * select here), then writes the low and high 16-bit halves.
 * Serialized via the MDIO bus lock.
 */
void qca8337_write(struct qca8337_priv *priv, u32 reg, u32 val)
{
	struct phy_device *phy = priv->phy;
	struct mii_bus *bus = phy->mdio.bus;
	u16 r1, r2, r3;
	u16 lo, hi;

	mutex_lock(&bus->mdio_lock);
	split_addr(reg, &r1, &r2, &r3);
	lo = val & 0xffff;
	hi = (u16)(val >> 16);
	bus->write(bus, 0x18, 0, r3);
	usleep_range(1000, 2000); /* wait for the page switch to propagate */
	bus->write(bus, 0x10 | r2, r1, lo);
	bus->write(bus, 0x10 | r2, r1 + 1, hi);
	mutex_unlock(&bus->mdio_lock);
}
EXPORT_SYMBOL_GPL(qca8337_write);
/* Read-modify-write helper: clear @mask, set @val in register @reg.
 * Returns the value that was written back.
 */
static u32
qca8337_rmw(struct qca8337_priv *priv, u32 reg, u32 mask, u32 val)
{
	u32 updated = (priv->ops->read(priv, reg) & ~mask) | val;

	priv->ops->write(priv, reg, updated);
	return updated;
}
/* Set the bits in @val within register @reg (RMW with an empty clear mask). */
static void
qca8337_reg_set(struct qca8337_priv *priv, u32 reg, u32 val)
{
	qca8337_rmw(priv, reg, 0, val);
}
/* Soft-reset the switch core and wait for the self-clearing RESET bit.
 * Polls up to 100 * 5-10ms.
 */
static void qca8337_reset_switch(struct qca8337_priv *priv)
{
	u32 val = 0;
	int count = 0;

	qca8337_reg_set(priv, QCA8337_REG_MASK_CTRL, QCA8337_CTRL_RESET);

	/* Bug fix: the old test "!val && !(val & QCA8337_CTRL_RESET)"
	 * reduces to "!val", and MASK_CTRL also carries the chip
	 * version/revision id (see qca8337_probe), so it never reads as
	 * zero and the poll always ran to exhaustion.  Break as soon as
	 * the RESET bit itself has cleared.
	 */
	for (count = 0; count < 100; count++) {
		usleep_range(5000, 10000);
		val = priv->ops->read(priv, QCA8337_REG_MASK_CTRL);
		if (!(val & QCA8337_CTRL_RESET))
			break;
	}
}
/* Force ports 0 and 6 (the CPU-facing ports) to 1000M full duplex with
 * TX/RX MAC and TX/RX flow control enabled.
 */
static void
qca8337_port_set_status(struct qca8337_priv *priv)
{
	qca8337_write(priv, QCA8337_REG_PORT_STATUS(0),
		      (QCA8337_PORT_SPEED_1000M | QCA8337_PORT_STATUS_TXMAC |
		      QCA8337_PORT_STATUS_RXMAC | QCA8337_PORT_STATUS_TXFLOW |
		      QCA8337_PORT_STATUS_RXFLOW | QCA8337_PORT_STATUS_DUPLEX));
	qca8337_write(priv, QCA8337_REG_PORT_STATUS(6),
		      (QCA8337_PORT_SPEED_1000M | QCA8337_PORT_STATUS_TXMAC |
		      QCA8337_PORT_STATUS_RXMAC | QCA8337_PORT_STATUS_TXFLOW |
		      QCA8337_PORT_STATUS_RXFLOW | QCA8337_PORT_STATUS_DUPLEX));
}
/* Poll register @reg until the bits in @mask clear, for up to 20ms,
 * yielding between reads.
 *
 * Return: 0 when the flag cleared, non-zero on timeout.
 *
 * Bug fix: the old version broke out of the loop on success but still
 * returned time_after_eq(jiffies, timeout), so a flag that cleared on
 * the final iteration — after the deadline had passed — was reported
 * as a timeout.  Success is now returned from the point of detection.
 */
static int
qca8337_busy_wait(struct qca8337_priv *priv, u32 reg, u32 mask)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(20);

	do {
		if (!(priv->ops->read(priv, reg) & mask))
			return 0;
		cond_resched();
	} while (!time_after_eq(jiffies, deadline));

	/* Deadline passed: one last read decides the verdict. */
	return !!(priv->ops->read(priv, reg) & mask);
}
/* Flush the hardware MIB counters, wait for the flush to complete,
 * keep a CPU copy of the counters, and enable the MIB module.
 */
static void
qca8337_mib_init(struct qca8337_priv *priv)
{
	qca8337_reg_set(priv, QCA8337_REG_MIB,
			QCA8337_MIB_FLUSH | QCA8337_MIB_BUSY);
	qca8337_busy_wait(priv, QCA8337_REG_MIB, QCA8337_MIB_BUSY);
	qca8337_reg_set(priv, QCA8337_REG_MIB, QCA8337_MIB_CPU_KEEP);
	priv->ops->write(priv, QCA8337_REG_MODULE_EN, QCA8337_MODULE_EN_MIB);
}
/* Program the static port-lookup membership and per-port VLAN registers
 * for all seven ports.
 * NOTE(review): the low byte of each PORT_LOOKUP value is the member
 * mask (e.g. 0x7e for port 0 = membership with ports 1-6, each user
 * port excluding itself), and 0x10001 appears to be the default VID
 * setup — confirm the 0x0014xxxx control bits against the QCA8337
 * datasheet before changing.
 */
static void qca8337_vlan_config(struct qca8337_priv *priv)
{
	priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(0), 0x0014007e);
	priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(0), 0x10001);
	priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(1), 0x0014007d);
	priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(1), 0x10001);
	priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(2), 0x0014007b);
	priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(2), 0x10001);
	priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(3), 0x00140077);
	priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(3), 0x10001);
	priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(4), 0x0014006f);
	priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(4), 0x10001);
	priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(5), 0x0014005f);
	priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(5), 0x10001);
	priv->ops->write(priv, QCA8337_REG_PORT_LOOKUP(6), 0x0014001e);
	priv->ops->write(priv, QCA8337_REG_PORT_VLAN0(6), 0x10001);
}
/* One-time switch-core bring-up: pad/clock-delay control for the CPU
 * ports, CPU port enable, MIB counters, flood control, and the static
 * VLAN/port-lookup wiring.  Always returns 0.
 */
static int qca8337_hw_init(struct qca8337_priv *priv)
{
	int i;

	/* set pad control for cpu port */
	qca8337_write(priv, QCA8337_REG_PAD0_CTRL, QCA8337_PAD_SGMII_EN);
	qca8337_write(priv, QCA8337_REG_PAD5_CTRL,
		      QCA8337_PAD_RGMII_RXCLK_DELAY_EN);
	qca8337_write(priv, QCA8337_REG_PAD6_CTRL,
		      (QCA8337_PAD_RGMII_EN | QCA8337_PAD_RGMII_RXCLK_DELAY_EN |
		      (0x1 << QCA8337_PAD_RGMII_TXCLK_DELAY_SEL_S) |
		      (0x2 << QCA8337_PAD_RGMII_RXCLK_DELAY_SEL_S)));

	/* Enable CPU Port */
	qca8337_reg_set(priv, QCA8337_REG_GLOBAL_FW_CTRL0,
			QCA8337_GLOBAL_FW_CTRL0_CPU_PORT_EN);
	qca8337_port_set_status(priv);

	/* Enable MIB counters */
	qca8337_mib_init(priv);

	/* Disable QCA header mode on the cpu port */
	priv->ops->write(priv, QCA8337_REG_PORT_HEADER(priv->cpu_port), 0);

	/* Disable forwarding by default on all ports */
	for (i = 0; i < priv->ports; i++)
		qca8337_rmw(priv, QCA8337_REG_PORT_LOOKUP(i),
			    QCA8337_PORT_LOOKUP_MEMBER, 0);

	/* Flood unknown unicast/multicast/broadcast and IGMP to all ports */
	qca8337_write(priv, QCA8337_REG_GLOBAL_FW_CTRL1,
		      (QCA8337_IGMP_JOIN_LEAVE_DPALL | QCA8337_BROAD_DPALL |
		      QCA8337_MULTI_FLOOD_DPALL | QCA8337_UNI_FLOOD_DPALL));

	/* Setup connection between CPU port & user ports */
	qca8337_vlan_config(priv);

	/* Disable AZ (Energy-Efficient Ethernet) */
	priv->ops->write(priv, QCA8337_REG_EEE_CTRL, QCA8337_EEE_CTRL_DISABLE);
	return 0;
}
/* LAN-mode register init: power-on-strap override, MAC power select and
 * SGMII control, using board-specific values from the header.
 */
static void qca8337_reg_init_lan(struct qca8337_priv *priv)
{
	priv->ops->write(priv, QCA8337_REG_POWER_ON_STRIP,
			 QCA8337_REG_POS_VAL);
	priv->ops->write(priv, QCA8337_MAC_PWR_SEL,
			 QCA8337_MAC_PWR_SEL_VAL);
	priv->ops->write(priv, QCA8337_SGMII_CTRL_REG,
			 QCA8337_SGMII_CTRL_VAL);
}
/* Decode the PORT_STATUS register of @port into @port_link.
 * For autonegotiating ports and all non-CPU ports the live link bit is
 * used (and decoding stops early if the link is down); a CPU port in
 * forced mode is always reported as up.
 */
static void
qca8337_read_port_link(struct qca8337_priv *priv, int port,
		       struct port_link_info *port_link)
{
	u32 status;
	u32 speed;

	memset(port_link, '\0', sizeof(*port_link));
	status = priv->ops->read(priv, QCA8337_REG_PORT_STATUS(port));
	port_link->aneg = !!(status & QCA8337_PORT_STATUS_LINK_AUTO);
	if (port_link->aneg || port != priv->cpu_port) {
		port_link->link = !!(status & QCA8337_PORT_STATUS_LINK_UP);
		if (!port_link->link)
			return;
	} else {
		/* forced CPU port: treat as always linked */
		port_link->link = true;
	}

	port_link->duplex = !!(status & QCA8337_PORT_STATUS_DUPLEX);
	port_link->tx_flow = !!(status & QCA8337_PORT_STATUS_TXFLOW);
	port_link->rx_flow = !!(status & QCA8337_PORT_STATUS_RXFLOW);

	speed = (status & QCA8337_PORT_STATUS_SPEED) >>
		QCA8337_PORT_STATUS_SPEED_S;
	switch (speed) {
	case QCA8337_PORT_SPEED_10M:
		port_link->speed = SPEED_10;
		break;
	case QCA8337_PORT_SPEED_100M:
		port_link->speed = SPEED_100;
		break;
	case QCA8337_PORT_SPEED_1000M:
		port_link->speed = SPEED_1000;
		break;
	default:
		port_link->speed = SPEED_UNKNOWN;
		break;
	}
}
/* Apply autoneg or forced-mode configuration to every user port and
 * every internal PHY behind the switch.
 *
 * In autoneg mode: user ports are set to 0x1280 (aneg-driven MAC
 * config), each PHY advertises multi-port-preferred 1000FULL and is
 * reset with autoneg enabled.
 * In forced mode: each user port's MAC is programmed from phydev's
 * speed/duplex/pause, the MACs are enabled in a second write, and each
 * PHY is forced via genphy_setup_forced(); the first PHY reporting
 * link wins.
 *
 * NOTE(review): writing phydev->drv->phy_id mutates the shared
 * phy_driver structure to steer genphy_* at a given PHY address —
 * this looks fragile (affects all devices bound to the driver);
 * confirm intent.
 */
static void qca8337_phy_enable(struct phy_device *phydev)
{
	int phyid = 0;
	ushort phy_val;
	struct mii_bus *bus;
	struct qca8337_priv *priv = phydev->priv;

	bus = priv->phy->mdio.bus;
	if (phydev->autoneg == AUTONEG_ENABLE) {
		int port;

		for (port = 1; port < priv->ports - 1; port++)
			qca8337_write(priv, QCA8337_REG_PORT_STATUS(port),
				      0x1280);
		for (phyid = 0; phyid < priv->num_phy ; phyid++) {
			/*enable phy prefer multi-port mode*/
			phy_val = mdiobus_read(bus, phyid, MII_CTRL1000);
			phy_val |= (ADVERTISE_MULTI_PORT_PREFER |
				    ADVERTISE_1000FULL);
			mdiobus_write(bus, phyid, MII_CTRL1000, phy_val);
			/*enable extended next page. 0:enable, 1:disable*/
			phy_val = mdiobus_read(bus, phyid, MII_ADVERTISE);
			phy_val &= (~(ADVERTISE_RESV));
			mdiobus_write(bus, phyid, MII_ADVERTISE, phy_val);
			/*Phy power up*/
			mdiobus_write(bus, phyid, MII_BMCR, (BMCR_RESET |
				      BMCR_ANENABLE));
			/* wait for the page switch to propagate */
			usleep_range(100, 200);
		}
	} else {
		int port;
		u32 status = 0;

		linkmode_and(phydev->advertising, phydev->advertising, phydev->supported);
		for (port = 1; port < priv->ports - 1; port++) {
			status = 0;
			status |= phydev->duplex ?
				  QCA8337_PORT_STATUS_DUPLEX : 0;
			status |= (linkmode_test_bit(ADVERTISED_Asym_Pause, phydev->advertising)) ?
				  QCA8337_PORT_STATUS_TXFLOW : 0;
			status |= (linkmode_test_bit(ADVERTISED_Pause, phydev->advertising)) ?
				  QCA8337_PORT_STATUS_RXFLOW : 0;
			if (phydev->speed == SPEED_1000)
				status |= QCA8337_PORT_SPEED_1000M;
			else if (phydev->speed == SPEED_100)
				status |= QCA8337_PORT_SPEED_100M;
			else if (phydev->speed == SPEED_10)
				status |= QCA8337_PORT_SPEED_10M;
			/* program speed/duplex/pause first, MACs disabled */
			qca8337_write(priv, QCA8337_REG_PORT_STATUS(port),
				      status);
			/* wait for the page switch to propagate */
			usleep_range(100, 200);
			/* then enable TX/RX MACs with the same settings */
			status |= QCA8337_PORT_STATUS_TXMAC |
				  QCA8337_PORT_STATUS_RXMAC;
			qca8337_write(priv, QCA8337_REG_PORT_STATUS(port),
				      status);
		}
		for (phyid = 0; phyid < priv->num_phy ; phyid++) {
			phydev->drv->phy_id = phyid;
			genphy_setup_forced(phydev);
		}
		for (phyid = 0; phyid < priv->num_phy ; phyid++) {
			phydev->drv->phy_id = phyid;
			genphy_update_link(phydev);
			if (phydev->link)
				break;
		}
	}
}
/* phylib .config_aneg hook: delegate to the switch-wide port/PHY setup. */
static int qca8337_config_aneg(struct phy_device *phydev)
{
	qca8337_phy_enable(phydev);
	return 0;
}
/* phylib .read_status hook: aggregate link state over all user ports.
 * Reports the highest speed/duplex seen among linked user ports, and
 * reports overall link only when at least one user port AND the CPU
 * port are up.  speed stays -1 (SPEED_UNKNOWN) if nothing is linked.
 */
static int qca8337_read_status(struct phy_device *phydev)
{
	struct qca8337_priv *priv = phydev->priv;
	struct port_link_info port_link;
	int i, port_status = 0;
	int speed = -1, duplex = 0;

	for (i = 1; i < priv->ports - 1; i++) {
		qca8337_read_port_link(priv, i, &port_link);
		if (port_link.link) {
			speed = (speed < port_link.speed) ?
				port_link.speed : speed;
			duplex = (duplex < port_link.duplex) ?
				 port_link.duplex : duplex;
			port_status |= 1 << i;	/* bitmap of linked ports */
		}
	}

	qca8337_read_port_link(priv, priv->cpu_port, &port_link);
	phydev->link = (port_status) ? !!port_link.link : 0;
	phydev->speed = speed;
	phydev->duplex = duplex;
	return 0;
}
/* phylib .aneg_done hook: return a bitmap with one bit per internal
 * PHY whose BMSR reports autonegotiation complete, or a negative MDIO
 * read error.
 */
static int qca8337_aneg_done(struct phy_device *phydev)
{
	struct qca8337_priv *priv = phydev->priv;
	struct mii_bus *bus = priv->phy->mdio.bus;
	int aneg_status = 0;
	int phyid;

	for (phyid = 0; phyid < priv->num_phy; phyid++) {
		int bmsr = mdiobus_read(bus, phyid, MII_BMSR);

		if (bmsr < 0)
			return bmsr;
		if (bmsr & BMSR_ANEGCOMPLETE)
			aneg_status |= 1 << phyid;
	}

	return aneg_status;
}
/* regmap read callback; the switch is only reachable while the PHY
 * link is up, so reads are refused otherwise.
 */
static int
qca8337_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
	struct qca8337_priv *priv = ctx;

	if (!priv->phy->link)
		return -EPERM;

	*val = priv->ops->read(priv, reg);
	return 0;
}
/* regmap write callback; refused while the PHY link is down, matching
 * qca8337_regmap_read().
 */
static int
qca8337_regmap_write(void *ctx, uint32_t reg, uint32_t val)
{
	struct qca8337_priv *priv = ctx;

	if (!priv->phy->link)
		return -EPERM;

	priv->ops->write(priv, reg, val);
	return 0;
}
/* Readable register windows of the QCA8337, exposed through regmap
 * (e.g. for debugfs register dumps).
 */
static const struct regmap_range qca8337_readable_ranges[] = {
	regmap_reg_range(0x0000, 0x00e4), /* Global control registers */
	regmap_reg_range(0x0100, 0x0168), /* EEE control registers */
	regmap_reg_range(0x0200, 0x0270), /* Parser control registers */
	regmap_reg_range(0x0400, 0x0454), /* ACL control registers */
	regmap_reg_range(0x0600, 0x0718), /* Lookup control registers */
	regmap_reg_range(0x0800, 0x0b70), /* QM control registers */
	regmap_reg_range(0x0c00, 0x0c80), /* PKT edit control registers */
	regmap_reg_range(0x0e00, 0x0e98), /* L3 */
	regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
	regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
	regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
	regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
	regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
	regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
	regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
};

static const struct regmap_access_table qca8337_readable_table = {
	.yes_ranges = qca8337_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(qca8337_readable_ranges),
};

/* 32-bit registers at 4-byte stride, accessed via the MDIO-backed
 * read/write callbacks above.
 */
static struct regmap_config qca8337_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x16ac, /* end MIB - Port6 range */
	.reg_read = qca8337_regmap_read,
	.reg_write = qca8337_regmap_write,
	.rd_table = &qca8337_readable_table,
};
/* phylib .config_init hook: soft-reset the switch, run the core HW
 * bring-up and the LAN-mode register init.
 */
static int qca8337_config_init(struct phy_device *phydev)
{
	struct qca8337_priv *priv = phydev->priv;
	int ret = 0;

	/*Software reset*/
	priv->ops->reset_switch(priv);
	/* Add delay to settle reset */
	usleep_range(100, 200);
	ret = priv->ops->hw_init(priv);
	if (ret)
		return ret;
	qca8337_reg_init_lan(priv);
	return 0;
}
/* Default MDIO-backed accessor and bring-up ops for the switch core. */
static struct qca8337_switch_ops switch_ops = {
	.hw_init = qca8337_hw_init,
	.reset_switch = qca8337_reset_switch,
	.read = qca8337_read,
	.write = qca8337_write,
};
/* phylib .probe hook: allocate the per-switch context, set up the
 * regmap, and verify the chip id read from MASK_CTRL.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -ENODEV when
 * the chip version is not a QCA8337.
 *
 * NOTE(review): a regmap_init failure only logs a warning and leaves
 * priv->regmap as an ERR_PTR — later users must not dereference it
 * unchecked; confirm.
 */
static int qca8337_probe(struct phy_device *phydev)
{
	struct device *dev = &phydev->mdio.dev;
	struct qca8337_priv *priv = NULL;
	u32 val = 0;
	u16 id = 0;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->phy = phydev;
	priv->dev = &phydev->mdio.dev;
	priv->cpu_port = QCA8337_CPU_PORT;
	priv->vlans = QCA8337_MAX_VLANS;
	priv->ports = QCA8337_NUM_PORTS;
	priv->num_phy = QCA8337_NUM_PHYS;
	priv->ops = &switch_ops;

	/* Setup the register mapping */
	priv->regmap = devm_regmap_init(priv->dev, NULL, priv,
					&qca8337_regmap_config);
	if (IS_ERR(priv->regmap))
		pr_warn("regmap initialization failed\n");

	/* read the switches ID register */
	val = qca8337_read(priv, QCA8337_REG_MASK_CTRL);
	id = val & (QCA8337_CTRL_REVISION | QCA8337_CTRL_VERSION);
	priv->chip_ver = (id & QCA8337_CTRL_VERSION) >> QCA8337_CTRL_VERSION_S;
	priv->chip_rev = (id & QCA8337_CTRL_REVISION);
	if (priv->chip_ver != QCA8337_ID_QCA8337) {
		dev_err(dev, "qca8337: unknown Atheros device\n");
		dev_err(dev, "[ver=%d, rev=%d, phy_id=%04x%04x]\n",
			priv->chip_ver, priv->chip_rev,
			mdiobus_read(priv->phy->mdio.bus, priv->phy->drv->phy_id, 2),
			mdiobus_read(priv->phy->mdio.bus, priv->phy->drv->phy_id, 3));
		return -ENODEV;
	}

	dev_dbg(dev, "qca8337: Switch probed successfully ");
	dev_dbg(dev, "[ver=%d, rev=%d, phy_id=%04x%04x]\n",
		priv->chip_ver, priv->chip_rev,
		mdiobus_read(priv->phy->mdio.bus, priv->phy->drv->phy_id, 2),
		mdiobus_read(priv->phy->mdio.bus, priv->phy->drv->phy_id, 3));
	phydev->priv = priv;
	return 0;
}
/* phylib .remove hook: nothing to tear down — priv is devm-allocated
 * in qca8337_probe() and freed automatically.
 */
static void qca8337_remove(struct phy_device *phydev)
{
	struct qca8337_priv *priv = phydev->priv;

	if (!priv)
		return;
}
/* phylib driver definition: the whole switch is presented to the MAC
 * as a single gigabit "PHY" at QCA8337_PHY_ID.
 */
static struct phy_driver qca8337_driver = {
	.phy_id = QCA8337_PHY_ID,
	.name = "Atheros QCA8337",
	.phy_id_mask = 0xffffffef,
	.probe = qca8337_probe,
	.config_init = qca8337_config_init,
	.features = PHY_GBIT_FEATURES,
	.flags = PHY_IS_INTERNAL,
	.config_aneg = qca8337_config_aneg,
	.read_status = qca8337_read_status,
	.aneg_done = qca8337_aneg_done,
	.remove = qca8337_remove,
};
/* Module init: register the phylib driver. */
static int __init qca8337_init(void)
{
	return phy_driver_register(&qca8337_driver, THIS_MODULE);
}

/* Module exit: unregister the phylib driver. */
static void __exit qca8337_exit(void)
{
	phy_driver_unregister(&qca8337_driver);
}

module_init(qca8337_init);
module_exit(qca8337_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:qca8337");

View file

@ -37,6 +37,7 @@ source "drivers/net/wireless/st/Kconfig"
source "drivers/net/wireless/ti/Kconfig"
source "drivers/net/wireless/zydas/Kconfig"
source "drivers/net/wireless/quantenna/Kconfig"
source "drivers/net/wireless/cnss/Kconfig"
config PCMCIA_RAYCS
tristate "Aviator/Raytheon 2.4GHz wireless support"

View file

@ -32,3 +32,4 @@ obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
obj-$(CONFIG_VIRT_WIFI) += virt_wifi.o
obj-$(CONFIG_CNSS) += cnss/

View file

@ -0,0 +1,99 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# cnss device configuration
#
config CNSS
tristate "CNSS driver for wifi module"
select CNSS_UTILS
select CRYPTO
select CRYPTO_HASH
select CRYPTO_BLKCIPHER
help
This module adds support for the CNSS connectivity subsystem used
for wifi devices based on the QCA AR6320 chipset.
This driver also adds support to integrate WLAN module to subsystem
restart framework.
config CNSS_SDIO
bool "Enable/disable cnss sdio platform driver for wifi module"
depends on CNSS
depends on MMC
help
This module adds support for the CNSS wlan module interfaced
with SDIO bus.
This driver also adds support to integrate WLAN module to subsystem
restart framework, power on WLAN chip and registered the WLAN module
as a SDIO client device.
config CNSS_MAC_BUG
bool "Enable/disable 0-4K memory initialization for QCA6174"
depends on CNSS
help
	  If enabled, 0-4K memory is reserved for QCA6174 to address
	  a MAC HW bug. The MAC would do an invalid pointer fetch based on
	  the data that was read from 0 to 4K, so fill that region with
	  zeros, at an address for which the PCIe root complex would honor
	  the read without any errors.
config CLD_DEBUG
bool "Enable/disable CLD debug features"
help
WLAN CLD driver uses this config to enable certain debug features.
Some of the debug features may affect performance or may compromise
on security.
Say N, if you are building a release kernel for production use.
Only say Y, if you are building a kernel with debug support.
config CLD_USB_CORE
tristate "Qualcomm Technologies Inc. Core wlan driver for QCA USB interface"
select WIRELESS_EXT
select WEXT_PRIV
select WEXT_CORE
select WEXT_SPY
select NL80211_TESTMODE
help
This section contains the necessary modules needed to enable the
core WLAN driver for Qualcomm Technologies Inc USB wlan chipset.
Select Y to compile the driver in order to have WLAN functionality
support.
config CLD_HL_SDIO_CORE
tristate "Qualcomm Technologies Inc. Core wlan driver for QCA SDIO interface"
select WIRELESS_EXT
select WEXT_PRIV
select WEXT_CORE
select WEXT_SPY
select NL80211_TESTMODE
depends on ARCH_QCOM
depends on MMC
config CLD_LL_CORE
tristate "Qualcomm Technologies Inc. Core wlan driver"
select NL80211_TESTMODE
select WEXT_CORE
select WEXT_PRIV
select WEXT_SPY
select WIRELESS_EXT
help
This section contains the necessary modules needed to enable the
core WLAN driver for Qualcomm Technologies Inc QCA6174 chipset.
Select Y to compile the driver in order to have WLAN functionality
support.
config CNSS_SECURE_FW
bool "Enable/Disable Memory Allocation for Secure Firmware Feature"
depends on CNSS
help
CLD Driver can use this for holding local copy of firmware
binaries which is used for sha crypto computation.
The Memory Allocation is done only if this Config Parameter is
enabled
config WLAN_FEATURE_RX_WAKELOCK
bool "Enable RX wake lock feature"
help
Enable WLAN_FEATURE_HOLD_RX_WAKELOCK which is required to take rx
wakelock when driver receives packets from fw.

View file

@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for CNSS platform driver
#
obj-$(CONFIG_CNSS_SDIO) += cnss_sdio.o
obj-$(CONFIG_CNSS) += cnss_common.o

View file

@ -0,0 +1,438 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/pm_wakeup.h>
#include <linux/sched/debug.h>
#include <linux/suspend.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <net/cnss.h>
#include "cnss_common.h"
#include <net/cfg80211.h>
#define AR6320_REV1_VERSION 0x5000000
#define AR6320_REV1_1_VERSION 0x5000001
#define AR6320_REV1_3_VERSION 0x5000003
#define AR6320_REV2_1_VERSION 0x5010000
#define AR6320_REV3_VERSION 0x5020000
#define AR6320_REV3_2_VERSION 0x5030000
#define AR900B_DEV_VERSION 0x1000000
#define QCA9377_REV1_1_VERSION 0x5020001
static struct cnss_fw_files FW_FILES_QCA6174_FW_1_1 = {
"qwlan11.bin", "bdwlan11.bin", "otp11.bin", "utf11.bin",
"utfbd11.bin", "epping11.bin", "evicted11.bin"};
static struct cnss_fw_files FW_FILES_QCA6174_FW_2_0 = {
"qwlan20.bin", "bdwlan20.bin", "otp20.bin", "utf20.bin",
"utfbd20.bin", "epping20.bin", "evicted20.bin"};
static struct cnss_fw_files FW_FILES_QCA6174_FW_1_3 = {
"qwlan13.bin", "bdwlan13.bin", "otp13.bin", "utf13.bin",
"utfbd13.bin", "epping13.bin", "evicted13.bin"};
static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = {
"qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin",
"utfbd30.bin", "epping30.bin", "evicted30.bin"};
static struct cnss_fw_files FW_FILES_DEFAULT = {
"qwlan.bin", "bdwlan.bin", "otp.bin", "utf.bin",
"utfbd.bin", "epping.bin", "evicted.bin"};
enum cnss_dev_bus_type {
CNSS_BUS_NONE = -1,
CNSS_BUS_PCI,
CNSS_BUS_SDIO
};
static DEFINE_MUTEX(unsafe_channel_list_lock);
static DEFINE_MUTEX(dfs_nol_info_lock);
static struct cnss_unsafe_channel_list {
u16 unsafe_ch_count;
u16 unsafe_ch_list[CNSS_MAX_CH_NUM];
} unsafe_channel_list;
static struct cnss_dfs_nol_info {
void *dfs_nol_info;
u16 dfs_nol_info_len;
} dfs_nol_info;
static enum cnss_cc_src cnss_cc_source = CNSS_SOURCE_CORE;
/*
 * Cache the WLAN unsafe (interference-prone) channel list so clients
 * can query it later via cnss_get_wlan_unsafe_channel().
 * Returns 0 on success, -EINVAL on a NULL list or oversized count.
 */
int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
{
	/* Reject bad input up front; the lock only guards the copy. */
	if (!unsafe_ch_list || ch_count > CNSS_MAX_CH_NUM)
		return -EINVAL;

	mutex_lock(&unsafe_channel_list_lock);
	unsafe_channel_list.unsafe_ch_count = ch_count;
	if (ch_count)
		memcpy(unsafe_channel_list.unsafe_ch_list, unsafe_ch_list,
		       ch_count * sizeof(u16));
	mutex_unlock(&unsafe_channel_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(cnss_set_wlan_unsafe_channel);
/*
 * Copy the cached unsafe channel list into the caller's buffer.
 * Returns 0 on success, -EINVAL on NULL arguments, -ENOMEM when
 * @buf_len is too small for the cached list.
 */
int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list,
				 u16 *ch_count, u16 buf_len)
{
	int ret = 0;

	if (!unsafe_ch_list || !ch_count)
		return -EINVAL;

	mutex_lock(&unsafe_channel_list_lock);
	if (buf_len < unsafe_channel_list.unsafe_ch_count * sizeof(u16)) {
		ret = -ENOMEM;
	} else {
		*ch_count = unsafe_channel_list.unsafe_ch_count;
		memcpy(unsafe_ch_list, unsafe_channel_list.unsafe_ch_list,
		       unsafe_channel_list.unsafe_ch_count * sizeof(u16));
	}
	mutex_unlock(&unsafe_channel_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(cnss_get_wlan_unsafe_channel);
/*
 * Replace the cached DFS non-occupancy-list blob with a copy of @info.
 * Returns 0 on success, -EINVAL on empty input, -ENOMEM when the copy
 * cannot be allocated.
 */
int cnss_wlan_set_dfs_nol(const void *info, u16 info_len)
{
	void *copy;

	if (!info || !info_len)
		return -EINVAL;

	/* Duplicate outside the lock; only the swap needs serializing. */
	copy = kmemdup(info, info_len, GFP_KERNEL);
	if (!copy)
		return -ENOMEM;

	mutex_lock(&dfs_nol_info_lock);
	kfree(dfs_nol_info.dfs_nol_info);
	dfs_nol_info.dfs_nol_info = copy;
	dfs_nol_info.dfs_nol_info_len = info_len;
	mutex_unlock(&dfs_nol_info_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(cnss_wlan_set_dfs_nol);
/*
 * Copy up to @info_len bytes of the cached DFS NOL blob into @info.
 * Returns the number of bytes copied, -EINVAL on empty arguments, or
 * -ENOENT when no NOL data has been cached yet.
 */
int cnss_wlan_get_dfs_nol(void *info, u16 info_len)
{
	int len;

	if (!info || !info_len)
		return -EINVAL;

	mutex_lock(&dfs_nol_info_lock);
	if (!dfs_nol_info.dfs_nol_info || !dfs_nol_info.dfs_nol_info_len) {
		len = -ENOENT;
	} else {
		len = min(info_len, dfs_nol_info.dfs_nol_info_len);
		memcpy(info, dfs_nol_info.dfs_nol_info, len);
	}
	mutex_unlock(&dfs_nol_info_lock);

	return len;
}
EXPORT_SYMBOL_GPL(cnss_wlan_get_dfs_nol);
/* Thin wrapper so WLAN client drivers can initialize a work_struct
 * without depending on the workqueue macro directly. */
void cnss_init_work(struct work_struct *work, work_func_t func)
{
	INIT_WORK(work, func);
}
EXPORT_SYMBOL_GPL(cnss_init_work);
/* Cancel @work and wait for a running instance to finish.
 * NOTE: despite the name this cancels rather than flushes the work. */
void cnss_flush_work(void *work)
{
	struct work_struct *cnss_work = work;

	cancel_work_sync(cnss_work);
}
EXPORT_SYMBOL_GPL(cnss_flush_work);
/* Cancel delayed work @dwork and wait for a running instance to finish.
 * NOTE: despite the name this cancels rather than flushes the work. */
void cnss_flush_delayed_work(void *dwork)
{
	struct delayed_work *cnss_dwork = dwork;

	cancel_delayed_work_sync(cnss_dwork);
}
EXPORT_SYMBOL_GPL(cnss_flush_delayed_work);
/* Register a named wakeup source and return it through @ws.
 * Pair with cnss_pm_wake_lock_destroy(). */
void cnss_pm_wake_lock_init(struct wakeup_source **ws, const char *name)
{
	*ws = wakeup_source_register(NULL, name);
}
EXPORT_SYMBOL_GPL(cnss_pm_wake_lock_init);
/* Hold the wakeup source, preventing system suspend until released. */
void cnss_pm_wake_lock(struct wakeup_source *ws)
{
	__pm_stay_awake(ws);
}
EXPORT_SYMBOL_GPL(cnss_pm_wake_lock);
/* Hold the wakeup source for @msec milliseconds, then auto-release. */
void cnss_pm_wake_lock_timeout(struct wakeup_source *ws, ulong msec)
{
	__pm_wakeup_event(ws, msec);
}
EXPORT_SYMBOL_GPL(cnss_pm_wake_lock_timeout);
/* Release the wakeup source, allowing system suspend again. */
void cnss_pm_wake_lock_release(struct wakeup_source *ws)
{
	__pm_relax(ws);
}
EXPORT_SYMBOL_GPL(cnss_pm_wake_lock_release);
/* Unregister and free a wakeup source created by
 * cnss_pm_wake_lock_init(). */
void cnss_pm_wake_lock_destroy(struct wakeup_source *ws)
{
	wakeup_source_unregister(ws);
}
EXPORT_SYMBOL_GPL(cnss_pm_wake_lock_destroy);
/* Fill @ts from CLOCK_BOOTTIME (monotonic, includes suspended time). */
void cnss_get_monotonic_boottime(struct timespec64 *ts)
{
	ktime_get_boottime_ts64(ts);
}
EXPORT_SYMBOL_GPL(cnss_get_monotonic_boottime);
/* Fill @ts from CLOCK_MONOTONIC (does not advance across suspend). */
void cnss_get_boottime(struct timespec64 *ts)
{
	ktime_get_ts64(ts);
}
EXPORT_SYMBOL_GPL(cnss_get_boottime);
/* Thin wrapper so WLAN client drivers can initialize a delayed_work
 * without depending on the workqueue macro directly. */
void cnss_init_delayed_work(struct delayed_work *work, work_func_t func)
{
	INIT_DELAYED_WORK(work, func);
}
EXPORT_SYMBOL_GPL(cnss_init_delayed_work);
/* Forward a cfg80211 vendor-command reply skb; returns the
 * cfg80211_vendor_cmd_reply() result. */
int cnss_vendor_cmd_reply(struct sk_buff *skb)
{
	return cfg80211_vendor_cmd_reply(skb);
}
EXPORT_SYMBOL_GPL(cnss_vendor_cmd_reply);
/* Pin @task to the single CPU @cpu; returns the scheduler's result. */
int cnss_set_cpus_allowed_ptr(struct task_struct *task, ulong cpu)
{
	return set_cpus_allowed_ptr(task, cpumask_of(cpu));
}
EXPORT_SYMBOL_GPL(cnss_set_cpus_allowed_ptr);
/*
 * The proprietary WLAN driver cannot call show_stack() directly, so it
 * calls this wrapper to dump @task's kernel stack to the log.
 */
void cnss_dump_stack(struct task_struct *task)
{
	show_stack(task, NULL, NULL);
}
EXPORT_SYMBOL_GPL(cnss_dump_stack);
/* Return the bus-specific platform ops stored in @dev->platform_data,
 * or NULL when @dev is NULL. */
struct cnss_dev_platform_ops *cnss_get_platform_ops(struct device *dev)
{
	return dev ? dev->platform_data : NULL;
}
/* Dispatch a bus-bandwidth vote to the bus driver's callback.
 * Returns the callback result, or -EINVAL when no callback exists. */
int cnss_common_request_bus_bandwidth(struct device *dev, int bandwidth)
{
	struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev);

	if (!pf_ops || !pf_ops->request_bus_bandwidth)
		return -EINVAL;

	return pf_ops->request_bus_bandwidth(bandwidth);
}
EXPORT_SYMBOL_GPL(cnss_common_request_bus_bandwidth);
/* Fetch the virtual ramdump region from the bus driver's callback.
 * Returns NULL when no callback is installed. */
void *cnss_common_get_virt_ramdump_mem(struct device *dev, unsigned long *size)
{
	struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev);

	if (!pf_ops || !pf_ops->get_virt_ramdump_mem)
		return NULL;

	return pf_ops->get_virt_ramdump_mem(size);
}
EXPORT_SYMBOL_GPL(cnss_common_get_virt_ramdump_mem);
/* Trigger device self-recovery via the bus driver's callback;
 * silently a no-op when no callback is installed. */
void cnss_common_device_self_recovery(struct device *dev)
{
	struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev);

	if (!pf_ops || !pf_ops->device_self_recovery)
		return;

	pf_ops->device_self_recovery();
}
EXPORT_SYMBOL_GPL(cnss_common_device_self_recovery);
/* Schedule deferred recovery work via the bus driver's callback;
 * silently a no-op when no callback is installed. */
void cnss_common_schedule_recovery_work(struct device *dev)
{
	struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev);

	if (!pf_ops || !pf_ops->schedule_recovery_work)
		return;

	pf_ops->schedule_recovery_work();
}
EXPORT_SYMBOL_GPL(cnss_common_schedule_recovery_work);
/* Notify the bus driver that the WLAN device crashed;
 * silently a no-op when no callback is installed. */
void cnss_common_device_crashed(struct device *dev)
{
	struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev);

	if (!pf_ops || !pf_ops->device_crashed)
		return;

	pf_ops->device_crashed();
}
EXPORT_SYMBOL_GPL(cnss_common_device_crashed);
/* Query the provisioned WLAN MAC address(es) from the bus driver.
 * Returns NULL when no callback is installed. */
u8 *cnss_common_get_wlan_mac_address(struct device *dev, u32 *num)
{
	struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev);

	if (!pf_ops || !pf_ops->get_wlan_mac_address)
		return NULL;

	return pf_ops->get_wlan_mac_address(num);
}
EXPORT_SYMBOL_GPL(cnss_common_get_wlan_mac_address);
/* Store a WLAN MAC address via the bus driver's callback.
 * Returns the callback result, or -EINVAL when no callback exists. */
int cnss_common_set_wlan_mac_address(struct device *dev, const u8 *in, u32 len)
{
	struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev);

	if (!pf_ops || !pf_ops->set_wlan_mac_address)
		return -EINVAL;

	return pf_ops->set_wlan_mac_address(in, len);
}
EXPORT_SYMBOL_GPL(cnss_common_set_wlan_mac_address);
/* Power the WLAN device up via the bus driver's callback.
 * Returns the callback result, or -EINVAL when no callback exists. */
int cnss_power_up(struct device *dev)
{
	struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev);

	if (!pf_ops || !pf_ops->power_up)
		return -EINVAL;

	return pf_ops->power_up(dev);
}
EXPORT_SYMBOL_GPL(cnss_power_up);
/* Power the WLAN device down via the bus driver's callback.
 * Returns the callback result, or -EINVAL when no callback exists. */
int cnss_power_down(struct device *dev)
{
	struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev);

	if (!pf_ops || !pf_ops->power_down)
		return -EINVAL;

	return pf_ops->power_down(dev);
}
EXPORT_SYMBOL_GPL(cnss_power_down);
/* Fill @pfw_files with the QCA9377 firmware file names: the default
 * set for dual-firmware (tufello) parts, the FW 3.0 set otherwise. */
void cnss_get_qca9377_fw_files(struct cnss_fw_files *pfw_files,
			       u32 size, u32 tufello_dual_fw)
{
	const struct cnss_fw_files *src = tufello_dual_fw ?
			&FW_FILES_DEFAULT : &FW_FILES_QCA6174_FW_3_0;

	memcpy(pfw_files, src, sizeof(*pfw_files));
}
EXPORT_SYMBOL_GPL(cnss_get_qca9377_fw_files);
/*
 * Select the firmware file-name set matching @target_version and copy
 * it into @pfw_files.  Unknown versions fall back to the default set
 * (with an error logged).  Returns 0, or -ENODEV on a NULL output.
 */
int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
				 u32 target_type, u32 target_version)
{
	const struct cnss_fw_files *src;

	if (!pfw_files)
		return -ENODEV;

	switch (target_version) {
	case AR6320_REV1_VERSION:
	case AR6320_REV1_1_VERSION:
		src = &FW_FILES_QCA6174_FW_1_1;
		break;
	case AR6320_REV1_3_VERSION:
		src = &FW_FILES_QCA6174_FW_1_3;
		break;
	case AR6320_REV2_1_VERSION:
		src = &FW_FILES_QCA6174_FW_2_0;
		break;
	case AR6320_REV3_VERSION:
	case AR6320_REV3_2_VERSION:
		src = &FW_FILES_QCA6174_FW_3_0;
		break;
	default:
		src = &FW_FILES_DEFAULT;
		pr_err("%s default version 0x%X 0x%X\n", __func__,
		       target_type, target_version);
		break;
	}

	memcpy(pfw_files, src, sizeof(*pfw_files));
	return 0;
}
EXPORT_SYMBOL_GPL(cnss_get_fw_files_for_target);
/* Record where the WLAN country code came from (core/user/11d).
 * No locking: a single enum write — callers are expected to serialize. */
void cnss_set_cc_source(enum cnss_cc_src cc_source)
{
	cnss_cc_source = cc_source;
}
EXPORT_SYMBOL_GPL(cnss_set_cc_source);
/* Return the last country-code source set by cnss_set_cc_source(). */
enum cnss_cc_src cnss_get_cc_source(void)
{
	return cnss_cc_source;
}
EXPORT_SYMBOL_GPL(cnss_get_cc_source);
/* Return the evicted-data firmware file name from the QCA6174 FW 3.0
 * set ("evicted30.bin"). */
const char *cnss_wlan_get_evicted_data_file(void)
{
	return FW_FILES_QCA6174_FW_3_0.evicted_data;
}
/* Register a TSF-capture interrupt handler via the bus driver.
 * Returns the callback result, or -EINVAL when no callback exists. */
int cnss_common_register_tsf_captured_handler(struct device *dev,
					      irq_handler_t handler, void *ctx)
{
	struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev);

	if (!pf_ops || !pf_ops->register_tsf_captured_handler)
		return -EINVAL;

	return pf_ops->register_tsf_captured_handler(handler, ctx);
}
EXPORT_SYMBOL_GPL(cnss_common_register_tsf_captured_handler);
/* Unregister a TSF-capture interrupt handler via the bus driver.
 * Returns the callback result, or -EINVAL when no callback exists. */
int cnss_common_unregister_tsf_captured_handler(struct device *dev,
						void *ctx)
{
	struct cnss_dev_platform_ops *pf_ops = cnss_get_platform_ops(dev);

	if (!pf_ops || !pf_ops->unregister_tsf_captured_handler)
		return -EINVAL;

	return pf_ops->unregister_tsf_captured_handler(ctx);
}
EXPORT_SYMBOL_GPL(cnss_common_unregister_tsf_captured_handler);

View file

@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _NET_CNSS_COMMON_H_
#define _NET_CNSS_COMMON_H_
/* max 20mhz channel count */
#define CNSS_MAX_CH_NUM 45
struct cnss_cap_tsf_info {
int irq_num;
void *context;
irq_handler_t irq_handler;
};
/*
 * struct cnss_dev_platform_ops - per-bus (PCIe/SDIO) callback table.
 * Retrieved from dev->platform_data by cnss_get_platform_ops(); the
 * cnss_common_*() wrappers dispatch through these and treat a missing
 * callback as -EINVAL/NULL.
 */
struct cnss_dev_platform_ops {
	int (*request_bus_bandwidth)(int bandwidth);
	void* (*get_virt_ramdump_mem)(unsigned long *size);
	void (*device_self_recovery)(void);
	void (*schedule_recovery_work)(void);
	void (*device_crashed)(void);
	u8 * (*get_wlan_mac_address)(u32 *num);
	int (*set_wlan_mac_address)(const u8 *in, u32 len);
	int (*power_up)(struct device *dev);
	int (*power_down)(struct device *dev);
	int (*register_tsf_captured_handler)(irq_handler_t handler,
					     void *adapter);
	int (*unregister_tsf_captured_handler)(void *adapter);
};
int cnss_pci_request_bus_bandwidth(int bandwidth);
int cnss_sdio_request_bus_bandwidth(int bandwidth);
void cnss_sdio_device_crashed(void);
void cnss_pci_device_crashed(void);
void cnss_pci_device_self_recovery(void);
void cnss_sdio_device_self_recovery(void);
void *cnss_pci_get_virt_ramdump_mem(unsigned long *size);
void *cnss_sdio_get_virt_ramdump_mem(unsigned long *size);
void cnss_sdio_schedule_recovery_work(void);
void cnss_pci_schedule_recovery_work(void);
int cnss_pcie_set_wlan_mac_address(const u8 *in, u32 len);
int cnss_sdio_set_wlan_mac_address(const u8 *in, u32 len);
u8 *cnss_pci_get_wlan_mac_address(u32 *num);
u8 *cnss_sdio_get_wlan_mac_address(u32 *num);
int cnss_sdio_power_up(struct device *dev);
int cnss_sdio_power_down(struct device *dev);
int cnss_pcie_power_up(struct device *dev);
int cnss_pcie_power_down(struct device *dev);
const char *cnss_wlan_get_evicted_data_file(void);
#endif /* _NET_CNSS_COMMON_H_ */

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -295,6 +295,18 @@ config BATTERY_BQ27XXX_DT_UPDATES_NVM
general-purpose kernels, as this can cause misconfiguration of a
smart battery with embedded NVM/flash.
config BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM
bool "BQ27xxx resistance table update of NVM/flash data memory"
depends on BATTERY_BQ27XXX_DT_UPDATES_NVM
help
Say Y here to enable devicetree monitored-battery resistance table config
and Qmax-cell0 value in the NVM/flash data memory. Only enable this option
when calibrated resistance table and Qmax-Cell0 parameters for the battery
in-use are updated in DT. If the Battery specific data is not available
in DT, then this config should not be set to Y. Not for general-purpose
kernels, as this can cause misconfiguration of a smart battery with
embedded NVM/flash.
config BATTERY_DA9030
tristate "DA9030 battery driver"
depends on PMIC_DA903X

View file

@ -18,6 +18,8 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/extcon-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of_gpio.h>
#define BQ256XX_MANUFACTURER "Texas Instruments"
@ -145,6 +147,8 @@
#define BQ256XX_REG_RST BIT(7)
#define BQ256XX_MAX_INPUT_VOLTAGE_UV 5400000
/**
* struct bq256xx_init_data -
* @ichg: fast charge current
@ -216,6 +220,7 @@ enum bq256xx_id {
* @charger: power supply registered for the charger
* @battery: power supply registered for the battery
* @lock: mutex lock structure
* @irq_lock: mutex lock structure for irq
*
* @usb2_phy: usb_phy identifier
* @usb3_phy: usb_phy identifier
@ -229,6 +234,9 @@ enum bq256xx_id {
* @chip_info: device variant information
* @state: device status and faults
* @watchdog_timer: watchdog timer value in milliseconds
*
* @irq_waiting: flag for status of irq waiting
* @resume_completed: suspend/resume flag
*/
struct bq256xx_device {
struct i2c_client *client;
@ -236,6 +244,8 @@ struct bq256xx_device {
struct power_supply *charger;
struct power_supply *battery;
struct mutex lock;
struct mutex irq_lock;
struct regmap *regmap;
struct usb_phy *usb2_phy;
@ -252,6 +262,11 @@ struct bq256xx_device {
int watchdog_timer;
/* extcon for VBUS / ID notification to USB*/
struct extcon_dev *extcon;
bool irq_waiting;
bool resume_completed;
/* debug_board_gpio to deteect the debug board*/
int debug_board_gpio;
};
/**
@ -1170,6 +1185,15 @@ static irqreturn_t bq256xx_irq_handler_thread(int irq, void *private)
struct bq256xx_state state;
int ret;
mutex_lock(&bq->irq_lock);
bq->irq_waiting = true;
if (!bq->resume_completed) {
pr_debug("IRQ triggered before device-resume\n");
disable_irq_nosync(irq);
mutex_unlock(&bq->irq_lock);
return IRQ_HANDLED;
}
ret = bq256xx_get_state(bq, &state);
if (ret < 0)
goto irq_out;
@ -1184,6 +1208,8 @@ static irqreturn_t bq256xx_irq_handler_thread(int irq, void *private)
power_supply_changed(bq->charger);
irq_out:
bq->irq_waiting = false;
mutex_unlock(&bq->irq_lock);
return IRQ_HANDLED;
}
@ -1524,6 +1550,30 @@ static int bq256xx_power_supply_init(struct bq256xx_device *bq,
return 0;
}
/*
 * Detect an attached debug board via the optional DT property
 * "debugboard-detect-gpio".  When the GPIO reads high, raise the input
 * voltage limit (vindpm) to the maximum so the debug board's supply is
 * accepted.  Returns 0 when the property is absent or on success, or a
 * negative errno if the GPIO cannot be resolved.
 */
static int bq256xx_debug_board_detect(struct bq256xx_device *bq)
{
	int ret = 0;

	if (!of_find_property(bq->dev->of_node, "debugboard-detect-gpio", NULL))
		return ret;

	bq->debug_board_gpio = of_get_named_gpio(bq->dev->of_node,
				"debugboard-detect-gpio", 0);
	/*
	 * Bug fix: of_get_named_gpio() returns a GPIO number or a negative
	 * errno, not an ERR_PTR.  The old IS_ERR(&bq->debug_board_gpio)
	 * tested the address of the struct member and could never be true,
	 * so lookup failures were silently used as GPIO numbers.
	 */
	if (!gpio_is_valid(bq->debug_board_gpio)) {
		ret = bq->debug_board_gpio;
		dev_err(bq->dev, "Failed to initialize debugboard_detecte gpio\n");
		return ret;
	}

	gpio_direction_input(bq->debug_board_gpio);

	if (gpio_get_value(bq->debug_board_gpio)) {
		bq->init_data.vindpm = BQ256XX_MAX_INPUT_VOLTAGE_UV;
		dev_info(bq->dev,
			 "debug_board detected, setting vindpm to %d\n", bq->init_data.vindpm);
	}
	return ret;
}
static int bq256xx_hw_init(struct bq256xx_device *bq)
{
struct power_supply_battery_info *bat_info;
@ -1579,6 +1629,10 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
bat_info->constant_charge_voltage_max_uv;
}
ret = bq256xx_debug_board_detect(bq);
if (ret)
return ret;
ret = bq->chip_info->bq256xx_set_vindpm(bq, bq->init_data.vindpm);
if (ret)
return ret;
@ -1661,8 +1715,10 @@ static int bq256xx_probe(struct i2c_client *client,
bq->client = client;
bq->dev = dev;
bq->chip_info = &bq256xx_chip_info_tbl[id->driver_data];
bq->resume_completed = true;
mutex_init(&bq->lock);
mutex_init(&bq->irq_lock);
strncpy(bq->model_name, id->name, I2C_NAME_SIZE);
@ -1701,18 +1757,6 @@ static int bq256xx_probe(struct i2c_client *client,
usb_register_notifier(bq->usb3_phy, &bq->usb_nb);
}
if (client->irq) {
ret = devm_request_threaded_irq(dev, client->irq, NULL,
bq256xx_irq_handler_thread,
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
dev_name(&client->dev), bq);
if (ret < 0) {
dev_err(dev, "get irq fail: %d\n", ret);
return ret;
}
}
ret = bq256xx_power_supply_init(bq, &psy_cfg, dev);
if (ret) {
dev_err(dev, "Failed to register power supply\n");
@ -1747,6 +1791,23 @@ static int bq256xx_probe(struct i2c_client *client,
extcon_set_state_sync(bq->extcon, EXTCON_USB, !!state.vbus_gd);
if (client->irq) {
ret = devm_request_threaded_irq(dev, client->irq, NULL,
bq256xx_irq_handler_thread,
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
dev_name(&client->dev), bq);
if (ret < 0) {
dev_err(dev, "get irq fail: %d\n", ret);
return ret;
}
enable_irq_wake(client->irq);
}
dev_dbg(dev, "bq256xx successfully probed. charger=0x%x\n",
state.vbus_gd);
return ret;
}
@ -1786,11 +1847,92 @@ static const struct acpi_device_id bq256xx_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, bq256xx_acpi_match);
/*
 * PM suspend: mark resume as not completed so the threaded IRQ handler
 * defers (and disables the IRQ) instead of touching the device while
 * it is suspended.  See bq256xx_irq_handler_thread()/bq256xx_resume().
 */
static int bq256xx_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct bq256xx_device *bq = i2c_get_clientdata(client);

	mutex_lock(&bq->irq_lock);
	bq->resume_completed = false;
	mutex_unlock(&bq->irq_lock);

	return 0;
}
/*
 * Late suspend check: abort the suspend (-EBUSY) if an interrupt fired
 * after bq256xx_suspend() marked the device as suspending, so the
 * pending charger event is serviced first.
 */
static int bq256xx_suspend_noirq(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct bq256xx_device *bq = i2c_get_clientdata(client);

	if (bq->irq_waiting) {
		dev_err_ratelimited(dev, "Aborting suspend, an interrupt was detected while suspending\n");
		return -EBUSY;
	}
	return 0;
}
/*
 * PM resume: clear the suspend flag, then replay any interrupt that
 * arrived while suspended (the handler disabled the IRQ in that case,
 * so re-enable it after servicing).
 */
static int bq256xx_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct bq256xx_device *bq = i2c_get_clientdata(client);

	mutex_lock(&bq->irq_lock);
	bq->resume_completed = true;
	mutex_unlock(&bq->irq_lock);

	if (bq->irq_waiting) {
		/* irq was pending, call the handler */
		bq256xx_irq_handler_thread(client->irq, bq);
		enable_irq(client->irq);
	}
	return 0;
}
/*
 * Hibernation restore: the cable state may have changed while the
 * image was saved, so run the handler once to resync the extcon state,
 * then re-request the threaded IRQ and re-arm it as a wake source.
 */
static int bq256xx_restore(struct device *dev)
{
	int ret = 0;
	struct i2c_client *client = to_i2c_client(dev);
	struct bq256xx_device *bq = i2c_get_clientdata(client);

	if (client->irq > 0) {
		disable_irq_nosync(client->irq);
		devm_free_irq(dev, client->irq, bq);

		/*
		 * Set extcon state depending upon USB connect/disconnect state
		 * on hibernation exit
		 */
		bq256xx_irq_handler_thread(client->irq, bq);

		ret = devm_request_threaded_irq(dev, client->irq, NULL,
						bq256xx_irq_handler_thread,
						IRQF_TRIGGER_FALLING |
						IRQF_ONESHOT,
						dev_name(&client->dev), bq);
		if (ret < 0) {
			dev_err(dev, "get irq fail: %d\n", ret);
			return ret;
		}
		enable_irq_wake(client->irq);
	}
	return ret;
}
static const struct dev_pm_ops bq256xx_pm_ops = {
.suspend = bq256xx_suspend,
.suspend_noirq = bq256xx_suspend_noirq,
.resume = bq256xx_resume,
.restore = bq256xx_restore,
};
static struct i2c_driver bq256xx_driver = {
.driver = {
.name = "bq256xx-charger",
.of_match_table = bq256xx_of_match,
.acpi_match_table = bq256xx_acpi_match,
.pm = &bq256xx_pm_ops,
},
.probe = bq256xx_probe,
.id_table = bq256xx_i2c_ids,

View file

@ -866,6 +866,25 @@ enum bq27xxx_dm_reg_id {
BQ27XXX_DM_DESIGN_CAPACITY = 0,
BQ27XXX_DM_DESIGN_ENERGY,
BQ27XXX_DM_TERMINATE_VOLTAGE,
#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM
BQ27XXX_DM_TAPER_RATE,
BQ27XXX_DM_QMAX,
BQ27XXX_RAM_R_A0_0,
BQ27XXX_RAM_R_A0_1,
BQ27XXX_RAM_R_A0_2,
BQ27XXX_RAM_R_A0_3,
BQ27XXX_RAM_R_A0_4,
BQ27XXX_RAM_R_A0_5,
BQ27XXX_RAM_R_A0_6,
BQ27XXX_RAM_R_A0_7,
BQ27XXX_RAM_R_A0_8,
BQ27XXX_RAM_R_A0_9,
BQ27XXX_RAM_R_A0_10,
BQ27XXX_RAM_R_A0_11,
BQ27XXX_RAM_R_A0_12,
BQ27XXX_RAM_R_A0_13,
BQ27XXX_RAM_R_A0_14,
#endif
};
#define bq27000_dm_regs NULL
@ -920,6 +939,25 @@ static struct bq27xxx_dm_reg bq27421_dm_regs[] = {
[BQ27XXX_DM_DESIGN_CAPACITY] = { 82, 10, 2, 0, 8000 },
[BQ27XXX_DM_DESIGN_ENERGY] = { 82, 12, 2, 0, 32767 },
[BQ27XXX_DM_TERMINATE_VOLTAGE] = { 82, 16, 2, 2500, 3700 },
#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM
[BQ27XXX_DM_TAPER_RATE] = { 82, 27, 2, 0, 2000 }, /* Taper rate */
[BQ27XXX_DM_QMAX] = { 82, 0, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_0] = { 89, 0, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_1] = { 89, 2, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_2] = { 89, 4, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_3] = { 89, 6, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_4] = { 89, 8, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_5] = { 89, 10, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_6] = { 89, 12, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_7] = { 89, 14, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_8] = { 89, 16, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_9] = { 89, 18, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_10] = { 89, 20, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_11] = { 89, 22, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_12] = { 89, 24, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_13] = { 89, 26, 2, 0, 32767 },
[BQ27XXX_RAM_R_A0_14] = { 89, 28, 2, 0, 32767 },
#endif
};
static struct bq27xxx_dm_reg bq27425_dm_regs[] = {
@ -1058,10 +1096,30 @@ static const char * const bq27xxx_dm_reg_name[] = {
[BQ27XXX_DM_DESIGN_CAPACITY] = "design-capacity",
[BQ27XXX_DM_DESIGN_ENERGY] = "design-energy",
[BQ27XXX_DM_TERMINATE_VOLTAGE] = "terminate-voltage",
#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM
[BQ27XXX_DM_TAPER_RATE] = "Taper-rate",
[BQ27XXX_DM_QMAX] = "QMAX-Cell",
[BQ27XXX_RAM_R_A0_0] = "R_A0_0",
[BQ27XXX_RAM_R_A0_1] = "R_A0_1",
[BQ27XXX_RAM_R_A0_2] = "R_A0_2",
[BQ27XXX_RAM_R_A0_3] = "R_A0_3",
[BQ27XXX_RAM_R_A0_4] = "R_A0_4",
[BQ27XXX_RAM_R_A0_5] = "R_A0_5",
[BQ27XXX_RAM_R_A0_6] = "R_A0_6",
[BQ27XXX_RAM_R_A0_7] = "R_A0_7",
[BQ27XXX_RAM_R_A0_8] = "R_A0_8",
[BQ27XXX_RAM_R_A0_9] = "R_A0_9",
[BQ27XXX_RAM_R_A0_10] = "R_A0_10",
[BQ27XXX_RAM_R_A0_11] = "R_A0_11",
[BQ27XXX_RAM_R_A0_12] = "R_A0_12",
[BQ27XXX_RAM_R_A0_13] = "R_A0_13",
[BQ27XXX_RAM_R_A0_14] = "R_A0_14",
#endif
};
static bool bq27xxx_dt_to_nvm = true;
static bool bq27xxx_dt_to_nvm;
module_param_named(dt_monitored_battery_updates_nvm, bq27xxx_dt_to_nvm, bool, 0444);
MODULE_PARM_DESC(dt_monitored_battery_updates_nvm,
"Devicetree monitored-battery config updates data memory on NVM/flash chips.\n"
@ -1390,7 +1448,8 @@ static int bq27xxx_battery_write_dm_block(struct bq27xxx_device_info *di,
BQ27XXX_MSLEEP(1);
ret = bq27xxx_write_block(di, BQ27XXX_DM_DATA, buf->data, BQ27XXX_DM_SZ);
ret = bq27xxx_write_block(di, BQ27XXX_DM_DATA, buf->data,
(BQ27XXX_DM_SZ-1));
if (ret < 0)
goto out;
@ -1431,6 +1490,10 @@ static void bq27xxx_battery_set_config(struct bq27xxx_device_info *di,
struct bq27xxx_dm_buf bd = BQ27XXX_DM_BUF(di, BQ27XXX_DM_DESIGN_CAPACITY);
struct bq27xxx_dm_buf bt = BQ27XXX_DM_BUF(di, BQ27XXX_DM_TERMINATE_VOLTAGE);
bool updated;
#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM
struct bq27xxx_dm_buf rt = BQ27XXX_DM_BUF(di, BQ27XXX_RAM_R_A0_0);
u32 i, taper_rate;
#endif
if (bq27xxx_battery_unseal(di) < 0)
return;
@ -1438,13 +1501,30 @@ static void bq27xxx_battery_set_config(struct bq27xxx_device_info *di,
if (info->charge_full_design_uah != -EINVAL &&
info->energy_full_design_uwh != -EINVAL) {
bq27xxx_battery_read_dm_block(di, &bd);
/* assume design energy & capacity are in same block */
/* assume design energy, taper_rate & capacity are in same block */
bq27xxx_battery_update_dm_block(di, &bd,
BQ27XXX_DM_DESIGN_CAPACITY,
info->charge_full_design_uah / 1000);
bq27xxx_battery_update_dm_block(di, &bd,
BQ27XXX_DM_DESIGN_ENERGY,
info->energy_full_design_uwh / 1000);
#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM
bq27xxx_battery_read_dm_block(di, &rt);
/* update Taper rate based on the capacity and term current */
taper_rate = (u32)((info->charge_full_design_uah * 10) /
info->charge_term_current_ua);
bq27xxx_battery_update_dm_block(di, &bd, BQ27XXX_DM_TAPER_RATE,
taper_rate);
/* update the QMAX-CELL0 and resistance table */
bq27xxx_battery_update_dm_block(di, &bd, BQ27XXX_DM_QMAX,
di->qmax_cell0);
for (i = 0 ; i < 15; i++)
bq27xxx_battery_update_dm_block(di, &rt,
(i + BQ27XXX_RAM_R_A0_0),
di->resist_table[i]);
#endif
}
if (info->voltage_min_design_uv != -EINVAL) {
@ -1461,6 +1541,19 @@ static void bq27xxx_battery_set_config(struct bq27xxx_device_info *di,
bq27xxx_battery_write_dm_block(di, &bd);
bq27xxx_battery_write_dm_block(di, &bt);
#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM
bq27xxx_battery_write_dm_block(di, &rt);
bq27xxx_battery_read_dm_block(di, &bd);
for (i = 0; i < BQ27XXX_DM_SZ; i++)
dev_dbg(di->dev, "BQ27xxx: DM_NVM[%d]: 0x%04x\n", i, bd.data[i]);
bq27xxx_battery_read_dm_block(di, &rt);
for (i = 0; i < BQ27XXX_DM_SZ; i++)
dev_dbg(di->dev, "BQ27xxx: Resisiatnce table DM_NVM[%d]:0x%04x\n",
i, rt.data[i]);
#endif
bq27xxx_battery_seal(di);
if (updated && !(di->opts & BQ27XXX_O_CFGUP)) {

View file

@ -136,6 +136,59 @@ static int bq27xxx_battery_i2c_bulk_write(struct bq27xxx_device_info *di,
return 0;
}
#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM
/*
 * Read the battery calibration data this config option needs from the
 * devicetree battery node: the "qmax-cell0" value and the fixed-length
 * "resist-table" array.  Returns 0 on success or a negative errno when
 * either property is missing or malformed.
 *
 * Cleanup: the original used two interchangeable status variables
 * (ret/rc); unified into one.
 */
static int bq27xx_parse_dt(struct bq27xxx_device_info *di,
			   struct device *dev,
			   struct device_node *battery_np)
{
	int ret;

	ret = of_property_read_u32(battery_np, "qmax-cell0", &di->qmax_cell0);
	if (ret) {
		dev_err(dev, "Undefined Qmax-Cell0\n");
		return ret;
	}

	/* The table must contain exactly the expected number of entries. */
	ret = of_property_count_elems_of_size(battery_np, "resist-table",
					      sizeof(u32));
	if (ret != BQ27XXX_RESISTANCE_TABLE_LENGTH) {
		dev_err(dev, "Invalid number of elements in resist-table\n");
		return -EINVAL;
	}

	ret = of_property_read_u32_array(battery_np, "resist-table",
			di->resist_table, BQ27XXX_RESISTANCE_TABLE_LENGTH);
	if (ret)
		dev_err(dev, "Undefined resistance table\n");

	return ret;
}
#endif
/*
 * Hibernation restore: drop and re-request the threaded IRQ so the
 * handler resyncs with the (possibly changed) hardware line state.
 * NOTE(review): the devm-managed IRQ is freed but re-requested with
 * plain request_threaded_irq(), so after a restore it is no longer
 * auto-released on driver detach — confirm this is intentional.
 */
static int bq27xxx_restore(struct device *dev)
{
	int ret = 0;
	struct i2c_client *client = to_i2c_client(dev);
	struct bq27xxx_device_info *di = i2c_get_clientdata(client);

	if (client->irq > 0) {
		disable_irq_nosync(client->irq);
		devm_free_irq(dev, client->irq, di);

		ret = request_threaded_irq(client->irq,
				NULL, bq27xxx_battery_irq_handler_thread,
				IRQF_ONESHOT,
				di->name, di);
	}
	return ret;
}
static const struct dev_pm_ops bq27xxx_pm_ops = {
.restore = bq27xxx_restore,
};
static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@ -143,6 +196,9 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
int ret;
char *name;
int num;
#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM
struct device_node *battery_np_rt;
#endif
/* Get new ID for the new battery device */
mutex_lock(&battery_mutex);
@ -169,6 +225,17 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
di->bus.read_bulk = bq27xxx_battery_i2c_bulk_read;
di->bus.write_bulk = bq27xxx_battery_i2c_bulk_write;
#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM
battery_np_rt = of_parse_phandle(client->dev.of_node,
"bat-resist-table", 0);
if (!battery_np_rt)
return -ENODEV;
ret = bq27xx_parse_dt(di, di->dev, battery_np_rt);
of_node_put(battery_np_rt);
if (ret)
return -EINVAL;
#endif
ret = bq27xxx_battery_setup(di);
if (ret)
goto err_failed;
@ -295,6 +362,7 @@ static struct i2c_driver bq27xxx_battery_i2c_driver = {
.driver = {
.name = "bq27xxx-battery",
.of_match_table = of_match_ptr(bq27xxx_battery_i2c_of_match_table),
.pm = &bq27xxx_pm_ops,
},
.probe = bq27xxx_battery_i2c_probe,
.remove = bq27xxx_battery_i2c_remove,

View file

@ -77,4 +77,16 @@ config DTPM_DEVFREQ
help
This enables support for device power limitation based on
energy model.
config QCOM_POWER_TELEMETRY
tristate "Qualcomm Technologies, Inc. Power Telemetry Hardware driver"
depends on SPMI && NVMEM_SPMI_SDAM
default n
help
This enables the Qualcomm Technologies, Inc. power telemetry
hardware device driver. It measures the power consumption of
different PMIC regulators or bucks in different modes, and
exposes the data to userspace clients via the powercap sysfs
interface.
endif

View file

@ -8,3 +8,5 @@ obj-$(CONFIG_INTEL_RAPL) += intel_rapl_msr.o
obj-$(CONFIG_IDLE_INJECT) += idle_inject.o
obj-$(CONFIG_QCOM_EPM) += qti_epm_hardware.o
qti_epm_hardware-y += qti_epm_hw.o qti_epm_interface.o
obj-$(CONFIG_QCOM_POWER_TELEMETRY) += qcom_power_telemetry.o
qcom_power_telemetry-y += qti_power_telemetry.o qti_power_telemetry_interface.o

View file

@ -0,0 +1,515 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "qti_qpt: %s: " fmt, __func__
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sched/clock.h>
#include "qti_power_telemetry.h"
#define QPT_CONFIG_SDAM_BASE_OFF 0x45
#define QPT_DATA_SDAM_BASE_OFF 0x45
#define QPT_CH_ENABLE_MASK BIT(7)
#define QPT_SID_MASK GENMASK(3, 0)
#define QPT_GANG_NUM_MASK 0x70
#define QPT_DATA_BYTE_SIZE 2
#define QPT_DATA_TO_POWER_UW 1500L /* 1 LSB = 1.5 mW */
#define QPT_GET_POWER_UW_FROM_ADC(adc) ((adc) * QPT_DATA_TO_POWER_UW)
#define QPT_SDAM_SAMPLING_MS 1280
/*
 * Serialized read of @bytes at @offset from one QPT SDAM nvmem device.
 * Returns the nvmem_device_read() result (bytes read, or negative
 * errno, logged on failure).
 */
static int qpt_sdam_nvmem_read(struct qpt_priv *qpt, struct qpt_sdam *sdam,
			uint16_t offset, size_t bytes, void *data)
{
	int rc = 0;

	mutex_lock(&sdam->lock);
	rc = nvmem_device_read(sdam->nvmem, offset, bytes, data);
	mutex_unlock(&sdam->lock);
	if (rc < 0)
		/* Bug fix: size_t must be printed with %zu, not %ld
		 * (wrong width on 32-bit builds). */
		dev_err(qpt->dev,
			"Failed to read sdam[%d] off:%#x,size:%zu rc=%d\n",
			sdam->id, offset, bytes, rc);

	return rc;
}
/*
 * Read the 4-byte RTC timestamp from the data SDAM into *@rtc_ts.
 * Returns 0 on success or a negative errno.
 */
static int qti_qpt_read_rtc_time(struct qpt_priv *qpt, u64 *rtc_ts)
{
	int rc;

	/*
	 * Bug fix: the old code passed &rtc_ts (the address of the local
	 * pointer) to the nvmem read, so the 4 RTC bytes overwrote the
	 * pointer variable and the caller's u64 was never written.  Pass
	 * the caller's buffer, and zero it first since only the low 4 of
	 * its 8 bytes are filled by the read.
	 */
	*rtc_ts = 0;
	rc = qpt_sdam_nvmem_read(qpt, &qpt->sdam[DATA_AVG_SDAM],
			QPT_DATA_SDAM_BASE_OFF + DATA_SDAM_RTC0, 4, rtc_ts);
	if (rc < 0)
		return rc;

	return 0;
}
/*
 * Fold one channel's two raw average-power bytes into the device state
 * under its lock; 1 ADC LSB = 1.5 mW (QPT_GET_POWER_UW_FROM_ADC).
 * @ts is currently unused here — presumably the sample timestamp;
 * kept for interface symmetry (TODO confirm intended use).
 */
static void qpt_channel_avg_data_update(struct qpt_device *qpt_dev,
		uint8_t lsb, uint8_t msb, u64 ts)
{
	mutex_lock(&qpt_dev->lock);
	qpt_dev->last_data = (msb << 8) | lsb;
	qpt_dev->last_data_uw = QPT_GET_POWER_UW_FROM_ADC(qpt_dev->last_data);
	mutex_unlock(&qpt_dev->lock);
	QPT_DBG(qpt_dev->priv, "qpt[%s]: power:%lluuw msb:0x%x lsb:0x%x",
		qpt_dev->name, qpt_dev->last_data_uw, msb, lsb);
}
/*
 * Read the 1-byte sample sequence counter from the data SDAM into
 * *@seq_count.  Returns 0 on success or a negative errno.
 */
static int qti_qpt_read_seq_count(struct qpt_priv *qpt, int *seq_count)
{
	int rc;

	/*
	 * Bug fix: the old code passed &seq_count (int **) to the nvmem
	 * read, clobbering the local pointer instead of filling the
	 * caller's integer.  Pass the caller's buffer, and zero it first
	 * since only one byte of the int is written by the read.
	 */
	*seq_count = 0;
	rc = qpt_sdam_nvmem_read(qpt, &qpt->sdam[DATA_AVG_SDAM],
			QPT_DATA_SDAM_BASE_OFF + DATA_SDAM_SEQ_START, 1,
			seq_count);
	if (rc < 0)
		return rc;

	return 0;
}
/*
 * Bulk-read @size bytes of per-channel average-power data starting at
 * SDAM offset @offset and distribute the samples to every enabled
 * channel device.  The read is retried while the sequence counter
 * moved backwards across the read — presumably this guards against the
 * PMIC updating the SDAM mid-read (TODO confirm counter semantics).
 * Returns 0 on success or a negative errno from any SDAM read.
 */
static int qti_qpt_read_all_data(struct qpt_priv *qpt, uint16_t offset, size_t size)
{
	uint8_t data_sdam_avg[DATA_SDAM_POWER_MSB_CH48 + 1] = {0};
	int seq_count = 0;
	int rc = 0;
	struct qpt_device *qpt_dev;
	int seq_count_start = -1;

	rc = qti_qpt_read_seq_count(qpt, &seq_count);
	if (rc < 0)
		return rc;
	/* Re-read until the sequence counter is stable across the read. */
	do {
		seq_count_start = seq_count;
		rc = qpt_sdam_nvmem_read(qpt, &qpt->sdam[DATA_AVG_SDAM], offset,
				size, data_sdam_avg);
		if (rc < 0)
			return rc;
		rc = qti_qpt_read_seq_count(qpt, &seq_count);
		if (rc < 0)
			return rc;
	} while (seq_count < seq_count_start);
	qpt->hw_read_ts = ktime_get();
	/* RTC read failure is tolerated; rtc_ts keeps its previous value. */
	qti_qpt_read_rtc_time(qpt, &qpt->rtc_ts);
	list_for_each_entry(qpt_dev, &qpt->qpt_dev_head, qpt_node) {
		if (!qpt_dev->enabled)
			continue;
		/* Skip channels whose data lies beyond the bytes read. */
		if (qpt_dev->data_offset >= (offset + size))
			continue;
		/* LSB at data_offset, MSB at data_offset + 1. */
		qpt_channel_avg_data_update(qpt_dev,
				data_sdam_avg[qpt_dev->data_offset],
				data_sdam_avg[qpt_dev->data_offset + 1],
				qpt->hw_read_ts);
	}
	QPT_DBG(qpt, "Time(us) to read all channel:%lldus & RTC Time:%lld",
		ktime_to_us(ktime_sub(ktime_get(), qpt->hw_read_ts)),
		qpt->rtc_ts);

	return 0;
}
/*
 * qti_qpt_get_power() - Return the channel's cached power value in uW.
 *
 * Snapshots the value under the channel lock so it cannot tear against a
 * concurrent update from the IRQ path.
 */
static void qti_qpt_get_power(struct qpt_device *qpt_dev, u64 *power_uw)
{
	mutex_lock(&qpt_dev->lock);
	*power_uw = qpt_dev->last_data_uw;
	mutex_unlock(&qpt_dev->lock);
}
/*
 * qti_qpt_read_data_update() - Refresh all channel data from hardware.
 *
 * Serializes full-SDAM reads against concurrent client requests and reads
 * only up to the last enabled channel (offset + 2 data bytes).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qti_qpt_read_data_update(struct qpt_priv *qpt)
{
	int ret;

	mutex_lock(&qpt->hw_read_lock);
	ret = qti_qpt_read_all_data(qpt,
			QPT_DATA_SDAM_BASE_OFF + DATA_SDAM_POWER_LSB_CH1,
			qpt->last_ch_offset + 2);
	mutex_unlock(&qpt->hw_read_lock);

	return ret < 0 ? ret : 0;
}
/*
 * qpt_sdam_irq_handler() - Threaded IRQ handler for the QPT SDAM interrupt.
 *
 * Fired when the PMIC signals a fresh set of averaged samples; refreshes
 * the cached per-channel data.
 */
static irqreturn_t qpt_sdam_irq_handler(int irq, void *data)
{
	struct qpt_priv *qpt = data;

	qti_qpt_read_data_update(qpt);
	return IRQ_HANDLED;
}
/*
 * get_dt_index_from_ppid() - Find the devicetree table index for a channel.
 *
 * Builds the channel's PPID (SID in the high byte, PID in the low byte)
 * and looks it up in the "qcom,reg-ppid-ids" table parsed from DT.
 *
 * Return: matching table index, -EINVAL if the channel is disabled or the
 * DT table is empty, -ENODEV if no entry matches.
 */
static int get_dt_index_from_ppid(struct qpt_device *qpt_dev)
{
	struct qpt_priv *qpt = qpt_dev->priv;
	uint16_t target;
	uint16_t idx;

	if (!qpt_dev->enabled || !qpt->dt_reg_cnt)
		return -EINVAL;

	target = qpt_dev->sid << 8 | qpt_dev->pid;
	for (idx = 0; idx < qpt->dt_reg_cnt; idx++)
		if (qpt->reg_ppid_map[idx] == target)
			return idx;

	return -ENODEV;
}
/*
 * qti_qpt_config_sdam_initialize() - Parse the config SDAM and build the
 * per-channel device list.
 *
 * Reads the whole config SDAM once, validates that QPT is enabled in
 * hardware, then walks the per-channel (config byte, PID byte) pairs,
 * allocating a qpt_device for every enabled channel and naming it from
 * the matching devicetree ppid entry.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qti_qpt_config_sdam_initialize(struct qpt_priv *qpt)
{
	uint8_t *config_sdam = NULL;
	struct qpt_device *qpt_dev = NULL;
	int rc = 0;
	uint8_t conf_idx, data_idx;

	if (!qpt->sdam[CONFIG_SDAM].nvmem) {
		dev_err(qpt->dev, "Invalid sdam nvmem\n");
		return -EINVAL;
	}
	config_sdam = devm_kcalloc(qpt->dev, MAX_CONFIG_SDAM_DATA,
			sizeof(*config_sdam), GFP_KERNEL);
	if (!config_sdam)
		return -ENOMEM;
	rc = qpt_sdam_nvmem_read(qpt, &qpt->sdam[CONFIG_SDAM],
			QPT_CONFIG_SDAM_BASE_OFF,
			MAX_CONFIG_SDAM_DATA, config_sdam);
	if (rc < 0)
		return rc;
	/* Bit 7 of the mode byte flags that QPT is enabled by the PMIC. */
	if (!(config_sdam[CONFIG_SDAM_QPT_MODE] & BIT(7))) {
		dev_err(qpt->dev, "pmic qpt is in disabled state, reg:0x%x\n",
			config_sdam[CONFIG_SDAM_QPT_MODE]);
		return -ENODEV;
	}
	/* Bit 0 selects the operating mode (see enum qpt_mode). */
	qpt->mode = config_sdam[CONFIG_SDAM_QPT_MODE] & BIT(0);
	qpt->max_data = config_sdam[CONFIG_SDAM_MAX_DATA];
	qpt->config_sdam_data = config_sdam;
	/* logic to read number of channels and die_temps */
	for (conf_idx = CONFIG_SDAM_CONFIG_1, data_idx = 0;
		conf_idx <= CONFIG_SDAM_CONFIG_48;
		conf_idx += 2, data_idx += QPT_DATA_BYTE_SIZE) {
		const char *reg_name;

		if (!(config_sdam[conf_idx] & QPT_CH_ENABLE_MASK))
			continue;
		qpt->num_reg++;
		qpt_dev = devm_kzalloc(qpt->dev, sizeof(*qpt_dev), GFP_KERNEL);
		if (!qpt_dev)
			return -ENOMEM;
		qpt_dev->enabled = true;
		qpt_dev->sid = config_sdam[conf_idx] & QPT_SID_MASK;
		qpt_dev->gang_num = config_sdam[conf_idx] & QPT_GANG_NUM_MASK;
		qpt_dev->pid = config_sdam[conf_idx + 1];
		qpt_dev->priv = qpt;
		qpt_dev->data_offset = data_idx;
		mutex_init(&qpt_dev->lock);
		/* Track the highest enabled channel to bound later bulk reads. */
		if (data_idx > qpt->last_ch_offset)
			qpt->last_ch_offset = data_idx;
		rc = get_dt_index_from_ppid(qpt_dev);
		if (rc < 0 || rc >= qpt->dt_reg_cnt) {
			dev_err(qpt->dev, "No matching channel ppid, rc:%d\n",
				rc);
			return rc;
		}
		of_property_read_string_index(qpt->dev->of_node,
				"qcom,reg-ppid-names", rc, &reg_name);
		dev_dbg(qpt->dev, "%s: qpt channel:%s off:0x%x\n", __func__,
			reg_name, data_idx);
		strscpy(qpt_dev->name, reg_name, sizeof(qpt_dev->name));
		list_add(&qpt_dev->qpt_node, &qpt->qpt_dev_head);
	}
	return 0;
}
/*
 * qpt_get_sdam_nvmem() - Acquire the named nvmem device for one SDAM.
 *
 * On failure the nvmem pointer is cleared so callers never see an
 * ERR_PTR. -EPROBE_DEFER is propagated silently since probing retries.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qpt_get_sdam_nvmem(struct device *dev, struct qpt_sdam *sdam,
		char *sdam_name)
{
	int ret = 0;

	sdam->nvmem = devm_nvmem_device_get(dev, sdam_name);
	if (!IS_ERR(sdam->nvmem))
		return 0;

	ret = PTR_ERR(sdam->nvmem);
	sdam->nvmem = NULL;
	if (ret != -EPROBE_DEFER)
		dev_err(dev, "Failed to get nvmem device, rc=%d\n",
			ret);

	return ret;
}
/*
 * qpt_parse_sdam_data() - Resolve both QPT SDAMs from the "nvmem-names"
 * devicetree property.
 *
 * Requires exactly MAX_QPT_SDAM entries: the config SDAM
 * ("qpt-config-sdam") and the data-average SDAM ("qpt-data-sdam").
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qpt_parse_sdam_data(struct qpt_priv *qpt)
{
	int rc = 0;
	char buf[20];

	rc = of_property_count_strings(qpt->dev->of_node, "nvmem-names");
	if (rc < 0) {
		dev_err(qpt->dev, "Could not find nvmem device\n");
		return rc;
	}
	if (rc != MAX_QPT_SDAM) {
		dev_err(qpt->dev, "Invalid num of SDAMs:%d\n", rc);
		return -EINVAL;
	}
	qpt->num_sdams = rc;
	qpt->sdam = devm_kcalloc(qpt->dev, qpt->num_sdams,
			sizeof(*qpt->sdam), GFP_KERNEL);
	if (!qpt->sdam)
		return -ENOMEM;
	/* Check for config sdam */
	qpt->sdam[0].id = CONFIG_SDAM;
	scnprintf(buf, sizeof(buf), "qpt-config-sdam");
	mutex_init(&qpt->sdam[0].lock);
	rc = qpt_get_sdam_nvmem(qpt->dev, &qpt->sdam[0], buf);
	if (rc < 0)
		return rc;
	/* Check data sdam */
	qpt->sdam[1].id = DATA_AVG_SDAM;
	mutex_init(&qpt->sdam[1].lock);
	scnprintf(buf, sizeof(buf), "qpt-data-sdam");
	rc = qpt_get_sdam_nvmem(qpt->dev, &qpt->sdam[1], buf);
	if (rc < 0)
		return rc;
	return 0;
}
/*
 * qpt_pd_callback() - genpd power-state notifier.
 *
 * Disables the QPT IRQ while the power domain turns off and re-enables it
 * when the domain comes back. If the domain stayed off longer than one
 * sampling window, the cached per-channel readings are stale and are
 * zeroed. Skipped entirely during system suspend, whose own path manages
 * the IRQ.
 */
static int qpt_pd_callback(struct notifier_block *nfb,
			unsigned long action, void *v)
{
	struct qpt_priv *qpt = container_of(nfb, struct qpt_priv, genpd_nb);
	ktime_t now;
	s64 diff;
	struct qpt_device *qpt_dev;

	if (atomic_read(&qpt->in_suspend))
		goto cb_exit;
	switch (action) {
	case GENPD_NOTIFY_OFF:
		if (qpt->irq_enabled) {
			disable_irq_nosync(qpt->irq);
			qpt->irq_enabled = false;
		}
		break;
	case GENPD_NOTIFY_ON:
		if (qpt->irq_enabled)
			break;
		now = ktime_get();
		diff = ktime_to_ms(ktime_sub(now, qpt->hw_read_ts));
		/* Readings older than one sampling window are invalid. */
		if (diff > QPT_SDAM_SAMPLING_MS) {
			list_for_each_entry(qpt_dev, &qpt->qpt_dev_head,
					qpt_node) {
				qpt_dev->last_data = 0;
				qpt_dev->last_data_uw = 0;
			}
		}
		enable_irq(qpt->irq);
		qpt->irq_enabled = true;
		break;
	default:
		break;
	}
cb_exit:
	return NOTIFY_OK;
}
/*
 * qti_qpt_pd_notifier_register() - Hook QPT into the device's power domain.
 *
 * Registers qpt_pd_callback() with the lowest priority so it runs after
 * all other genpd notifiers. Runtime PM is rolled back on failure.
 *
 * Return: 0 on success, negative errno from the genpd core otherwise.
 */
static int qti_qpt_pd_notifier_register(struct qpt_priv *qpt, struct device *dev)
{
	int rc;

	pm_runtime_enable(dev);

	qpt->genpd_nb.notifier_call = qpt_pd_callback;
	qpt->genpd_nb.priority = INT_MIN;
	rc = dev_pm_genpd_add_notifier(dev, &qpt->genpd_nb);
	if (rc)
		pm_runtime_disable(dev);

	return rc;
}
/*
 * qpt_parse_dt() - Parse all QPT devicetree properties.
 *
 * Validates that "qcom,reg-ppid-names" and "qcom,reg-ppid-ids" have the
 * same, in-range entry count, loads the ppid table, resolves the SDAMs,
 * fetches the IRQ and optionally registers the power-domain notifier.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qpt_parse_dt(struct qpt_priv *qpt)
{
	struct platform_device *pdev;
	int rc = 0;
	struct device_node *np = qpt->dev->of_node;

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		dev_err(qpt->dev, "Invalid pdev\n");
		return -ENODEV;
	}
	rc = of_property_count_strings(np, "qcom,reg-ppid-names");
	if (rc < 1 || rc >= QPT_POWER_CH_MAX) {
		dev_err(qpt->dev,
			"Invalid ppid name mapping count, rc=%d\n", rc);
		return rc;
	}
	qpt->dt_reg_cnt = rc;
	/* The id array must pair 1:1 with the name array. */
	rc = of_property_count_elems_of_size(np, "qcom,reg-ppid-ids",
			sizeof(u16));
	if (rc < 1 || rc >= QPT_POWER_CH_MAX || rc != qpt->dt_reg_cnt) {
		dev_err(qpt->dev,
			"Invalid ppid mapping count, rc = %d strings:%d\n",
			rc, qpt->dt_reg_cnt);
		return rc;
	}
	rc = of_property_read_u16_array(np, "qcom,reg-ppid-ids",
			qpt->reg_ppid_map, qpt->dt_reg_cnt);
	if (rc < 0) {
		dev_err(qpt->dev,
			"Failed to read ppid mapping array, rc = %d\n", rc);
		return rc;
	}
	rc = qpt_parse_sdam_data(qpt);
	if (rc < 0)
		return rc;
	rc = platform_get_irq(pdev, 0);
	if (rc <= 0) {
		dev_err(qpt->dev, "Failed to get qpt irq, rc=%d\n", rc);
		return -EINVAL;
	}
	qpt->irq = rc;
	/* Power-domain notification is optional; only hook it when present. */
	if (of_find_property(np, "power-domains", NULL) && pdev->dev.pm_domain) {
		rc = qti_qpt_pd_notifier_register(qpt, &pdev->dev);
		if (rc) {
			dev_err(qpt->dev, "Failed to register for pd notifier\n");
			return rc;
		}
	}
	return 0;
}
/*
 * qti_qpt_hw_init() - One-time hardware bring-up (qpt_ops.init).
 *
 * Parses DT, reads the config SDAM to discover channels, requests the
 * threaded SDAM IRQ and takes a first reading so clients see valid data
 * immediately. Safe to call more than once; subsequent calls are no-ops.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qti_qpt_hw_init(struct qpt_priv *qpt)
{
	int rc;

	if (qpt->initialized)
		return 0;
	mutex_init(&qpt->hw_read_lock);
	INIT_LIST_HEAD(&qpt->qpt_dev_head);
	rc = qpt_parse_dt(qpt);
	if (rc < 0) {
		dev_err(qpt->dev, "Failed to parse qpt rc=%d\n", rc);
		return rc;
	}
	rc = qti_qpt_config_sdam_initialize(qpt);
	if (rc < 0) {
		dev_err(qpt->dev, "Failed to parse config sdam rc=%d\n", rc);
		return rc;
	}
	atomic_set(&qpt->in_suspend, 0);
	/* Threaded handler: the SDAM read sleeps, so no hard-IRQ body. */
	rc = devm_request_threaded_irq(qpt->dev, qpt->irq,
			NULL, qpt_sdam_irq_handler,
			IRQF_ONESHOT, "qti_qpt_irq", qpt);
	if (rc < 0) {
		dev_err(qpt->dev,
			"Failed to request IRQ for qpt, rc=%d\n", rc);
		return rc;
	}
	/* Make disable_irq() take effect at the chip immediately. */
	irq_set_status_flags(qpt->irq, IRQ_DISABLE_UNLAZY);
	qpt->irq_enabled = true;
	qpt->initialized = true;
	/* Update first reading for all channels */
	qti_qpt_read_data_update(qpt);
	return 0;
}
/*
 * qti_qpt_suspend() - System-suspend hook (qpt_ops.suspend).
 *
 * Marks suspend-in-progress first so the genpd callback stops touching
 * the IRQ, then quiesces the SDAM interrupt.
 */
static int qti_qpt_suspend(struct qpt_priv *qpt)
{
	atomic_set(&qpt->in_suspend, 1);

	if (!qpt->irq_enabled)
		return 0;

	disable_irq_nosync(qpt->irq);
	qpt->irq_enabled = false;

	return 0;
}
/*
 * qti_qpt_resume() - System-resume hook (qpt_ops.resume).
 *
 * If the last hardware read is older than one sampling window, the cached
 * per-channel values are stale and get zeroed. Then the SDAM interrupt is
 * re-armed and the suspend flag cleared.
 */
static int qti_qpt_resume(struct qpt_priv *qpt)
{
	struct qpt_device *ch = NULL;
	s64 elapsed_ms;

	elapsed_ms = ktime_to_ms(ktime_sub(ktime_get(), qpt->hw_read_ts));
	if (elapsed_ms > QPT_SDAM_SAMPLING_MS) {
		list_for_each_entry(ch, &qpt->qpt_dev_head, qpt_node) {
			ch->last_data = 0;
			ch->last_data_uw = 0;
		}
	}

	if (!qpt->irq_enabled) {
		enable_irq(qpt->irq);
		qpt->irq_enabled = true;
	}

	atomic_set(&qpt->in_suspend, 0);

	return 0;
}
/*
 * qti_qpt_hw_release() - Tear down PM/genpd state (qpt_ops.release).
 *
 * NOTE(review): runs unconditionally even when the optional
 * "power-domains" notifier was never registered — confirm the genpd and
 * runtime-PM APIs tolerate the unbalanced calls.
 */
static void qti_qpt_hw_release(struct qpt_priv *qpt)
{
	pm_runtime_disable(qpt->dev);
	dev_pm_genpd_remove_notifier(qpt->dev);
}
/* SDAM-backed implementation of qpt_ops consumed by the powercap glue layer */
struct qpt_ops qpt_hw_ops = {
	.init = qti_qpt_hw_init,
	.get_power = qti_qpt_get_power,
	.suspend = qti_qpt_suspend,
	.resume = qti_qpt_resume,
	.release = qti_qpt_hw_release,
};
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Telemetry driver");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,194 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QCOM_QPT_H__
#define __QCOM_QPT_H__
#include <linux/interrupt.h>
#include <linux/ipc_logging.h>
#include <linux/powercap.h>
struct qpt_priv;
struct qpt_device;
#define IPC_LOGPAGES 10
/*
 * Debug print to both dev_dbg and the IPC log.
 * Bug fix: the original dereferenced qpt->dev BEFORE the NULL check on
 * qpt, and left the macro argument unparenthesized; the whole body is now
 * guarded and every use of the argument is parenthesized.
 */
#define QPT_DBG(qpt, msg, args...) do {				\
		if (qpt) {					\
			dev_dbg((qpt)->dev, "%s:" msg, __func__, args); \
			if ((qpt)->ipc_log) {			\
				ipc_log_string((qpt)->ipc_log,	\
				"[%s] "msg"\n",	\
				current->comm, args); \
			}					\
		}						\
	} while (0)
#define QPT_REG_NAME_LENGTH 32
#define QPT_POWER_CH_MAX 48
#define QPT_TZ_CH_MAX 8
#define QPT_MAX_DATA_MAX 10
/* Different qpt modes of operation */
enum qpt_mode {
	QPT_ACAT_MODE,
	QPT_RCM_MODE,
	QPT_MODE_MAX
};
/* Different qpt sdam IDs to use as an index into an array */
enum qpt_sdam_id {
	CONFIG_SDAM,
	DATA_AVG_SDAM,
	MAX_QPT_SDAM
};
/*
 * Data sdam field IDs to use as an index into an array.
 * Power data is laid out as (LSB, MSB) byte pairs, one pair per channel.
 */
enum data_sdam_field_ids {
	DATA_SDAM_SEQ_START,
	DATA_SDAM_SEQ_END,
	DATA_SDAM_NUM_RECORDS,
	DATA_SDAM_RTC0,
	DATA_SDAM_RTC1,
	DATA_SDAM_RTC2,
	DATA_SDAM_RTC3,
	DATA_SDAM_VPH_LSB,
	DATA_SDAM_VPH_MSB,
	DATA_SDAM_DIE_TEMP_SID1,
	DATA_SDAM_DIE_TEMP_SID8 = DATA_SDAM_DIE_TEMP_SID1 + QPT_TZ_CH_MAX - 1,
	DATA_SDAM_POWER_LSB_CH1,
	DATA_SDAM_POWER_MSB_CH1,
	DATA_SDAM_POWER_LSB_CH48 = DATA_SDAM_POWER_LSB_CH1 + 2 * (QPT_POWER_CH_MAX - 1),
	DATA_SDAM_POWER_MSB_CH48,
	MAX_SDAM_DATA
};
/*
 * Config sdam field IDs to use as an index into an array.
 * Channels occupy (config byte, PID byte) pairs starting at
 * CONFIG_SDAM_CONFIG_1.
 */
enum config_sdam_field_ids {
	CONFIG_SDAM_QPT_MODE,
	CONFIG_SDAM_QPT_STATUS,
	CONFIG_SDAM_MAX_DATA,
	CONFIG_SDAM_MEAS_CFG,
	CONFIG_SDAM_LAST_FULL_SDAM,
	CONFIG_SDAM_CONFIG_1,
	CONFIG_SDAM_PID_1,
	CONFIG_SDAM_CONFIG_48 = CONFIG_SDAM_CONFIG_1 + 2 * (QPT_POWER_CH_MAX - 1),
	MAX_CONFIG_SDAM_DATA
};
/**
 * struct qpt_sdam - QPT sdam data structure
 * @id: QPT sdam id type
 * @nvmem: Pointer to nvmem device
 * @lock: lock to protect multiple read concurrently
 * @last_data: last full read data copy for current sdam
 *
 * Note: @last_data is sized for the config SDAM layout and is shared by
 * both SDAM types.
 */
struct qpt_sdam {
	enum qpt_sdam_id id;
	struct nvmem_device *nvmem;
	struct mutex lock;
	uint8_t last_data[MAX_CONFIG_SDAM_DATA];
};
/**
 * struct qpt_device - Each regulator channel device data
 * @qpt_node: qpt device list head member to traverse all devices
 * @priv: qpt hardware instance that this channel is connected to
 * @pz: array of powercap zone data types for different data retrieval
 * @name: name of the regulator which is used to identify channel
 * @enabled: qpt channel is enabled or not
 * @sid: qpt channel SID
 * @pid: qpt channel PID
 * @gang_num: qpt channel gang_num
 * @data_offset: qpt channel power data offset from DATA sdam base
 * @last_data: qpt channel last raw ADC sample (1.5 mW per LSB)
 * @last_data_uw: qpt channel last cached power value, in microwatts
 * @lock: lock to protect multiple client read concurrently
 */
struct qpt_device {
	struct list_head qpt_node;
	struct qpt_priv *priv;
	struct powercap_zone pz;
	char name[QPT_REG_NAME_LENGTH];
	bool enabled;
	uint8_t sid;
	uint8_t pid;
	uint8_t gang_num;
	uint8_t data_offset;
	uint16_t last_data;
	u64 last_data_uw;
	struct mutex lock;
};
/**
 * struct qpt_priv - Structure for QPT hardware private data
 * @dev: Pointer for QPT device
 * @mode: enum to give current mode of operation
 * @sdam: Pointer for array of QPT sdams
 * @pct: pointer to powercap control type
 * @irq: qpt sdam pbs irq number
 * @num_sdams: Number of SDAMs used for QPT from DT
 * @num_reg: Number of regulator based on config sdam
 * @max_data: QPT hardware max_data configuration
 * @reg_ppid_map: array of regulator/rail PPID from devicetree
 * @dt_reg_cnt: Number of regulator count in devicetree
 * @last_ch_offset: Last enabled data channel offset
 * @initialized: QPT hardware initialization is done if it is true
 * @irq_enabled: The qpt irq enable/disable status
 * @in_suspend: The QPT driver suspend status
 * @ops: QPT hardware supported ops
 * @config_sdam_data: Config sdam data dump collected at init
 * @ipc_log: Handle to ipc_logging
 * @hw_read_ts: ktime timestamp collected just after qpt irq data update
 * @rtc_ts: RTC Timestamp collected just after qpt irq data update
 * @qpt_dev_head: List head for all qpt channel devices
 * @hw_read_lock: lock to protect avg data update and client request
 * @genpd_nb: Genpd notifier for apps idle notification
 */
struct qpt_priv {
	struct device *dev;
	enum qpt_mode mode;
	struct qpt_sdam *sdam;
	struct powercap_control_type *pct;
	int irq;
	u32 num_sdams;
	u32 num_reg;
	u8 max_data;
	u16 reg_ppid_map[QPT_POWER_CH_MAX];
	u8 dt_reg_cnt;
	u8 last_ch_offset;
	bool initialized;
	bool irq_enabled;
	atomic_t in_suspend;
	struct qpt_ops *ops;
	uint8_t *config_sdam_data;
	void *ipc_log;
	u64 hw_read_ts;
	u64 rtc_ts;
	struct list_head qpt_dev_head;
	struct mutex hw_read_lock;
	struct notifier_block genpd_nb;
};
/**
 * struct qpt_ops - Structure for QPT hardware supported ops
 * @init: QPT hardware init function
 * @get_power: Function to get power for QPT channel in uW
 * @get_max_power: Function to get max power which QPT channel can deliver
 * @release: Function to clear all QPT data on exit
 * @suspend: Function to execute QPT during suspend callback if any
 * @resume: Function to restore QPT during resume callback if any
 */
struct qpt_ops {
	int (*init)(struct qpt_priv *priv);
	void (*get_power)(struct qpt_device *qpt_dev, u64 *power);
	int (*get_max_power)(const struct qpt_device *qpt_dev, u64 *max_power);
	void (*release)(struct qpt_priv *qpt);
	int (*suspend)(struct qpt_priv *qpt);
	int (*resume)(struct qpt_priv *qpt);
};
extern struct qpt_ops qpt_hw_ops;
extern void qpt_sysfs_notify(struct qpt_priv *qpt);
#endif /* __QCOM_QPT_H__ */

View file

@ -0,0 +1,247 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "qti_power_telemetry.h"
#define QPT_HW "qti-qpt-hw"
/* Map a powercap zone back to its owning QPT channel device. */
static inline struct qpt_device *to_qpt_dev_pz(struct powercap_zone *pz)
{
	return container_of(pz, struct qpt_device, pz);
}
static const char * const constraint_name[] = {
"dummy",
};
/*
 * qpt_sysfs_notify() - Poke userspace pollers of the control type's
 * "enabled" attribute. Safe to call before powercap registration.
 */
void qpt_sysfs_notify(struct qpt_priv *qpt)
{
	if (!qpt || !qpt->pct)
		return;

	sysfs_notify(&qpt->pct->dev.kobj, NULL, "enabled");
}
/*
 * qpt_suspend() - dev_pm_ops suspend hook; forwards to the HW layer.
 *
 * Bug fix: probe stores a struct qpt_priv * as drvdata
 * (platform_set_drvdata / dev_set_drvdata), but this hook read it back as
 * a struct qpt_device * and dereferenced ->priv, yielding a garbage
 * pointer. Read the qpt_priv directly.
 */
static int qpt_suspend(struct device *dev)
{
	struct qpt_priv *qpt = dev_get_drvdata(dev);

	if (qpt->ops->suspend)
		return qpt->ops->suspend(qpt);

	return 0;
}
/*
 * qpt_resume() - dev_pm_ops resume hook; forwards to the HW layer.
 *
 * Bug fix: same drvdata type confusion as qpt_suspend() — drvdata holds a
 * struct qpt_priv *, not a struct qpt_device *.
 */
static int qpt_resume(struct device *dev)
{
	struct qpt_priv *qpt = dev_get_drvdata(dev);

	if (qpt->ops->resume)
		return qpt->ops->resume(qpt);

	return 0;
}
/* Time-window constraints are not supported by this hardware. */
static int qpt_get_time_window_us(struct powercap_zone *pcz, int cid, u64 *window)
{
	return -EOPNOTSUPP;
}
static int qpt_set_time_window_us(struct powercap_zone *pcz, int cid, u64 window)
{
	return -EOPNOTSUPP;
}
/*
 * qpt_get_max_power_range_uw() - Powercap callback for the zone's max
 * power range.
 *
 * Bug fix: when no get_max_power op exists (the HW ops table does not set
 * one) the original returned 0 while leaving *max_power_uw uninitialized,
 * handing the powercap core stack garbage. Report -EOPNOTSUPP instead,
 * matching qpt_get_power_uw(), and propagate the op's own result.
 */
static int qpt_get_max_power_range_uw(struct powercap_zone *pcz, u64 *max_power_uw)
{
	struct qpt_device *qpt_dev = to_qpt_dev_pz(pcz);
	struct qpt_priv *qpt = qpt_dev->priv;

	if (!qpt->ops->get_max_power)
		return -EOPNOTSUPP;

	return qpt->ops->get_max_power(qpt_dev, max_power_uw);
}
/*
 * qpt_get_power_uw() - Powercap callback returning the zone's current
 * power in microwatts, via the HW layer's get_power op.
 */
static int qpt_get_power_uw(struct powercap_zone *pcz, u64 *power_uw)
{
	struct qpt_device *qpt_dev = to_qpt_dev_pz(pcz);
	struct qpt_priv *qpt = qpt_dev->priv;

	if (!qpt->ops->get_power)
		return -EOPNOTSUPP;

	qpt->ops->get_power(qpt_dev, power_uw);

	return 0;
}
static int qpt_release_zone(struct powercap_zone *pcz)
{
struct qpt_device *qpt_dev = to_qpt_dev_pz(pcz);
struct qpt_priv *qpt = qpt_dev->priv;
if (qpt->ops->release)
qpt->ops->release(qpt);
return 0;
}
/* Power-limit constraints are not supported; QPT is telemetry-only. */
static int qpt_get_power_limit_uw(struct powercap_zone *pcz,
			int cid, u64 *power_limit)
{
	return -EOPNOTSUPP;
}
static int qpt_set_power_limit_uw(struct powercap_zone *pcz,
			int cid, u64 power_limit)
{
	return -EOPNOTSUPP;
}
static const char *get_constraint_name(struct powercap_zone *pcz, int cid)
{
return constraint_name[cid];
}
/*
 * qpt_get_max_power_uw() - Constraint callback for the zone's max power,
 * forwarded to the HW layer when implemented.
 */
static int qpt_get_max_power_uw(struct powercap_zone *pcz, int id, u64 *max_power)
{
	struct qpt_device *qpt_dev = to_qpt_dev_pz(pcz);
	struct qpt_priv *qpt = qpt_dev->priv;

	if (!qpt->ops->get_max_power)
		return -EOPNOTSUPP;

	return qpt->ops->get_max_power(qpt_dev, max_power);
}
/* Constraint callbacks: only max-power and the name are meaningful here. */
static struct powercap_zone_constraint_ops constraint_ops = {
	.set_power_limit_uw = qpt_set_power_limit_uw,
	.get_power_limit_uw = qpt_get_power_limit_uw,
	.set_time_window_us = qpt_set_time_window_us,
	.get_time_window_us = qpt_get_time_window_us,
	.get_max_power_uw = qpt_get_max_power_uw,
	.get_name = get_constraint_name,
};
/* Per-zone callbacks exposed to the powercap core. */
static struct powercap_zone_ops zone_ops = {
	.get_max_power_range_uw = qpt_get_max_power_range_uw,
	.get_power_uw = qpt_get_power_uw,
	.release = qpt_release_zone,
};
/*
 * powercap_register() - Expose every enabled QPT channel as a powercap zone
 * under the "qpt" control type.
 *
 * NOTE(review): if a zone registration fails part-way, zones registered so
 * far and the control type are left for the remove path to clean up —
 * confirm the probe-failure ordering handles this.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int powercap_register(struct qpt_priv *qpt)
{
	struct qpt_device *qpt_dev;
	struct powercap_zone *pcz = NULL;

	qpt->pct = powercap_register_control_type(NULL, "qpt", NULL);
	if (IS_ERR(qpt->pct)) {
		dev_err(qpt->dev, "Failed to register control type\n");
		return PTR_ERR(qpt->pct);
	}
	list_for_each_entry(qpt_dev, &qpt->qpt_dev_head, qpt_node) {
		if (!qpt_dev->enabled)
			continue;
		/* One zone per channel, each with a single dummy constraint. */
		pcz = powercap_register_zone(&qpt_dev->pz, qpt->pct,
				qpt_dev->name, NULL, &zone_ops, 1,
				&constraint_ops);
		if (IS_ERR(pcz))
			return PTR_ERR(pcz);
	}
	return 0;
}
/*
 * qpt_hw_device_probe() - Probe: initialize QPT hardware and register
 * the powercap interface.
 *
 * NOTE(review): the ops NULL-check below runs after qpt->ops has already
 * been set to &qpt_hw_ops, so it only trips if that static table is
 * incomplete. The ipc_log context is also not destroyed on error paths.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qpt_hw_device_probe(struct platform_device *pdev)
{
	int ret;
	struct qpt_priv *qpt;

	qpt = devm_kzalloc(&pdev->dev, sizeof(*qpt), GFP_KERNEL);
	if (!qpt)
		return -ENOMEM;
	qpt->dev = &pdev->dev;
	qpt->ops = &qpt_hw_ops;
	/* IPC logging is best-effort; probing continues without it. */
	qpt->ipc_log = ipc_log_context_create(IPC_LOGPAGES, "Qpt", 0);
	if (!qpt->ipc_log)
		dev_err(qpt->dev, "%s: unable to create IPC Logging for %s\n",
			__func__, "qti_qpt");
	if (!qpt->ops || !qpt->ops->init ||
		!qpt->ops->get_power || !qpt->ops->release)
		return -EINVAL;
	ret = qpt->ops->init(qpt);
	if (ret < 0) {
		dev_err(&pdev->dev, "%s: init failed\n", __func__);
		return ret;
	}
	/* drvdata is the qpt_priv; the PM hooks rely on this. */
	platform_set_drvdata(pdev, qpt);
	dev_set_drvdata(qpt->dev, qpt);
	return powercap_register(qpt);
}
/*
 * qpt_hw_device_remove() - Unregister all powercap zones, the control
 * type and release the hardware layer.
 */
static int qpt_hw_device_remove(struct platform_device *pdev)
{
	struct qpt_priv *qpt = platform_get_drvdata(pdev);
	struct qpt_device *qpt_dev;

	list_for_each_entry(qpt_dev, &qpt->qpt_dev_head, qpt_node) {
		if (qpt->pct)
			powercap_unregister_zone(qpt->pct,
					&qpt_dev->pz);
	}
	if (qpt->pct)
		powercap_unregister_control_type(qpt->pct);
	/*
	 * NOTE(review): zone release callbacks may already have invoked
	 * ops->release above — confirm a second call here is harmless.
	 */
	if (qpt->ops->release)
		qpt->ops->release(qpt);
	return 0;
}
/* System sleep hooks: forward to the hardware layer via qpt_ops. */
static const struct dev_pm_ops qpt_pm_ops = {
	.suspend = qpt_suspend,
	.resume = qpt_resume,
};
static const struct of_device_id qpt_hw_device_match[] = {
	{.compatible = "qcom,power-telemetry"},
	{}
};
static struct platform_driver qpt_hw_device_driver = {
	.probe = qpt_hw_device_probe,
	.remove = qpt_hw_device_remove,
	.driver = {
		.name = QPT_HW,
		.pm = &qpt_pm_ops,
		.of_match_table = qpt_hw_device_match,
	},
};
module_platform_driver(qpt_hw_device_driver);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Telemetry driver");
MODULE_LICENSE("GPL");

View file

@ -39,18 +39,26 @@ static const struct regmap_config ap27700_regmap_config = {
.max_register = 0x4,
};
#define AP72200_MAX_WRITE_RETRIES 4
static int ap72200_vreg_enable(struct regulator_dev *rdev)
{
struct ap72200_vreg *vreg = rdev_get_drvdata(rdev);
int rc, val;
int rc, val, retries;
gpiod_set_value_cansleep(vreg->ena_gpiod, 1);
val = DIV_ROUND_UP(vreg->rdesc.fixed_uV - AP72200_MIN_UV, AP72200_STEP_UV);
/* Set the voltage */
rc = regmap_write(vreg->regmap, AP72200_VSEL_REG_ADDR,
val);
retries = AP72200_MAX_WRITE_RETRIES;
do {
/* Set the voltage */
rc = regmap_write(vreg->regmap, AP72200_VSEL_REG_ADDR,
val);
if (!rc)
break;
} while (retries--);
if (rc) {
dev_err(vreg->dev, "Failed to set voltage rc: %d\n", rc);
return rc;

View file

@ -141,7 +141,7 @@ static int virtio_glink_bridge_send_msg(struct virtio_glink_bridge *vgbridge,
memset(msg, 0, sizeof(*msg));
msg->type = cpu_to_virtio32(vdev, msg_type);
msg->label = cpu_to_virtio32(vdev, label);
sg_init_one(&sg, msg, sizeof(*msg));
sg_init_one(&sg, msg, sizeof(struct virtio_glink_bridge_msg));
rc = virtqueue_add_inbuf(vgbridge->vq, &sg, 1, msg, GFP_KERNEL);
if (rc) {
@ -167,7 +167,7 @@ static int virtio_glink_bridge_send_msg_ack(struct virtio_glink_bridge *vgbridge
ack->type = cpu_to_virtio32(vdev, msg_type);
ack->label = cpu_to_virtio32(vdev, label);
ack->status = cpu_to_virtio32(vdev, status);
sg_init_one(&sg, ack, sizeof(*ack));
sg_init_one(&sg, ack, sizeof(struct virtio_glink_bridge_msg));
rc = virtqueue_add_inbuf(vgbridge->vq, &sg, 1, ack, GFP_KERNEL);
if (rc) {

View file

@ -107,6 +107,22 @@ config QCOM_DCC_V2
driver provides interface to configure DCC block and read back
captured data from DCC's internal SRAM.
config MSM_SPM
bool "Driver support for SPM and AVS wrapper hardware"
help
Enables the support for SPM and AVS wrapper hardware on MSMs. SPM
hardware is used to manage the processor power during sleep. The
driver allows configuring SPM to allow different low power modes for
both core and L2.
config MSM_L2_SPM
bool "SPM support for L2 cache"
help
Enable SPM driver support for L2 cache. Some MSM chipsets allow
control of L2 cache low power mode with a Subsystem Power manager.
Enabling this driver allows configuring L2 SPM for low power modes
on supported chipsets.
config QCOM_GENI_SE
tristate "QCOM GENI Serial Engine Driver"
depends on ARCH_QCOM || COMPILE_TEST

View file

@ -60,6 +60,7 @@ obj-$(CONFIG_MSM_CORE_HANG_DETECT) += core_hang_detect.o
obj-$(CONFIG_USB_BAM) += usb_bam.o
obj-$(CONFIG_QCOM_CPU_VENDOR_HOOKS) += qcom_cpu_vendor_hooks.o
obj-$(CONFIG_QTI_CRYPTO_COMMON) += crypto-qti.o
obj-$(CONFIG_MSM_SPM) += msm-spm.o spm_devices.o
crypto-qti-y += crypto-qti-common.o
crypto-qti-$(CONFIG_QTI_CRYPTO_TZ) += crypto-qti-tz.o
crypto-qti-$(CONFIG_QTI_HW_KEY_MANAGER) += crypto-qti-hwkm.o

View file

@ -257,7 +257,8 @@ static struct dma_buf *habmem_get_dma_buf_from_uva(unsigned long address,
int page_count)
{
struct page **pages = NULL;
int i, ret = 0;
struct vm_area_struct *vma = NULL;
int i, ret, page_nr = 0;
struct dma_buf *dmabuf = NULL;
struct pages_list *pglist = NULL;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@ -276,14 +277,40 @@ static struct dma_buf *habmem_get_dma_buf_from_uva(unsigned long address,
mmap_read_lock(current->mm);
ret = get_user_pages(address, page_count, 0, pages, NULL);
/*
* Need below sanity checks:
* 1. input uva is covered by an existing VMA of the current process
* 2. the given uva range is fully covered in the same VMA
*/
vma = vma_lookup(current->mm, address);
if (!range_in_vma(vma, address, address + page_count * PAGE_SIZE)) {
mmap_read_unlock(current->mm);
pr_err("input uva [0x%lx, 0x%lx) not covered in one VMA. UVA or size(%d) is invalid\n",
address, address + page_count * PAGE_SIZE, page_count * PAGE_SIZE);
ret = -EINVAL;
goto err;
}
page_nr = get_user_pages(address, page_count, 0, pages, NULL);
mmap_read_unlock(current->mm);
if (ret <= 0) {
if (page_nr <= 0) {
ret = -EINVAL;
pr_err("get %d user pages failed %d\n",
page_count, ret);
page_count, page_nr);
goto err;
}
/*
* The actual number of the pinned pages is returned by get_user_pages.
* It may not match with the requested number.
*/
if (page_nr != page_count) {
ret = -EINVAL;
pr_err("input page cnt %d not match with pinned %d\n", page_count, page_nr);
for (i = 0; i < page_nr; i++)
put_page(pages[i]);
goto err;
}
@ -306,6 +333,7 @@ static struct dma_buf *habmem_get_dma_buf_from_uva(unsigned long address,
ret = PTR_ERR(dmabuf);
goto err;
}
return dmabuf;
err:

View file

@ -2282,7 +2282,7 @@ static int hgsl_ioctl_mem_alloc(
struct hgsl_priv *priv = filep->private_data;
struct hgsl_ioctl_mem_alloc_params *params = data;
struct qcom_hgsl *hgsl = priv->dev;
int ret = 0;
int ret = 0, mem_fd = -1;
struct hgsl_mem_node *mem_node = NULL;
struct hgsl_hab_channel_t *hab_channel = NULL;
@ -2298,6 +2298,13 @@ static int hgsl_ioctl_mem_alloc(
goto out;
}
mem_fd = get_unused_fd_flags(O_CLOEXEC);
if (mem_fd < 0) {
LOGE("no available fd %d", mem_fd);
ret = -EMFILE;
goto out;
}
mem_node = hgsl_mem_node_zalloc(hgsl->default_iocoherency);
if (mem_node == NULL) {
ret = -ENOMEM;
@ -2316,30 +2323,34 @@ static int hgsl_ioctl_mem_alloc(
if (ret)
goto out;
/* increase reference count before install fd. */
get_dma_buf(mem_node->dma_buf);
params->fd = dma_buf_fd(mem_node->dma_buf, O_CLOEXEC);
if (params->fd < 0) {
LOGE("dma_buf_fd failed, size 0x%x", mem_node->memdesc.size);
ret = -EINVAL;
dma_buf_put(mem_node->dma_buf);
goto out;
}
if (copy_to_user(USRPTR(params->memdesc),
&mem_node->memdesc, sizeof(mem_node->memdesc))) {
ret = -EFAULT;
goto out;
}
/* increase reference count before install fd. */
get_dma_buf(mem_node->dma_buf);
mutex_lock(&priv->lock);
list_add(&mem_node->node, &priv->mem_allocated);
hgsl_trace_gpu_mem_total(priv, mem_node->memdesc.size64);
ret = hgsl_mem_add_node(&priv->mem_allocated, mem_node);
if (unlikely(ret))
dma_buf_put(mem_node->dma_buf);
else {
params->fd = mem_fd;
fd_install(params->fd, mem_node->dma_buf->file);
hgsl_trace_gpu_mem_total(priv, mem_node->memdesc.size64);
}
mutex_unlock(&priv->lock);
out:
if (ret && mem_node) {
hgsl_hyp_mem_unmap_smmu(hab_channel, mem_node);
hgsl_sharedmem_free(mem_node);
if (ret) {
if (mem_node) {
hgsl_hyp_mem_unmap_smmu(hab_channel, mem_node);
hgsl_sharedmem_free(mem_node);
}
if (mem_fd >= 0)
put_unused_fd(mem_fd);
}
hgsl_hyp_channel_pool_put(hab_channel);
return ret;
@ -2354,7 +2365,6 @@ static int hgsl_ioctl_mem_free(
struct gsl_memdesc_t memdesc;
int ret = 0;
struct hgsl_mem_node *node_found = NULL;
struct hgsl_mem_node *tmp = NULL;
struct hgsl_hab_channel_t *hab_channel = NULL;
ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
@ -2371,16 +2381,11 @@ static int hgsl_ioctl_mem_free(
}
mutex_lock(&priv->lock);
list_for_each_entry(tmp, &priv->mem_allocated, node) {
if ((tmp->memdesc.gpuaddr == memdesc.gpuaddr)
&& (tmp->memdesc.size == memdesc.size)) {
node_found = tmp;
list_del(&node_found->node);
break;
}
}
node_found = hgsl_mem_find_node_locked(&priv->mem_allocated,
memdesc.gpuaddr, memdesc.size64, true);
if (node_found)
rb_erase(&node_found->mem_rb_node, &priv->mem_allocated);
mutex_unlock(&priv->lock);
if (node_found) {
ret = hgsl_hyp_mem_unmap_smmu(hab_channel, node_found);
if (!ret) {
@ -2390,14 +2395,14 @@ static int hgsl_ioctl_mem_free(
} else {
LOGE("hgsl_hyp_mem_unmap_smmu failed %d", ret);
mutex_lock(&priv->lock);
list_add(&node_found->node, &priv->mem_allocated);
ret = hgsl_mem_add_node(&priv->mem_allocated, node_found);
mutex_unlock(&priv->lock);
if (unlikely(ret))
LOGE("unlikely to get here! %d", ret);
}
} else {
} else
LOGE("can't find the memory 0x%llx, 0x%x",
memdesc.gpuaddr, memdesc.size);
goto out;
}
out:
hgsl_hyp_channel_pool_put(hab_channel);
@ -2413,6 +2418,7 @@ static int hgsl_ioctl_set_metainfo(
int ret = 0;
struct hgsl_mem_node *mem_node = NULL;
struct hgsl_mem_node *tmp = NULL;
struct rb_node *rb = NULL;
char metainfo[HGSL_MEM_META_MAX_SIZE] = {0};
if (params->metainfo_len > HGSL_MEM_META_MAX_SIZE) {
@ -2429,7 +2435,8 @@ static int hgsl_ioctl_set_metainfo(
metainfo[HGSL_MEM_META_MAX_SIZE - 1] = '\0';
mutex_lock(&priv->lock);
list_for_each_entry(tmp, &priv->mem_allocated, node) {
for (rb = rb_first(&priv->mem_allocated); rb; rb = rb_next(rb)) {
tmp = rb_entry(rb, struct hgsl_mem_node, mem_rb_node);
if (tmp->memdesc.priv64 == params->memdesc_priv) {
mem_node = tmp;
break;
@ -2482,19 +2489,21 @@ static int hgsl_ioctl_mem_map_smmu(
mem_node->memtype = params->memtype;
ret = hgsl_hyp_mem_map_smmu(hab_channel, params->size, params->offset, mem_node);
if (ret)
goto out;
if (ret == 0) {
if (copy_to_user(USRPTR(params->memdesc), &mem_node->memdesc,
sizeof(mem_node->memdesc))) {
ret = -EFAULT;
goto out;
}
mutex_lock(&priv->lock);
list_add(&mem_node->node, &priv->mem_mapped);
hgsl_trace_gpu_mem_total(priv, mem_node->memdesc.size64);
mutex_unlock(&priv->lock);
if (copy_to_user(USRPTR(params->memdesc), &mem_node->memdesc,
sizeof(mem_node->memdesc))) {
ret = -EFAULT;
goto out;
}
mutex_lock(&priv->lock);
ret = hgsl_mem_add_node(&priv->mem_mapped, mem_node);
if (likely(!ret))
hgsl_trace_gpu_mem_total(priv, mem_node->memdesc.size64);
mutex_unlock(&priv->lock);
out:
if (ret) {
hgsl_hyp_mem_unmap_smmu(hab_channel, mem_node);
@ -2512,7 +2521,6 @@ static int hgsl_ioctl_mem_unmap_smmu(
struct hgsl_ioctl_mem_unmap_smmu_params *params = data;
int ret = 0;
struct hgsl_mem_node *node_found = NULL;
struct hgsl_mem_node *tmp = NULL;
struct hgsl_hab_channel_t *hab_channel = NULL;
ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
@ -2522,31 +2530,29 @@ static int hgsl_ioctl_mem_unmap_smmu(
}
mutex_lock(&priv->lock);
list_for_each_entry(tmp, &priv->mem_mapped, node) {
if ((tmp->memdesc.gpuaddr == params->gpuaddr)
&& (tmp->memdesc.size == params->size)) {
node_found = tmp;
list_del(&node_found->node);
break;
}
}
node_found = hgsl_mem_find_node_locked(&priv->mem_mapped,
params->gpuaddr, params->size, true);
if (node_found)
rb_erase(&node_found->mem_rb_node, &priv->mem_mapped);
mutex_unlock(&priv->lock);
if (node_found) {
hgsl_put_sgt(node_found, false);
ret = hgsl_hyp_mem_unmap_smmu(hab_channel, node_found);
if (ret) {
mutex_lock(&priv->lock);
list_add(&node_found->node, &priv->mem_mapped);
mutex_unlock(&priv->lock);
} else {
if (!ret) {
hgsl_trace_gpu_mem_total(priv,
-(node_found->memdesc.size64));
hgsl_free(node_found);
} else {
LOGE("hgsl_hyp_mem_unmap_smmu failed %d", ret);
mutex_lock(&priv->lock);
ret = hgsl_mem_add_node(&priv->mem_mapped, node_found);
mutex_unlock(&priv->lock);
if (unlikely(ret))
LOGE("unlikely to get here! %d", ret);
}
} else {
} else
ret = -EINVAL;
}
out:
hgsl_hyp_channel_pool_put(hab_channel);
@ -2573,15 +2579,16 @@ static int hgsl_ioctl_mem_cache_operation(
}
mutex_lock(&priv->lock);
node_found = hgsl_mem_find_base_locked(&priv->mem_allocated,
gpuaddr, params->sizebytes);
node_found = hgsl_mem_find_node_locked(&priv->mem_allocated,
gpuaddr, params->sizebytes, false);
if (node_found)
internal = true;
else {
node_found = hgsl_mem_find_base_locked(&priv->mem_mapped,
gpuaddr, params->sizebytes);
node_found = hgsl_mem_find_node_locked(&priv->mem_mapped,
gpuaddr, params->sizebytes, false);
if (!node_found) {
LOGE("failed to find node %d", ret);
LOGE("failed to find gpuaddr: 0x%llx size: 0x%llx",
gpuaddr, params->sizebytes);
ret = -EINVAL;
mutex_unlock(&priv->lock);
goto out;
@ -2607,7 +2614,6 @@ static int hgsl_ioctl_mem_get_fd(
struct hgsl_ioctl_mem_get_fd_params *params = data;
struct gsl_memdesc_t memdesc;
struct hgsl_mem_node *node_found = NULL;
struct hgsl_mem_node *tmp = NULL;
int ret = 0;
if (copy_from_user(&memdesc, USRPTR(params->memdesc),
@ -2618,28 +2624,25 @@ static int hgsl_ioctl_mem_get_fd(
}
mutex_lock(&priv->lock);
list_for_each_entry(tmp, &priv->mem_allocated, node) {
if ((tmp->memdesc.gpuaddr == memdesc.gpuaddr)
&& (tmp->memdesc.size == memdesc.size)) {
node_found = tmp;
break;
}
}
params->fd = -1;
if (node_found && node_found->dma_buf) {
node_found = hgsl_mem_find_node_locked(&priv->mem_allocated,
memdesc.gpuaddr, memdesc.size64, true);
if (node_found && node_found->dma_buf)
get_dma_buf(node_found->dma_buf);
else
ret = -EINVAL;
mutex_unlock(&priv->lock);
params->fd = -1;
if (!ret) {
params->fd = dma_buf_fd(node_found->dma_buf, O_CLOEXEC);
if (params->fd < 0) {
LOGE("dma buf to fd failed");
ret = -EINVAL;
dma_buf_put(node_found->dma_buf);
}
} else {
} else
LOGE("can't find the memory 0x%llx, 0x%x, node_found:%p",
memdesc.gpuaddr, memdesc.size, node_found);
ret = -EINVAL;
}
mutex_unlock(&priv->lock);
out:
return ret;
@ -3251,8 +3254,8 @@ static int hgsl_open(struct inode *inodep, struct file *filep)
goto out;
}
INIT_LIST_HEAD(&priv->mem_mapped);
INIT_LIST_HEAD(&priv->mem_allocated);
priv->mem_mapped = RB_ROOT;
priv->mem_allocated = RB_ROOT;
mutex_init(&priv->lock);
priv->pid = pid_nr;
@ -3279,33 +3282,27 @@ out:
static int hgsl_cleanup(struct hgsl_priv *priv)
{
struct hgsl_mem_node *node_found = NULL;
struct hgsl_mem_node *tmp = NULL;
int ret;
bool need_notify = (!list_empty(&priv->mem_mapped) ||
!list_empty(&priv->mem_allocated));
struct rb_node *next = NULL;
int ret = 0;
struct hgsl_hab_channel_t *hab_channel = NULL;
if (need_notify) {
ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
if (ret)
LOGE("Failed to get channel %d", ret);
if (hgsl_mem_rb_empty(priv))
goto out;
ret = hgsl_hyp_notify_cleanup(hab_channel, HGSL_CLEANUP_WAIT_SLICE_IN_MS);
if (ret == -ETIMEDOUT) {
hgsl_hyp_channel_pool_put(hab_channel);
return ret;
}
ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
if (ret) {
LOGE("Failed to get channel %d", ret);
goto out;
}
ret = hgsl_hyp_notify_cleanup(hab_channel, HGSL_CLEANUP_WAIT_SLICE_IN_MS);
if (ret == -ETIMEDOUT)
goto out;
mutex_lock(&priv->lock);
if ((hab_channel == NULL) &&
(!list_empty(&priv->mem_mapped) || !list_empty(&priv->mem_allocated))) {
ret = hgsl_hyp_channel_pool_get(&priv->hyp_priv, 0, &hab_channel);
if (ret)
LOGE("Failed to get channel %d", ret);
}
list_for_each_entry_safe(node_found, tmp, &priv->mem_mapped, node) {
next = rb_first(&priv->mem_mapped);
while (next) {
node_found = rb_entry(next, struct hgsl_mem_node, mem_rb_node);
hgsl_put_sgt(node_found, false);
ret = hgsl_hyp_mem_unmap_smmu(hab_channel, node_found);
if (ret)
@ -3313,22 +3310,30 @@ static int hgsl_cleanup(struct hgsl_priv *priv)
node_found->export_id, node_found->memdesc.gpuaddr, ret);
else
hgsl_trace_gpu_mem_total(priv, -(node_found->memdesc.size64));
list_del(&node_found->node);
next = rb_next(&node_found->mem_rb_node);
rb_erase(&node_found->mem_rb_node, &priv->mem_mapped);
hgsl_free(node_found);
}
list_for_each_entry_safe(node_found, tmp, &priv->mem_allocated, node) {
next = rb_first(&priv->mem_allocated);
while (next) {
node_found = rb_entry(next, struct hgsl_mem_node, mem_rb_node);
ret = hgsl_hyp_mem_unmap_smmu(hab_channel, node_found);
if (ret)
LOGE("Failed to clean mapped buffer %u, 0x%llx, ret %d",
node_found->export_id, node_found->memdesc.gpuaddr, ret);
list_del(&node_found->node);
hgsl_trace_gpu_mem_total(priv, -(node_found->memdesc.size64));
next = rb_next(&node_found->mem_rb_node);
rb_erase(&node_found->mem_rb_node, &priv->mem_allocated);
hgsl_sharedmem_free(node_found);
}
mutex_unlock(&priv->lock);
out:
hgsl_hyp_channel_pool_put(hab_channel);
return 0;
return ret;
}
static int _hgsl_release(struct hgsl_priv *priv)

View file

@ -192,8 +192,8 @@ struct hgsl_priv {
struct list_head node;
struct hgsl_hyp_priv_t hyp_priv;
struct mutex lock;
struct list_head mem_mapped;
struct list_head mem_allocated;
struct rb_root mem_mapped;
struct rb_root mem_allocated;
int open_count;
atomic64_t total_mem_size;
@ -230,6 +230,12 @@ static inline bool hgsl_ts_ge(uint64_t a, uint64_t b, bool is64)
return hgsl_ts32_ge((uint32_t)a, (uint32_t)b);
}
/* Return true when this client owns no GPU memory nodes, i.e. both the
 * mapped and the allocated rb-trees are empty.
 * NOTE(review): presumably called under priv->lock when the trees can
 * still change — confirm at the call sites.
 */
static inline bool hgsl_mem_rb_empty(struct hgsl_priv *priv)
{
	return (RB_EMPTY_ROOT(&priv->mem_mapped) &&
		RB_EMPTY_ROOT(&priv->mem_allocated));
}
/**
* struct hgsl_hsync_timeline - A sync timeline attached under each hgsl context
* @kref: Refcount to keep the struct alive

View file

@ -14,12 +14,14 @@ static int hgsl_client_mem_show(struct seq_file *s, void *unused)
{
struct hgsl_priv *priv = s->private;
struct hgsl_mem_node *tmp = NULL;
struct rb_node *rb = NULL;
seq_printf(s, "%16s %16s %10s %10s\n",
"gpuaddr", "size", "flags", "type");
mutex_lock(&priv->lock);
list_for_each_entry(tmp, &priv->mem_allocated, node) {
for (rb = rb_first(&priv->mem_allocated); rb; rb = rb_next(rb)) {
tmp = rb_entry(rb, struct hgsl_mem_node, mem_rb_node);
seq_printf(s, "%p %16llx %10x %10d\n",
tmp->memdesc.gpuaddr,
tmp->memdesc.size,
@ -37,6 +39,7 @@ static int hgsl_client_memtype_show(struct seq_file *s, void *unused)
{
struct hgsl_priv *priv = s->private;
struct hgsl_mem_node *tmp = NULL;
struct rb_node *rb = NULL;
int i;
int memtype;
@ -71,7 +74,8 @@ static int hgsl_client_memtype_show(struct seq_file *s, void *unused)
gpu_mem_types[i].size = 0;
mutex_lock(&priv->lock);
list_for_each_entry(tmp, &priv->mem_allocated, node) {
for (rb = rb_first(&priv->mem_allocated); rb; rb = rb_next(rb)) {
tmp = rb_entry(rb, struct hgsl_mem_node, mem_rb_node);
memtype = GET_MEMTYPE(tmp->flags);
if (memtype < ARRAY_SIZE(gpu_mem_types))
gpu_mem_types[memtype].size += tmp->memdesc.size;

View file

@ -612,24 +612,6 @@ void hgsl_sharedmem_free(struct hgsl_mem_node *mem_node)
}
struct hgsl_mem_node *hgsl_mem_find_base_locked(struct list_head *head,
uint64_t gpuaddr, uint64_t size)
{
struct hgsl_mem_node *node_found = NULL;
struct hgsl_mem_node *tmp = NULL;
uint64_t end = gpuaddr + size;
list_for_each_entry(tmp, head, node) {
if ((tmp->memdesc.gpuaddr <= gpuaddr)
&& ((tmp->memdesc.gpuaddr + tmp->memdesc.size) >= end)) {
node_found = tmp;
break;
}
}
return node_found;
}
void *hgsl_mem_node_zalloc(bool iocoherency)
{
struct hgsl_mem_node *mem_node = NULL;
@ -644,4 +626,64 @@ out:
return mem_node;
}
/* Insert mem_node into an rb-tree keyed by memdesc.gpuaddr.
 * Caller must hold the lock protecting rb_root.
 *
 * Returns 0 on success, -EEXIST if a node with the same gpuaddr is
 * already present (the tree stays unchanged in that case).
 */
int hgsl_mem_add_node(struct rb_root *rb_root,
	struct hgsl_mem_node *mem_node)
{
	struct rb_node **cur;
	struct rb_node *parent = NULL;
	struct hgsl_mem_node *node = NULL;
	int ret = 0;

	/* Standard rb-tree insertion walk: descend to the leaf slot. */
	cur = &rb_root->rb_node;
	while (*cur) {
		parent = *cur;
		node = rb_entry(parent, struct hgsl_mem_node, mem_rb_node);
		if (mem_node->memdesc.gpuaddr > node->memdesc.gpuaddr)
			cur = &parent->rb_right;
		else if (mem_node->memdesc.gpuaddr < node->memdesc.gpuaddr)
			cur = &parent->rb_left;
		else {
			/* gpuaddr is the unique key; duplicates are rejected. */
			LOGE("Duplicate gpuaddr: 0x%llx",
				mem_node->memdesc.gpuaddr);
			ret = -EEXIST;
			goto out;
		}
	}

	rb_link_node(&mem_node->mem_rb_node, parent, cur);
	rb_insert_color(&mem_node->mem_rb_node, rb_root);

out:
	return ret;
}
/* Look up a node in a gpuaddr-keyed rb-tree.
 *
 * accurate == true : match requires exactly equal gpuaddr and size.
 * accurate == false: match requires [gpuaddr, gpuaddr+size) to be fully
 *                    contained in the node's range (see
 *                    hgsl_mem_range_inspect()).
 * Caller must hold the lock protecting rb_root.
 *
 * Returns the matching node, or NULL if none is found (including the
 * case where a node with the same base gpuaddr exists but the size
 * check fails — that case is logged as invalid).
 */
struct hgsl_mem_node *hgsl_mem_find_node_locked(
	struct rb_root *rb_root, uint64_t gpuaddr,
	uint64_t size, bool accurate)
{
	struct rb_node *cur = NULL;
	struct hgsl_mem_node *node_found = NULL;

	cur = rb_root->rb_node;
	while (cur) {
		node_found = rb_entry(cur, struct hgsl_mem_node, mem_rb_node);
		if (hgsl_mem_range_inspect(
			node_found->memdesc.gpuaddr, gpuaddr,
			node_found->memdesc.size64, size,
			accurate)) {
			return node_found;
		} else if (node_found->memdesc.gpuaddr < gpuaddr)
			cur = cur->rb_right;
		else if (node_found->memdesc.gpuaddr > gpuaddr)
			cur = cur->rb_left;
		else {
			/* Same base address but the range test failed:
			 * the request does not describe this allocation.
			 */
			LOGE("Invalid addr: 0x%llx size: [0x%llx 0x%llx]",
				gpuaddr, size, node_found->memdesc.size64);
			goto out;
		}
	}
out:
	return NULL;
}
MODULE_IMPORT_NS(DMA_BUF);

View file

@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/rbtree.h>
#include "hgsl_types.h"
#include "hgsl_utils.h"
@ -49,7 +50,7 @@ enum gsl_user_mem_type_t {
};
struct hgsl_mem_node {
struct list_head node;
struct rb_node mem_rb_node;
struct gsl_memdesc_t memdesc;
int32_t fd;
uint32_t export_id;
@ -79,9 +80,21 @@ int hgsl_mem_cache_op(struct device *dev, struct hgsl_mem_node *mem_node,
void hgsl_put_sgt(struct hgsl_mem_node *mem_node, bool internal);
struct hgsl_mem_node *hgsl_mem_find_base_locked(struct list_head *head,
uint64_t gpuaddr, uint64_t size);
void *hgsl_mem_node_zalloc(bool iocoherency);
int hgsl_mem_add_node(struct rb_root *rb_root,
struct hgsl_mem_node *mem_node);
struct hgsl_mem_node *hgsl_mem_find_node_locked(
struct rb_root *rb_root, uint64_t gpuaddr,
uint64_t size, bool accurate);
/* Compare a requested range [da2, da2+size2) against an owned range
 * [da1, da1+size1).
 *
 * accurate == true : both base address and size must match exactly.
 * accurate == false: the requested range must be fully contained in
 *                    the owned range.
 */
static inline bool hgsl_mem_range_inspect(uint64_t da1, uint64_t da2,
	uint64_t size1, uint64_t size2, bool accurate)
{
	uint64_t end1 = da1 + size1;
	uint64_t end2 = da2 + size2;

	if (accurate)
		return (da1 == da2) && (size1 == size2);

	/* Containment check: request must start at or after the owned
	 * base and end at or before the owned end.
	 */
	return (da1 <= da2) && (end1 >= end2);
}
#endif

766
drivers/soc/qcom/msm-spm.c Normal file
View file

@ -0,0 +1,766 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011-2017, 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "spm_driver.h"
#define MSM_SPM_PMIC_STATE_IDLE 0
enum {
MSM_SPM_DEBUG_SHADOW = 1U << 0,
MSM_SPM_DEBUG_VCTL = 1U << 1,
};
static int msm_spm_debug_mask;
module_param_named(
debug_mask, msm_spm_debug_mask, int, 0664
);
struct saw2_data {
const char *ver_name;
uint32_t major;
uint32_t minor;
uint32_t *spm_reg_offset_ptr;
};
static uint32_t msm_spm_reg_offsets_saw2_v2_1[MSM_SPM_REG_NR] = {
[MSM_SPM_REG_SAW_SECURE] = 0x00,
[MSM_SPM_REG_SAW_ID] = 0x04,
[MSM_SPM_REG_SAW_CFG] = 0x08,
[MSM_SPM_REG_SAW_SPM_STS] = 0x0C,
[MSM_SPM_REG_SAW_AVS_STS] = 0x10,
[MSM_SPM_REG_SAW_PMIC_STS] = 0x14,
[MSM_SPM_REG_SAW_RST] = 0x18,
[MSM_SPM_REG_SAW_VCTL] = 0x1C,
[MSM_SPM_REG_SAW_AVS_CTL] = 0x20,
[MSM_SPM_REG_SAW_AVS_LIMIT] = 0x24,
[MSM_SPM_REG_SAW_AVS_DLY] = 0x28,
[MSM_SPM_REG_SAW_AVS_HYSTERESIS] = 0x2C,
[MSM_SPM_REG_SAW_SPM_CTL] = 0x30,
[MSM_SPM_REG_SAW_SPM_DLY] = 0x34,
[MSM_SPM_REG_SAW_PMIC_DATA_0] = 0x40,
[MSM_SPM_REG_SAW_PMIC_DATA_1] = 0x44,
[MSM_SPM_REG_SAW_PMIC_DATA_2] = 0x48,
[MSM_SPM_REG_SAW_PMIC_DATA_3] = 0x4C,
[MSM_SPM_REG_SAW_PMIC_DATA_4] = 0x50,
[MSM_SPM_REG_SAW_PMIC_DATA_5] = 0x54,
[MSM_SPM_REG_SAW_PMIC_DATA_6] = 0x58,
[MSM_SPM_REG_SAW_PMIC_DATA_7] = 0x5C,
[MSM_SPM_REG_SAW_SEQ_ENTRY] = 0x80,
[MSM_SPM_REG_SAW_VERSION] = 0xFD0,
};
static uint32_t msm_spm_reg_offsets_saw2_v3_0[MSM_SPM_REG_NR] = {
[MSM_SPM_REG_SAW_SECURE] = 0x00,
[MSM_SPM_REG_SAW_ID] = 0x04,
[MSM_SPM_REG_SAW_CFG] = 0x08,
[MSM_SPM_REG_SAW_SPM_STS] = 0x0C,
[MSM_SPM_REG_SAW_AVS_STS] = 0x10,
[MSM_SPM_REG_SAW_PMIC_STS] = 0x14,
[MSM_SPM_REG_SAW_RST] = 0x18,
[MSM_SPM_REG_SAW_VCTL] = 0x1C,
[MSM_SPM_REG_SAW_AVS_CTL] = 0x20,
[MSM_SPM_REG_SAW_AVS_LIMIT] = 0x24,
[MSM_SPM_REG_SAW_AVS_DLY] = 0x28,
[MSM_SPM_REG_SAW_AVS_HYSTERESIS] = 0x2C,
[MSM_SPM_REG_SAW_SPM_CTL] = 0x30,
[MSM_SPM_REG_SAW_SPM_DLY] = 0x34,
[MSM_SPM_REG_SAW_STS2] = 0x38,
[MSM_SPM_REG_SAW_PMIC_DATA_0] = 0x40,
[MSM_SPM_REG_SAW_PMIC_DATA_1] = 0x44,
[MSM_SPM_REG_SAW_PMIC_DATA_2] = 0x48,
[MSM_SPM_REG_SAW_PMIC_DATA_3] = 0x4C,
[MSM_SPM_REG_SAW_PMIC_DATA_4] = 0x50,
[MSM_SPM_REG_SAW_PMIC_DATA_5] = 0x54,
[MSM_SPM_REG_SAW_PMIC_DATA_6] = 0x58,
[MSM_SPM_REG_SAW_PMIC_DATA_7] = 0x5C,
[MSM_SPM_REG_SAW_SEQ_ENTRY] = 0x400,
[MSM_SPM_REG_SAW_VERSION] = 0xFD0,
};
static uint32_t msm_spm_reg_offsets_saw2_v4_1[MSM_SPM_REG_NR] = {
[MSM_SPM_REG_SAW_SECURE] = 0xC00,
[MSM_SPM_REG_SAW_ID] = 0xC04,
[MSM_SPM_REG_SAW_STS2] = 0xC10,
[MSM_SPM_REG_SAW_SPM_STS] = 0xC0C,
[MSM_SPM_REG_SAW_AVS_STS] = 0xC14,
[MSM_SPM_REG_SAW_PMIC_STS] = 0xC18,
[MSM_SPM_REG_SAW_RST] = 0xC1C,
[MSM_SPM_REG_SAW_VCTL] = 0x900,
[MSM_SPM_REG_SAW_AVS_CTL] = 0x904,
[MSM_SPM_REG_SAW_AVS_LIMIT] = 0x908,
[MSM_SPM_REG_SAW_AVS_DLY] = 0x90C,
[MSM_SPM_REG_SAW_SPM_CTL] = 0x0,
[MSM_SPM_REG_SAW_SPM_DLY] = 0x4,
[MSM_SPM_REG_SAW_CFG] = 0x0C,
[MSM_SPM_REG_SAW_PMIC_DATA_0] = 0x40,
[MSM_SPM_REG_SAW_PMIC_DATA_1] = 0x44,
[MSM_SPM_REG_SAW_PMIC_DATA_2] = 0x48,
[MSM_SPM_REG_SAW_PMIC_DATA_3] = 0x4C,
[MSM_SPM_REG_SAW_PMIC_DATA_4] = 0x50,
[MSM_SPM_REG_SAW_PMIC_DATA_5] = 0x54,
[MSM_SPM_REG_SAW_SEQ_ENTRY] = 0x400,
[MSM_SPM_REG_SAW_VERSION] = 0xFD0,
};
static struct saw2_data saw2_info[] = {
[0] = {
"SAW_v2.1",
0x2,
0x1,
msm_spm_reg_offsets_saw2_v2_1,
},
[1] = {
"SAW_v2.3",
0x3,
0x0,
msm_spm_reg_offsets_saw2_v3_0,
},
[2] = {
"SAW_v3.0",
0x1,
0x0,
msm_spm_reg_offsets_saw2_v3_0,
},
[3] = {
"SAW_v4.0",
0x4,
0x1,
msm_spm_reg_offsets_saw2_v4_1,
},
};
static uint32_t num_pmic_data;
/* Write one shadow register value out to the corresponding SAW hardware
 * register. Silently ignores a NULL dev or an out-of-range index.
 */
static void msm_spm_drv_flush_shadow(struct msm_spm_driver_data *dev,
		unsigned int reg_index)
{
	if (!dev || reg_index >= MSM_SPM_REG_NR)
		return;

	__raw_writel(dev->reg_shadow[reg_index],
		dev->reg_base_addr + dev->reg_offsets[reg_index]);
}
/* Refresh one entry of the shadow array from the SAW hardware register.
 * Silently ignores a NULL dev or an out-of-range index.
 */
static void msm_spm_drv_load_shadow(struct msm_spm_driver_data *dev,
		unsigned int reg_index)
{
	if (!dev || reg_index >= MSM_SPM_REG_NR)
		return;

	dev->reg_shadow[reg_index] =
		__raw_readl(dev->reg_base_addr +
			dev->reg_offsets[reg_index]);
}
/* Read the supported SPM sequence-entry count from SAW_ID bits [31:24].
 * NOTE(review): return type is unsigned, so the -ENODEV for a NULL dev
 * comes back as a large positive value — confirm callers handle this.
 */
static inline uint32_t msm_spm_drv_get_num_spm_entry(
		struct msm_spm_driver_data *dev)
{
	if (!dev)
		return -ENODEV;

	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 24) & 0xFF;
}
/* Store the low-power-mode control word in the SPM_CTL shadow; the
 * caller is responsible for flushing it to hardware.
 */
static inline void msm_spm_drv_set_start_addr(
		struct msm_spm_driver_data *dev, uint32_t ctl)
{
	dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] = ctl;
}
/* Report whether the PMIC arbiter is present, per SAW_ID bit 2.
 * NOTE(review): dereferences dev unconditionally after load_shadow's
 * internal NULL check returns early — callers must pass a valid dev.
 */
static inline bool msm_spm_pmic_arb_present(struct msm_spm_driver_data *dev)
{
	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 2) & 0x1;
}
/* Program a voltage level and PMIC port index into both SAW_VCTL and
 * SAW_PMIC_DATA_3 and flush them to hardware. The 0x700FF mask covers
 * the port field (bits [18:16]) and the level field (bits [7:0]).
 */
static inline void msm_spm_drv_set_vctl2(struct msm_spm_driver_data *dev,
		uint32_t vlevel, uint32_t vctl_port)
{
	unsigned int pmic_data = 0;

	pmic_data |= vlevel;
	pmic_data |= (vctl_port & 0x7) << 16;

	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0x700FF;
	dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= pmic_data;

	dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_DATA_3] &= ~0x700FF;
	dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_DATA_3] |= pmic_data;

	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL);
	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_PMIC_DATA_3);
}
/* Read the number of PMIC data registers from SAW_ID bits [6:4]. */
static inline uint32_t msm_spm_drv_get_num_pmic_data(
		struct msm_spm_driver_data *dev)
{
	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_ID);
	mb(); /* Ensure we flush */
	return (dev->reg_shadow[MSM_SPM_REG_SAW_ID] >> 4) & 0x7;
}
/* Return the PMIC FSM state from SAW_PMIC_STS bits [17:16];
 * MSM_SPM_PMIC_STATE_IDLE (0) means the FSM is idle.
 */
static inline uint32_t msm_spm_drv_get_sts_pmic_state(
		struct msm_spm_driver_data *dev)
{
	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS);
	return (dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] >> 16) &
		0x03;
}
/* Return the current PMIC status: FSM state (bits [17:16]) plus the
 * last voltage level sent (bits [7:0]) — mask 0x300FF.
 */
uint32_t msm_spm_drv_get_sts_curr_pmic_data(
		struct msm_spm_driver_data *dev)
{
	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS);
	return dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] & 0x300FF;
}
/* Read the SAW version register (at dev->ver_reg, which may differ from
 * the tabled VERSION offset) and split it into major (bits [31:28]) and
 * minor (bits [27:16]) parts.
 */
static inline void msm_spm_drv_get_saw2_ver(struct msm_spm_driver_data *dev,
		uint32_t *major, uint32_t *minor)
{
	uint32_t val = 0;

	dev->reg_shadow[MSM_SPM_REG_SAW_VERSION] =
		__raw_readl(dev->reg_base_addr + dev->ver_reg);

	val = dev->reg_shadow[MSM_SPM_REG_SAW_VERSION];

	*major = (val >> 28) & 0xF;
	*minor = (val >> 16) & 0xFFF;
}
/* Enable or disable the SPM state machine (SPM_CTL bit 0). Only touches
 * hardware when the requested state differs from the shadow copy.
 * Returns 0 on success, -EINVAL for a NULL dev.
 */
inline int msm_spm_drv_set_spm_enable(
		struct msm_spm_driver_data *dev, bool enable)
{
	uint32_t value = enable ? 0x01 : 0x00;

	if (!dev)
		return -EINVAL;

	if ((dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] & 0x01) ^ value) {
		/* Clear than set bit-0 */
		dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] &= ~0x1;
		dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL] |= value;
		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
		wmb(); /* Ensure we flush */
	}
	return 0;
}
/* Return the AVS enable bit (AVS_CTL bit 0) from the shadow copy,
 * or -EINVAL for a NULL dev. Does not re-read hardware.
 */
int msm_spm_drv_get_avs_enable(struct msm_spm_driver_data *dev)
{
	if (!dev)
		return -EINVAL;

	return dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & 0x01;
}
/* Enable or disable AVS (AVS_CTL bit 0). Only writes hardware when the
 * requested state differs from the shadow copy.
 * Returns 0 on success, -EINVAL for a NULL dev.
 */
int msm_spm_drv_set_avs_enable(struct msm_spm_driver_data *dev,
		bool enable)
{
	uint32_t value = enable ? 0x1 : 0x0;

	if (!dev)
		return -EINVAL;

	if ((dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & 0x1) ^ value) {
		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~0x1;
		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= value;

		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
	}

	return 0;
}
/* Program the AVS voltage window: max level in bits [23:16], min level
 * in bits [7:0] of SAW_AVS_LIMIT.
 * Returns 0 on success, -EINVAL for a NULL dev.
 */
int msm_spm_drv_set_avs_limit(struct msm_spm_driver_data *dev,
		uint32_t min_lvl, uint32_t max_lvl)
{
	uint32_t value = (max_lvl & 0xff) << 16 | (min_lvl & 0xff);

	if (!dev)
		return -EINVAL;

	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_LIMIT] = value;

	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_LIMIT);

	return 0;
}
/* Map an AVS irq id to its control bit in SAW_AVS_CTL.
 * Returns the bit mask, or -EINVAL for an unknown irq.
 */
static int msm_spm_drv_avs_irq_mask(enum msm_spm_avs_irq irq)
{
	switch (irq) {
	case MSM_SPM_AVS_IRQ_MIN:
		return BIT(1);
	case MSM_SPM_AVS_IRQ_MAX:
		return BIT(2);
	default:
		return -EINVAL;
	}
}
/* Enable or disable one AVS interrupt in SAW_AVS_CTL. Only writes
 * hardware when the requested state differs from the shadow copy.
 * Returns 0 on success, -EINVAL for a NULL dev or unknown irq.
 */
int msm_spm_drv_set_avs_irq_enable(struct msm_spm_driver_data *dev,
		enum msm_spm_avs_irq irq, bool enable)
{
	int mask = msm_spm_drv_avs_irq_mask(irq);
	uint32_t value;

	if (!dev)
		return -EINVAL;
	else if (mask < 0)
		return mask;

	value = enable ? mask : 0;

	if ((dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & mask) ^ value) {
		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~mask;
		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= value;
		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
	}

	return 0;
}
/* Acknowledge a latched AVS interrupt. Hardware clears the status on a
 * disable/re-enable toggle of the irq bit, so we only act when the irq
 * is currently enabled.
 * Returns 0 on success, -EINVAL for a NULL dev or unknown irq.
 */
int msm_spm_drv_avs_clear_irq(struct msm_spm_driver_data *dev,
		enum msm_spm_avs_irq irq)
{
	int mask = msm_spm_drv_avs_irq_mask(irq);

	if (!dev)
		return -EINVAL;
	else if (mask < 0)
		return mask;

	if (dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & mask) {
		/*
		 * The interrupt status is cleared by disabling and then
		 * re-enabling the interrupt.
		 */
		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~mask;
		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
		dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= mask;
		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
	}

	return 0;
}
/* Write the shadow copy of the SPM command sequence out to the hardware
 * sequence-entry registers (4 bytes per entry), then barrier.
 *
 * Fix: the original queried msm_spm_drv_get_num_spm_entry(dev) in the
 * declaration initializer, i.e. BEFORE the !dev guard ran; validate dev
 * first, then talk to the helper.
 */
void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev)
{
	int i;
	int num_spm_entry;

	if (!dev) {
		__WARN();
		return;
	}

	num_spm_entry = msm_spm_drv_get_num_spm_entry(dev);

	for (i = 0; i < num_spm_entry; i++) {
		__raw_writel(dev->reg_seq_entry_shadow[i],
			dev->reg_base_addr
			+ dev->reg_offsets[MSM_SPM_REG_SAW_SEQ_ENTRY]
			+ 4 * i);
	}
	mb(); /* Ensure we flush */
}
/* Debug helper: reload and print the SPM status and control registers
 * for the given CPU at pr_err level.
 */
void dump_regs(struct msm_spm_driver_data *dev, int cpu)
{
	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_STS);
	mb(); /* Ensure we flush */
	pr_err("CPU%d: spm register MSM_SPM_REG_SAW_SPM_STS: 0x%x\n", cpu,
		dev->reg_shadow[MSM_SPM_REG_SAW_SPM_STS]);
	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
	mb(); /* Ensure we flush */
	pr_err("CPU%d: spm register MSM_SPM_REG_SAW_SPM_CTL: 0x%x\n", cpu,
		dev->reg_shadow[MSM_SPM_REG_SAW_SPM_CTL]);
}
/* Pack a 0x0f-terminated byte command sequence into the 32-bit
 * sequence-entry shadow, little-endian within each word, starting at
 * byte position *offset. *offset is advanced past the bytes consumed
 * (including the 0x0f terminator).
 *
 * Returns 0 on success, -EINVAL for a NULL cmd.
 * NOTE(review): existing word bits are OR-ed into, not cleared first —
 * presumably the shadow is zero-initialized (kcalloc in init); confirm
 * before rewriting a sequence in place.
 */
int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev,
		uint8_t *cmd, uint32_t *offset)
{
	uint32_t cmd_w;
	uint32_t offset_w = *offset / 4;
	uint8_t last_cmd;

	if (!cmd)
		return -EINVAL;

	while (1) {
		int i;

		cmd_w = 0;
		last_cmd = 0;
		cmd_w = dev->reg_seq_entry_shadow[offset_w];
		/* Fill the remaining bytes of the current word. */
		for (i = (*offset % 4); i < 4; i++) {
			last_cmd = *(cmd++);
			cmd_w |= last_cmd << (i * 8);
			(*offset)++;
			if (last_cmd == 0x0f)
				break;
		}

		dev->reg_seq_entry_shadow[offset_w++] = cmd_w;
		if (last_cmd == 0x0f)
			break;
	}
	return 0;
}
/* Select the low-power mode by writing the mode's control word into
 * SPM_CTL, then refresh the status shadow. Optionally dumps every
 * shadow register when the SHADOW debug mask is set.
 * Returns 0 on success, -EINVAL for a NULL dev.
 */
int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev,
		uint32_t ctl)
{

	/* SPM is configured to reset start address to zero after end of Program
	 */
	if (!dev)
		return -EINVAL;

	msm_spm_drv_set_start_addr(dev, ctl);

	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
	wmb(); /* Ensure we flush */

	if (msm_spm_debug_mask & MSM_SPM_DEBUG_SHADOW) {
		int i;

		for (i = 0; i < MSM_SPM_REG_NR; i++)
			pr_info("%s: reg %02x = 0x%08x\n", __func__,
				dev->reg_offsets[i], dev->reg_shadow[i]);
	}
	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_SPM_STS);

	return 0;
}
/* Return the current voltage level (PMIC_STS bits [7:0]) from a fresh
 * hardware read.
 */
uint32_t msm_spm_drv_get_vdd(struct msm_spm_driver_data *dev)
{
	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_PMIC_STS);
	return dev->reg_shadow[MSM_SPM_REG_SAW_PMIC_STS] & 0xFF;
}
/* AVS hardware helpers. When CONFIG_MSM_AVS_HW is off these compile to
 * no-ops so the set_vdd() path needs no conditional code.
 */
#ifdef CONFIG_MSM_AVS_HW
/* AVS enabled == AVS_CTL bit 0 set (fresh hardware read). */
static bool msm_spm_drv_is_avs_enabled(struct msm_spm_driver_data *dev)
{
	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
	return dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] & BIT(0);
}

/* Pause AVS by clearing AVS_CTL bit 27. */
static void msm_spm_drv_disable_avs(struct msm_spm_driver_data *dev)
{
	msm_spm_drv_load_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~BIT(27);
	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
}

/* Resume AVS by setting AVS_CTL bit 27. */
static void msm_spm_drv_enable_avs(struct msm_spm_driver_data *dev)
{
	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= BIT(27);
	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
}

/* Program the AVS floor (vlevel - 4, bits [15:10]) and ceiling
 * (vlevel, bits [22:17]) around a 6-bit voltage level.
 */
static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev,
		unsigned int vlevel)
{
	vlevel &= 0x3f;
	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] &= ~0x7efc00;
	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= ((vlevel - 4) << 10);
	dev->reg_shadow[MSM_SPM_REG_SAW_AVS_CTL] |= (vlevel << 17);
	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_AVS_CTL);
}
#else
static bool msm_spm_drv_is_avs_enabled(struct msm_spm_driver_data *dev)
{
	return false;
}

static void msm_spm_drv_disable_avs(struct msm_spm_driver_data *dev) { }

static void msm_spm_drv_enable_avs(struct msm_spm_driver_data *dev) { }

static void msm_spm_drv_set_avs_vlevel(struct msm_spm_driver_data *dev,
		unsigned int vlevel)
{
}
#endif
/* Poll (1 us per iteration, up to vctl_timeout_us) until the PMIC FSM
 * is idle and — when vctl_port is 0 — the reported level matches the
 * requested vlevel. For non-zero ports only the FSM state is checked,
 * because hardware updates PMIC_STS only for port 0.
 * Returns 0 on success, -EIO on timeout.
 */
static inline int msm_spm_drv_validate_data(struct msm_spm_driver_data *dev,
		unsigned int vlevel, int vctl_port)
{
	int timeout_us = dev->vctl_timeout_us;
	uint32_t new_level;

	/* Confirm the voltage we set was what hardware sent and
	 * FSM is idle.
	 */
	do {
		udelay(1);
		new_level = msm_spm_drv_get_sts_curr_pmic_data(dev);

		/**
		 * VCTL_PORT has to be 0, for vlevel to be updated.
		 * If port is not 0, check for PMIC_STATE only.
		 */
		if (((new_level & 0x30000) == MSM_SPM_PMIC_STATE_IDLE) &&
			(vctl_port || ((new_level & 0xFF) == vlevel)))
			break;
	} while (--timeout_us);

	if (!timeout_us) {
		pr_err("Wrong level %#x\n", new_level);
		return -EIO;
	}

	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
		pr_info("%s: done, remaining timeout %u us\n",
			__func__, timeout_us);

	return 0;
}
/* Request a new voltage level through the SAW VCTL path.
 *
 * AVS is paused for the duration and re-enabled (with updated limits)
 * afterwards. When vctl_port_ub >= 0 the 16-bit level is sent in two
 * 8-bit halves: lower byte on vctl_port first, then upper byte on
 * vctl_port_ub, which commits the change. Each send is validated by
 * polling PMIC status.
 *
 * Returns 0 on success, -EINVAL for a NULL dev, -ENODEV when no PMIC
 * arbiter is present, -EIO when validation times out.
 */
int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev, unsigned int vlevel)
{
	uint32_t vlevel_set = vlevel;
	bool avs_enabled;
	int ret = 0;

	if (!dev)
		return -EINVAL;

	avs_enabled = msm_spm_drv_is_avs_enabled(dev);

	if (!msm_spm_pmic_arb_present(dev))
		return -ENODEV;

	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
		pr_info("%s: requesting vlevel %#x\n", __func__, vlevel);

	/* Pause AVS so it does not fight the explicit level change. */
	if (avs_enabled)
		msm_spm_drv_disable_avs(dev);

	if (dev->vctl_port_ub >= 0) {
		/**
		 * VCTL can send 8bit voltage level at once.
		 * Send lower 8bit first, vlevel change happens
		 * when upper 8bit is sent.
		 */
		vlevel = vlevel_set & 0xFF;
	}

	/* Kick the state machine back to idle */
	dev->reg_shadow[MSM_SPM_REG_SAW_RST] = 1;
	msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_RST);

	msm_spm_drv_set_vctl2(dev, vlevel, dev->vctl_port);

	ret = msm_spm_drv_validate_data(dev, vlevel, dev->vctl_port);
	if (ret)
		goto set_vdd_bail;

	if (dev->vctl_port_ub >= 0) {
		/* Send upper 8bit of voltage level */
		vlevel = (vlevel_set >> 8) & 0xFF;
		/* Kick the state machine back to idle */
		dev->reg_shadow[MSM_SPM_REG_SAW_RST] = 1;
		msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_RST);
		/*
		 * Steps for sending for vctl port other than '0'
		 * Write VCTL register with pmic data and address index
		 * Perform system barrier
		 * Wait for 1us
		 * Read PMIC_STS register to make sure operation is complete
		 */
		msm_spm_drv_set_vctl2(dev, vlevel, dev->vctl_port_ub);
		mb(); /* To make sure data is sent before checking status */

		ret = msm_spm_drv_validate_data(dev, vlevel, dev->vctl_port_ub);
		if (ret)
			goto set_vdd_bail;
	}

	/* Set AVS min/max */
	if (avs_enabled) {
		msm_spm_drv_set_avs_vlevel(dev, vlevel_set);
		msm_spm_drv_enable_avs(dev);
	}

	return ret;

set_vdd_bail:
	/* Restore AVS even on failure so the rail stays managed. */
	if (avs_enabled)
		msm_spm_drv_enable_avs(dev);

	pr_err("%s: failed %#x vlevel setting in timeout %uus\n",
		__func__, vlevel_set, dev->vctl_timeout_us);
	return -EIO;
}
/* Translate a logical PMIC port to the device's configured port index.
 * Returns -1 for an unknown port.
 */
static int msm_spm_drv_get_pmic_port(struct msm_spm_driver_data *dev,
		enum msm_spm_pmic_port port)
{
	int index = -1;

	switch (port) {
	case MSM_SPM_PMIC_VCTL_PORT:
		index = dev->vctl_port;
		break;
	case MSM_SPM_PMIC_PHASE_PORT:
		index = dev->phase_port;
		break;
	case MSM_SPM_PMIC_PFM_PORT:
		index = dev->pfm_port;
		break;
	default:
		break;
	}

	return index;
}
int msm_spm_drv_set_pmic_data(struct msm_spm_driver_data *dev,
enum msm_spm_pmic_port port, unsigned int data)
{
unsigned int pmic_data = 0;
unsigned int timeout_us = 0;
int index = 0;
if (!msm_spm_pmic_arb_present(dev))
return -ENODEV;
index = msm_spm_drv_get_pmic_port(dev, port);
if (index < 0)
return -ENODEV;
pmic_data |= data & 0xFF;
pmic_data |= (index & 0x7) << 16;
dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] &= ~0x700FF;
dev->reg_shadow[MSM_SPM_REG_SAW_VCTL] |= pmic_data;
msm_spm_drv_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL);
mb(); /* Ensure we flush */
timeout_us = dev->vctl_timeout_us;
/**
* Confirm the pmic data set was what hardware sent by
* checking the PMIC FSM state.
* We cannot use the sts_pmic_data and check it against
* the value like we do fot set_vdd, since the PMIC_STS
* is only updated for SAW_VCTL sent with port index 0.
*/
do {
if (msm_spm_drv_get_sts_pmic_state(dev) ==
MSM_SPM_PMIC_STATE_IDLE)
break;
udelay(1);
} while (--timeout_us);
if (!timeout_us) {
pr_err("%s: failed, remaining timeout %u us, data %d\n",
__func__, timeout_us, data);
return -EIO;
}
return 0;
}
/* Resynchronize the shadow array from hardware after a power event:
 * the init-programmed registers up to the last valid PMIC data slot,
 * plus every status register past MSM_SPM_REG_NR_INITIALIZE. When
 * seq_write is set, the command sequence is flushed out first.
 */
void msm_spm_drv_reinit(struct msm_spm_driver_data *dev, bool seq_write)
{
	int i;

	if (seq_write)
		msm_spm_drv_flush_seq_entry(dev);

	for (i = 0; i < MSM_SPM_REG_SAW_PMIC_DATA_0 + num_pmic_data; i++)
		msm_spm_drv_load_shadow(dev, i);

	for (i = MSM_SPM_REG_NR_INITIALIZE + 1; i < MSM_SPM_REG_NR; i++)
		msm_spm_drv_load_shadow(dev, i);
}
/* Detect the SAW hardware revision and select the matching register
 * offset table from saw2_info. Warns (but still returns 0) when the
 * version is unknown — dev->reg_offsets is then left unset.
 */
int msm_spm_drv_reg_init(struct msm_spm_driver_data *dev,
		struct msm_spm_platform_data *data)
{
	int i;
	bool found = false;

	dev->ver_reg = data->ver_reg;
	dev->reg_base_addr = data->reg_base_addr;
	msm_spm_drv_get_saw2_ver(dev, &dev->major, &dev->minor);
	for (i = 0; i < ARRAY_SIZE(saw2_info); i++)
		if (dev->major == saw2_info[i].major &&
			dev->minor == saw2_info[i].minor) {
			pr_debug("%s: Version found\n",
				saw2_info[i].ver_name);
			dev->reg_offsets = saw2_info[i].spm_reg_offset_ptr;
			found = true;
			break;
		}

	if (!found) {
		pr_err("%s: No SAW version found\n", __func__);
		WARN_ON(!found);
	}
	return 0;
}
/* Overwrite one shadow register and push it straight to hardware,
 * with a full barrier before returning.
 */
void msm_spm_drv_upd_reg_shadow(struct msm_spm_driver_data *dev, int id,
		int val)
{
	dev->reg_shadow[id] = val;
	msm_spm_drv_flush_shadow(dev, id);
	/* Complete the above writes before other accesses */
	mb();
}
/* Initialize a SAW driver instance from platform data: copy port
 * configuration and initial register values, then allocate the
 * (zeroed) sequence-entry shadow sized from hardware.
 * Returns 0 on success, -ENODEV for NULL arguments, -ENOMEM on
 * allocation failure.
 */
int msm_spm_drv_init(struct msm_spm_driver_data *dev,
		struct msm_spm_platform_data *data)
{
	int num_spm_entry;

	if (!dev || !data)
		return -ENODEV;

	dev->vctl_port = data->vctl_port;
	dev->vctl_port_ub = data->vctl_port_ub;
	dev->phase_port = data->phase_port;
	dev->pfm_port = data->pfm_port;
	dev->reg_base_addr = data->reg_base_addr;
	memcpy(dev->reg_shadow, data->reg_init_values,
		sizeof(data->reg_init_values));

	dev->vctl_timeout_us = data->vctl_timeout_us;

	/* Module-wide count, discovered once from the first instance. */
	if (!num_pmic_data)
		num_pmic_data = msm_spm_drv_get_num_pmic_data(dev);

	num_spm_entry = msm_spm_drv_get_num_spm_entry(dev);

	dev->reg_seq_entry_shadow =
		kcalloc(num_spm_entry, sizeof(*dev->reg_seq_entry_shadow),
			GFP_KERNEL);

	if (!dev->reg_seq_entry_shadow)
		return -ENOMEM;

	return 0;
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,132 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2011-2017, 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __ARCH_ARM_MACH_MSM_SPM_DEVICES_H
#define __ARCH_ARM_MACH_MSM_SPM_DEVICES_H
#include <soc/qcom/spm.h>
/* Index space for the SAW register shadow array. Entries up to
 * MSM_SPM_REG_NR_INITIALIZE are programmed from platform data at init;
 * the remainder are status/identification registers loaded on demand.
 * Per-version byte offsets live in the msm_spm_reg_offsets_* tables.
 */
enum {
	MSM_SPM_REG_SAW_CFG,
	MSM_SPM_REG_SAW_AVS_CTL,
	MSM_SPM_REG_SAW_AVS_HYSTERESIS,
	MSM_SPM_REG_SAW_SPM_CTL,
	MSM_SPM_REG_SAW_PMIC_DLY,
	MSM_SPM_REG_SAW_AVS_LIMIT,
	MSM_SPM_REG_SAW_AVS_DLY,
	MSM_SPM_REG_SAW_SPM_DLY,
	MSM_SPM_REG_SAW_PMIC_DATA_0,
	MSM_SPM_REG_SAW_PMIC_DATA_1,
	MSM_SPM_REG_SAW_PMIC_DATA_2,
	MSM_SPM_REG_SAW_PMIC_DATA_3,
	MSM_SPM_REG_SAW_PMIC_DATA_4,
	MSM_SPM_REG_SAW_PMIC_DATA_5,
	MSM_SPM_REG_SAW_PMIC_DATA_6,
	MSM_SPM_REG_SAW_PMIC_DATA_7,
	MSM_SPM_REG_SAW_RST,

	MSM_SPM_REG_NR_INITIALIZE = MSM_SPM_REG_SAW_RST,

	MSM_SPM_REG_SAW_ID,
	MSM_SPM_REG_SAW_SECURE,
	MSM_SPM_REG_SAW_STS0,
	MSM_SPM_REG_SAW_STS1,
	MSM_SPM_REG_SAW_STS2,
	MSM_SPM_REG_SAW_VCTL,
	MSM_SPM_REG_SAW_SEQ_ENTRY,
	MSM_SPM_REG_SAW_SPM_STS,
	MSM_SPM_REG_SAW_AVS_STS,
	MSM_SPM_REG_SAW_PMIC_STS,
	MSM_SPM_REG_SAW_VERSION,
	MSM_SPM_REG_NR,
};
/* One low-power-mode command sequence: the mode id, its 0x0f-terminated
 * command bytes, and the SPM_CTL word used to start it.
 */
struct msm_spm_seq_entry {
	uint32_t mode;
	uint8_t *cmd;
	uint32_t ctl;
};
/* Board/SoC-provided configuration handed to msm_spm_drv_init()/
 * msm_spm_drv_reg_init(): register base and initial values, PMIC port
 * routing, timeouts, and the supported low-power mode sequences.
 */
struct msm_spm_platform_data {
	void __iomem *reg_base_addr;
	uint32_t reg_init_values[MSM_SPM_REG_NR_INITIALIZE];

	uint32_t ver_reg;
	uint32_t vctl_port;
	int vctl_port_ub;	/* upper-byte port; < 0 when single-byte VCTL */
	uint32_t phase_port;
	uint32_t pfm_port;

	uint8_t awake_vlevel;
	uint32_t vctl_timeout_us;	/* poll budget for voltage changes */
	uint32_t avs_timeout_us;

	uint32_t num_modes;
	struct msm_spm_seq_entry *modes;
};
enum msm_spm_pmic_port {
MSM_SPM_PMIC_VCTL_PORT,
MSM_SPM_PMIC_PHASE_PORT,
MSM_SPM_PMIC_PFM_PORT,
};
/* Per-SAW-instance runtime state: detected hardware version, port
 * configuration copied from platform data, and the software shadow of
 * the register file plus the command-sequence memory.
 */
struct msm_spm_driver_data {
	uint32_t major;
	uint32_t minor;
	uint32_t ver_reg;
	uint32_t vctl_port;
	int vctl_port_ub;	/* upper-byte port; < 0 when single-byte VCTL */
	uint32_t phase_port;
	uint32_t pfm_port;
	void __iomem *reg_base_addr;
	uint32_t vctl_timeout_us;
	uint32_t avs_timeout_us;
	uint32_t reg_shadow[MSM_SPM_REG_NR];	/* last known register values */
	uint32_t *reg_seq_entry_shadow;	/* kcalloc'd in msm_spm_drv_init() */
	uint32_t *reg_offsets;	/* version-specific table from saw2_info */
};
int msm_spm_drv_init(struct msm_spm_driver_data *dev,
struct msm_spm_platform_data *data);
int msm_spm_drv_reg_init(struct msm_spm_driver_data *dev,
struct msm_spm_platform_data *data);
void msm_spm_drv_reinit(struct msm_spm_driver_data *dev, bool seq);
int msm_spm_drv_set_low_power_mode(struct msm_spm_driver_data *dev,
uint32_t ctl);
int msm_spm_drv_set_vdd(struct msm_spm_driver_data *dev,
unsigned int vlevel);
void dump_regs(struct msm_spm_driver_data *dev, int cpu);
uint32_t msm_spm_drv_get_sts_curr_pmic_data(
struct msm_spm_driver_data *dev);
int msm_spm_drv_write_seq_data(struct msm_spm_driver_data *dev,
uint8_t *cmd, uint32_t *offset);
void msm_spm_drv_flush_seq_entry(struct msm_spm_driver_data *dev);
int msm_spm_drv_set_spm_enable(struct msm_spm_driver_data *dev,
bool enable);
int msm_spm_drv_set_pmic_data(struct msm_spm_driver_data *dev,
enum msm_spm_pmic_port port, unsigned int data);
int msm_spm_drv_set_avs_limit(struct msm_spm_driver_data *dev,
uint32_t min_lvl, uint32_t max_lvl);
int msm_spm_drv_set_avs_enable(struct msm_spm_driver_data *dev,
bool enable);
int msm_spm_drv_get_avs_enable(struct msm_spm_driver_data *dev);
int msm_spm_drv_set_avs_irq_enable(struct msm_spm_driver_data *dev,
enum msm_spm_avs_irq irq, bool enable);
int msm_spm_drv_avs_clear_irq(struct msm_spm_driver_data *dev,
enum msm_spm_avs_irq irq);
void msm_spm_reinit(void);
int msm_spm_init(struct msm_spm_platform_data *data, int nr_devs);
void msm_spm_drv_upd_reg_shadow(struct msm_spm_driver_data *dev, int id,
int val);
uint32_t msm_spm_drv_get_vdd(struct msm_spm_driver_data *dev);
#endif

View file

@ -19,22 +19,20 @@ static void q2spi_rx_xfer_completion_event(struct msm_gpi_dma_async_tx_cb_param
u32 status = 0;
if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY) {
Q2SPI_DEBUG(q2spi, "%s for Doorbell\n", __func__);
Q2SPI_DBG_2(q2spi, "%s for Doorbell\n", __func__);
xfer = q2spi->db_xfer;
} else {
xfer = q2spi_pkt->xfer;
Q2SPI_DEBUG(q2spi, "%s for Rx Event\n", __func__);
Q2SPI_DBG_2(q2spi, "%s for Rx Event\n", __func__);
}
if (!xfer || !xfer->rx_buf) {
pr_err("%s rx buf NULL!!!\n", __func__);
return;
}
Q2SPI_DEBUG(q2spi, "%s cb_param:%p cb_param->len:%d cb_param->status:%d\n",
__func__, cb_param, cb_param->length, cb_param->status);
Q2SPI_DEBUG(q2spi, "%s xfer:%p rx_buf:%p rx_dma:%p rx_len:%d m_cmd_param:%d\n",
__func__, xfer, xfer->rx_buf, (void *)xfer->rx_dma, xfer->rx_len,
q2spi_pkt->m_cmd_param);
Q2SPI_DBG_2(q2spi, "%s cb_param:%p len:%d status:%d xfer:%p rx buf:%p dma:%p len:%d\n",
__func__, cb_param, cb_param->length, cb_param->status, xfer, xfer->rx_buf,
(void *)xfer->rx_dma, xfer->rx_len);
/* check status is 0 or EOT for success */
status = cb_param->status;
@ -43,13 +41,13 @@ static void q2spi_rx_xfer_completion_event(struct msm_gpi_dma_async_tx_cb_param
q2spi_dump_ipc(q2spi, "rx_xfer_completion_event RX",
(char *)xfer->rx_buf, cb_param->length);
if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY) {
Q2SPI_DEBUG(q2spi, "%s call db_rx_cb\n", __func__);
Q2SPI_DBG_1(q2spi, "%s call db_rx_cb\n", __func__);
complete_all(&q2spi->db_rx_cb);
} else {
Q2SPI_DEBUG(q2spi, "%s call rx_cb\n", __func__);
Q2SPI_DBG_1(q2spi, "%s call rx_cb\n", __func__);
complete_all(&q2spi->rx_cb);
}
Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p state=%d vtype:%d\n",
Q2SPI_DBG_2(q2spi, "%s q2spi_pkt:%p state=%d vtype:%d\n",
__func__, q2spi_pkt, q2spi_pkt->state, q2spi_pkt->vtype);
} else {
Q2SPI_DEBUG(q2spi, "%s Err length miss-match %d %d\n",
@ -77,10 +75,10 @@ static void q2spi_tx_xfer_completion_event(struct msm_gpi_dma_async_tx_cb_param
xfer = q2spi_pkt->xfer;
Q2SPI_DEBUG(q2spi, "%s xfer->tx_len:%d cb_param_length:%d\n", __func__,
Q2SPI_DBG_1(q2spi, "%s xfer->tx_len:%d cb_param_length:%d\n", __func__,
xfer->tx_len, cb_param->length);
if (cb_param->length == xfer->tx_len) {
Q2SPI_DEBUG(q2spi, "%s complete_tx_cb\n", __func__);
Q2SPI_DBG_1(q2spi, "%s complete_tx_cb\n", __func__);
complete_all(&q2spi->tx_cb);
} else {
Q2SPI_DEBUG(q2spi, "%s Err length miss-match\n", __func__);
@ -94,15 +92,14 @@ static void q2spi_parse_q2spi_status(struct msm_gpi_dma_async_tx_cb_param *cb_pa
u32 status = 0;
status = cb_param->q2spi_status;
Q2SPI_DEBUG(q2spi, "%s status:%d complete_tx_cb\n", __func__, status);
Q2SPI_DBG_1(q2spi, "%s status:%d complete_tx_cb\n", __func__, status);
complete_all(&q2spi->tx_cb);
Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p state=%d vtype:%d\n",
Q2SPI_DBG_2(q2spi, "%s q2spi_pkt:%p state=%d vtype:%d\n",
__func__, q2spi_pkt, q2spi_pkt->state, q2spi_pkt->vtype);
}
static void q2spi_parse_cr_header(struct q2spi_geni *q2spi, struct msm_gpi_cb const *cb)
{
Q2SPI_DEBUG(q2spi, "%s line:%d\n", __func__, __LINE__);
q2spi_doorbell(q2spi, &cb->q2spi_cr_header_event);
}
@ -171,13 +168,16 @@ static void q2spi_gsi_tx_callback(void *cb)
complete_all(&q2spi->tx_cb);
return;
} else if (cb_param->completion_code == MSM_GPI_TCE_EOT) {
Q2SPI_DEBUG(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__);
Q2SPI_DBG_2(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__);
if (cb_param->tce_type == XFER_COMPLETE_EV_TYPE) {
Q2SPI_DEBUG(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__);
Q2SPI_DBG_1(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__);
q2spi_tx_xfer_completion_event(cb_param);
} else if (cb_param->tce_type == QUP_TCE_TYPE_Q2SPI_STATUS) {
Q2SPI_DEBUG(q2spi, "%s QUP_TCE_TYPE_Q2SPI_STATUS\n", __func__);
Q2SPI_DBG_1(q2spi, "%s QUP_TCE_TYPE_Q2SPI_STATUS\n", __func__);
q2spi_parse_q2spi_status(cb_param);
} else {
Q2SPI_ERROR(q2spi, "%s cb_param->tce_type:%d\n",
__func__, cb_param->tce_type);
}
}
}
@ -214,17 +214,16 @@ static void q2spi_gsi_rx_callback(void *cb)
__func__, cb_param->status);
return;
} else if (cb_param->completion_code == MSM_GPI_TCE_EOT) {
Q2SPI_DEBUG(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__);
Q2SPI_DBG_2(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__);
if (cb_param->tce_type == XFER_COMPLETE_EV_TYPE) {
/* CR header */
Q2SPI_DEBUG(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__);
Q2SPI_DBG_1(q2spi, "%s TCE XFER_COMPLETE_EV_TYPE\n", __func__);
q2spi_rx_xfer_completion_event(cb_param);
}
} else {
Q2SPI_DEBUG(q2spi, "%s: Err cb_param->completion_code = %d\n",
__func__, cb_param->completion_code);
}
Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid);
}
static void q2spi_geni_deallocate_chan(struct q2spi_gsi *gsi)
@ -266,7 +265,7 @@ int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi)
return -ENOMEM;
}
q2spi->gsi = gsi;
Q2SPI_DEBUG(q2spi, "%s gsi:%p\n", __func__, gsi);
Q2SPI_DBG_2(q2spi, "%s gsi:%p\n", __func__, gsi);
if (gsi->chan_setup) {
Q2SPI_DEBUG(q2spi, "%s Err GSI channel already configured\n", __func__);
return ret;
@ -279,7 +278,7 @@ int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi)
q2spi_kfree(q2spi, q2spi->gsi, __LINE__);
return -EIO;
}
Q2SPI_DEBUG(q2spi, "%s gsi_tx_c:%p\n", __func__, gsi->tx_c);
Q2SPI_DBG_2(q2spi, "%s gsi_tx_c:%p\n", __func__, gsi->tx_c);
gsi->rx_c = dma_request_slave_channel(q2spi->dev, "rx");
if (IS_ERR_OR_NULL(gsi->rx_c)) {
Q2SPI_ERROR(q2spi, "%s Err Failed to get rx DMA ch %ld\n",
@ -289,7 +288,7 @@ int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi)
q2spi_kfree(q2spi, q2spi->gsi, __LINE__);
return -EIO;
}
Q2SPI_DEBUG(q2spi, "%s gsi_rx_c:%p\n", __func__, gsi->rx_c);
Q2SPI_DBG_2(q2spi, "%s gsi_rx_c:%p\n", __func__, gsi->rx_c);
gsi->tx_ev.init.callback = q2spi_gsi_ch_ev_cb;
gsi->tx_ev.init.cb_param = q2spi;
gsi->tx_ev.cmd = MSM_GPI_INIT;
@ -309,7 +308,7 @@ int q2spi_geni_gsi_setup(struct q2spi_geni *q2spi)
Q2SPI_ERROR(q2spi, "%s rx dma slave config ret :%d\n", __func__, ret);
goto dmaengine_slave_config_fail;
}
Q2SPI_DEBUG(q2spi, "%s q2spi:%p gsi:%p q2spi_gsi:%p\n", __func__, q2spi, gsi, q2spi->gsi);
Q2SPI_DBG_1(q2spi, "%s q2spi:%p gsi:%p q2spi_gsi:%p\n", __func__, q2spi, gsi, q2spi->gsi);
q2spi->gsi->chan_setup = true;
return ret;
@ -325,7 +324,7 @@ static int get_q2spi_clk_cfg(u32 speed_hz, struct q2spi_geni *q2spi, int *clk_id
struct geni_se *se = &q2spi->se;
int ret = 0;
Q2SPI_DEBUG(q2spi, "%s Start PID=%d\n", __func__, current->pid);
Q2SPI_DBG_2(q2spi, "%s Start PID=%d\n", __func__, current->pid);
ret = geni_se_clk_freq_match(&q2spi->se, (speed_hz * q2spi->oversampling),
clk_idx, &sclk_freq, false);
@ -345,7 +344,7 @@ static int get_q2spi_clk_cfg(u32 speed_hz, struct q2spi_geni *q2spi, int *clk_id
res_freq = (sclk_freq / (*clk_div));
Q2SPI_DEBUG(q2spi, "%s req %u resultant %lu sclk %lu, idx %d, div %d\n",
Q2SPI_DBG_1(q2spi, "%s req %u resultant %lu sclk %lu, idx %d, div %d\n",
__func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div);
ret = clk_set_rate(se->clk, sclk_freq);
@ -353,7 +352,7 @@ static int get_q2spi_clk_cfg(u32 speed_hz, struct q2spi_geni *q2spi, int *clk_id
Q2SPI_ERROR(q2spi, "%s Err clk_set_rate failed %d\n", __func__, ret);
return ret;
}
Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid);
Q2SPI_DBG_2(q2spi, "%s End PID=%d\n", __func__, current->pid);
return 0;
}
@ -375,7 +374,6 @@ static struct msm_gpi_tre *setup_cfg0_tre(struct q2spi_geni *q2spi)
int ssn = S_GP_CNT7_SSN;
int cn_delay = M_GP_CNT6_CN_DELAY;
Q2SPI_DEBUG(q2spi, "%s Start PID=%d\n", __func__, current->pid);
ret = get_q2spi_clk_cfg(q2spi->cur_speed_hz, q2spi, &idx, &div);
if (ret) {
Q2SPI_DEBUG(q2spi, "%s Err setting clks:%d\n", __func__, ret);
@ -386,16 +384,14 @@ static struct msm_gpi_tre *setup_cfg0_tre(struct q2spi_geni *q2spi)
pack |= (GSI_TX_PACK_EN | GSI_RX_PACK_EN);
cs_mode = CS_LESS_MODE;
intr_pol = INTR_HIGH_POLARITY;
Q2SPI_DEBUG(q2spi, "%s cs_mode 0x%x word %d pack %d idx %d div %d\n",
__func__, cs_mode, word_len, pack, idx, div);
/* config0 */
c0_tre->dword[0] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD0(tsn, pack, tdn, cs_mode,
intr_pol, word_len);
c0_tre->dword[1] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD1(tan, cs_clk_delay, ssn);
c0_tre->dword[2] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD2(cn_delay, idx, div);
c0_tre->dword[3] = MSM_GPI_Q2SPI_CONFIG0_TRE_DWORD3(0, 0, 0, 0, 1);
Q2SPI_DEBUG(q2spi, "%s c0_tre->dword[0]:0x%x dword[1]:0x%x dword[2]:0x%x dword[3]:0x%x\n",
__func__, c0_tre->dword[0], c0_tre->dword[1],
Q2SPI_DBG_2(q2spi, "%s cs:x%x word:%d pack:%d idx:%d div:%d dword:0x%x 0x%x 0x%x 0x%x\n",
__func__, cs_mode, word_len, pack, idx, div, c0_tre->dword[0], c0_tre->dword[1],
c0_tre->dword[2], c0_tre->dword[3]);
q2spi->setup_config0 = true;
return c0_tre;
@ -435,19 +431,10 @@ msm_gpi_tre *setup_go_tre(int cmd, int cs, int rx_len, int flags, struct q2spi_g
link_rx = 1;
}
go_tre->dword[3] = MSM_GPI_Q2SPI_GO_TRE_DWORD3(link_rx, 0, eot, eob, chain);
Q2SPI_DEBUG(q2spi, "%s rx len %d flags 0x%x cs %d cmd %d eot %d eob %d chain %d\n",
__func__, rx_len, flags, cs, cmd, eot, eob, chain);
if (cmd == Q2SPI_RX_ONLY)
Q2SPI_DEBUG(q2spi, "%s Q2SPI_RX_ONLY\n", __func__);
else if (cmd == Q2SPI_TX_ONLY)
Q2SPI_DEBUG(q2spi, "%s Q2SPI_TX_ONLY\n", __func__);
else if (cmd == Q2SPI_TX_RX)
Q2SPI_DEBUG(q2spi, "%s Q2SPI_TX_RX_ONLY\n", __func__);
Q2SPI_DEBUG(q2spi, "%s go_tre dword[0]:0x%x [1]:0x%x [2]:0x%x [3]:0x%x\n",
__func__, go_tre->dword[0], go_tre->dword[1], go_tre->dword[2],
go_tre->dword[3]);
Q2SPI_DBG_2(q2spi, "%s len:%d flags:0x%x cs:%d cmd:%d chain %d dword:0x%x 0x%x 0x%x 0x%x\n",
__func__, rx_len, flags, cs, cmd, chain, go_tre->dword[0],
go_tre->dword[1], go_tre->dword[2], go_tre->dword[3]);
return go_tre;
}
@ -463,7 +450,7 @@ msm_gpi_tre *setup_dma_tre(struct msm_gpi_tre *tre, dma_addr_t buf, u32 len,
tre->dword[1] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD1(buf);
tre->dword[2] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD2(len);
tre->dword[3] = MSM_GPI_DMA_W_BUFFER_TRE_DWORD3(0, 0, is_tx, 0, 0);
Q2SPI_DEBUG(q2spi, "%s dma_tre->dword[0]:0x%x dword[1]:0x%x dword[2]:0x%x dword[3]:0x%x\n",
Q2SPI_DBG_2(q2spi, "%s dma_tre->dword[0]:0x%x dword[1]:0x%x dword[2]:0x%x dword[3]:0x%x\n",
__func__, tre->dword[0], tre->dword[1],
tre->dword[2], tre->dword[3]);
return tre;
@ -480,7 +467,7 @@ int check_gsi_transfer_completion_db_rx(struct q2spi_geni *q2spi)
Q2SPI_DEBUG(q2spi, "%s Rx[%d] timeout%lu\n", __func__, i, timeout);
ret = -ETIMEDOUT;
} else {
Q2SPI_DEBUG(q2spi, "%s rx completed\n", __func__);
Q2SPI_DBG_1(q2spi, "%s rx completed\n", __func__);
}
if (q2spi->gsi->qup_gsi_err) {
@ -499,7 +486,7 @@ int check_gsi_transfer_completion(struct q2spi_geni *q2spi)
unsigned long timeleft = 0, xfer_timeout = 0;
xfer_timeout = XFER_TIMEOUT_OFFSET;
Q2SPI_DEBUG(q2spi, "%s tx_eot:%d rx_eot:%d\n", __func__,
Q2SPI_DBG_1(q2spi, "%s tx_eot:%d rx_eot:%d\n", __func__,
q2spi->gsi->num_tx_eot, q2spi->gsi->num_rx_eot);
for (i = 0 ; i < q2spi->gsi->num_tx_eot; i++) {
timeleft =
@ -509,7 +496,7 @@ int check_gsi_transfer_completion(struct q2spi_geni *q2spi)
ret = -ETIMEDOUT;
goto err_gsi_geni_transfer;
} else if (!q2spi->gsi->qup_gsi_err) {
Q2SPI_DEBUG(q2spi, "%s tx completed\n", __func__);
Q2SPI_DBG_1(q2spi, "%s tx completed\n", __func__);
}
}
@ -521,7 +508,7 @@ int check_gsi_transfer_completion(struct q2spi_geni *q2spi)
ret = -ETIMEDOUT;
goto err_gsi_geni_transfer;
} else if (!q2spi->gsi->qup_gsi_err) {
Q2SPI_DEBUG(q2spi, "%s rx completed\n", __func__);
Q2SPI_DBG_1(q2spi, "%s rx completed\n", __func__);
}
}
err_gsi_geni_transfer:
@ -564,10 +551,8 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt)
xfer = q2spi_pkt->xfer;
cmd = xfer->cmd;
Q2SPI_DEBUG(q2spi, "%s PID=%d xfer:%p vtype=%d\n", __func__,
current->pid, xfer, q2spi_pkt->vtype);
Q2SPI_DEBUG(q2spi, "%s cmd:%d q2spi_pkt:%p\n", __func__, cmd, q2spi_pkt);
Q2SPI_DBG_2(q2spi, "%s PID=%d xfer:%p vtype=%d cmd:%d q2spi_pkt:%p\n", __func__,
current->pid, xfer, q2spi_pkt->vtype, cmd, q2spi_pkt);
q2spi->gsi->num_tx_eot = 0;
q2spi->gsi->num_rx_eot = 0;
q2spi->gsi->qup_gsi_err = false;
@ -605,7 +590,7 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt)
tx_nent += 2;
rx_nent++;
}
Q2SPI_DEBUG(q2spi, "%s tx_nent:%d rx_nent:%d\n", __func__, tx_nent, rx_nent);
Q2SPI_DBG_2(q2spi, "%s tx_nent:%d rx_nent:%d\n", __func__, tx_nent, rx_nent);
sg_init_table(xfer_tx_sg, tx_nent);
if (rx_nent)
sg_init_table(xfer_rx_sg, rx_nent);
@ -632,7 +617,7 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt)
q2spi->gsi->tx_desc->callback_param = &q2spi->gsi->tx_cb_param;
q2spi->gsi->tx_cb_param.userdata = q2spi_pkt;
q2spi->gsi->tx_cookie = dmaengine_submit(q2spi->gsi->tx_desc);
Q2SPI_DEBUG(q2spi, "%s Tx cb_param:%p\n", __func__, q2spi->gsi->tx_desc->callback_param);
Q2SPI_DBG_2(q2spi, "%s Tx cb_param:%p\n", __func__, q2spi->gsi->tx_desc->callback_param);
if (dma_submit_error(q2spi->gsi->tx_cookie)) {
Q2SPI_DEBUG(q2spi, "%s Err dmaengine_submit failed (%d)\n",
__func__, q2spi->gsi->tx_cookie);
@ -660,7 +645,7 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt)
q2spi->gsi->rx_cb_param.userdata = q2spi_pkt;
q2spi->gsi->num_rx_eot++;
q2spi->gsi->rx_cookie = dmaengine_submit(q2spi->gsi->rx_desc);
Q2SPI_DEBUG(q2spi, "%s Rx cb_param:%p\n", __func__,
Q2SPI_DBG_2(q2spi, "%s Rx cb_param:%p\n", __func__,
q2spi->gsi->rx_desc->callback_param);
if (dma_submit_error(q2spi->gsi->rx_cookie)) {
Q2SPI_DEBUG(q2spi, "%s Err dmaengine_submit failed (%d)\n",
@ -688,7 +673,7 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt)
q2spi->gsi->db_rx_cb_param.userdata = q2spi_pkt;
q2spi->gsi->num_rx_eot++;
q2spi->gsi->rx_cookie = dmaengine_submit(q2spi->gsi->db_rx_desc);
Q2SPI_DEBUG(q2spi, "%s DB cb_param:%p\n", __func__,
Q2SPI_DBG_1(q2spi, "%s DB cb_param:%p\n", __func__,
q2spi->gsi->db_rx_desc->callback_param);
if (dma_submit_error(q2spi->gsi->rx_cookie)) {
Q2SPI_DEBUG(q2spi, "%s Err dmaengine_submit failed (%d)\n",
@ -698,7 +683,7 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt)
}
}
if (cmd & Q2SPI_RX_ONLY) {
Q2SPI_DEBUG(q2spi, "%s rx_c dma_async_issue_pending\n", __func__);
Q2SPI_DBG_1(q2spi, "%s rx_c dma_async_issue_pending\n", __func__);
q2spi_dump_ipc(q2spi, "GSI DMA-RX", (char *)xfer->rx_buf, tx_rx_len);
if (q2spi_pkt->m_cmd_param == Q2SPI_RX_ONLY)
reinit_completion(&q2spi->db_rx_cb);
@ -711,10 +696,10 @@ int q2spi_setup_gsi_xfer(struct q2spi_packet *q2spi_pkt)
q2spi_dump_ipc(q2spi, "GSI DMA TX",
(char *)xfer->tx_buf, Q2SPI_HEADER_LEN + tx_rx_len);
Q2SPI_DEBUG(q2spi, "%s tx_c dma_async_issue_pending\n", __func__);
Q2SPI_DBG_1(q2spi, "%s tx_c dma_async_issue_pending\n", __func__);
reinit_completion(&q2spi->tx_cb);
dma_async_issue_pending(q2spi->gsi->tx_c);
Q2SPI_DEBUG(q2spi, "%s End PID=%d\n", __func__, current->pid);
Q2SPI_DBG_2(q2spi, "%s End PID=%d\n", __func__, current->pid);
return 0;
}
@ -724,11 +709,11 @@ void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void *
struct q2spi_geni *q2spi = ptr;
int num_crs, i = 0;
Q2SPI_DEBUG(q2spi, "%s event:%s\n", __func__, TO_GPI_CB_EVENT_STR(cb->cb_event));
Q2SPI_DBG_1(q2spi, "%s event:%s\n", __func__, TO_GPI_CB_EVENT_STR(cb->cb_event));
switch (cb->cb_event) {
case MSM_GPI_QUP_NOTIFY:
case MSM_GPI_QUP_MAX_EVENT:
Q2SPI_DEBUG(q2spi, "%s cb_ev %s status %llu ts %llu count %llu\n",
Q2SPI_DBG_1(q2spi, "%s cb_ev %s status %llu ts %llu count %llu\n",
__func__, TO_GPI_CB_EVENT_STR(cb->cb_event), cb->status,
cb->timestamp, cb->count);
break;
@ -738,10 +723,10 @@ void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void *
case MSM_GPI_QUP_PENDING_EVENT:
case MSM_GPI_QUP_EOT_DESC_MISMATCH:
case MSM_GPI_QUP_SW_ERROR:
Q2SPI_DEBUG(q2spi, "%s cb_ev %s status %llu ts %llu count %llu\n",
Q2SPI_DBG_1(q2spi, "%s cb_ev %s status %llu ts %llu count %llu\n",
__func__, TO_GPI_CB_EVENT_STR(cb->cb_event), cb->status,
cb->timestamp, cb->count);
Q2SPI_DEBUG(q2spi, "%s err_routine:%u err_type:%u err.code%u\n",
Q2SPI_DBG_2(q2spi, "%s err_routine:%u err_type:%u err.code%u\n",
__func__, cb->error_log.routine, cb->error_log.type,
cb->error_log.error_code);
q2spi->gsi->qup_gsi_err = true;
@ -777,7 +762,7 @@ void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void *
atomic_set(&q2spi->sma_wr_pending, 1);
}
}
Q2SPI_DEBUG(q2spi, "%s GSI doorbell event, db_pending:%d\n",
Q2SPI_DBG_2(q2spi, "%s GSI doorbell event, db_pending:%d\n",
__func__, atomic_read(&q2spi->doorbell_pending));
q2spi_parse_cr_header(q2spi, cb);
break;

File diff suppressed because it is too large Load diff

View file

@ -207,6 +207,26 @@ if (q2spi_ptr) { \
} \
} while (0)
#define Q2SPI_DBG_1(q2spi_ptr, x...) do { \
if (q2spi_ptr) { \
if (q2spi_ptr->q2spi_log_lvl >= LOG_DBG_LEVEL1) {\
GENI_SE_DBG(q2spi_ptr->ipc, false, q2spi_ptr->dev, x); \
if (q2spi_ptr->dev) \
q2spi_trace_log(q2spi_ptr->dev, x); \
} \
} \
} while (0)
#define Q2SPI_DBG_2(q2spi_ptr, x...) do { \
if (q2spi_ptr) { \
if (q2spi_ptr->q2spi_log_lvl >= LOG_DBG_LEVEL2) {\
GENI_SE_DBG(q2spi_ptr->ipc, false, q2spi_ptr->dev, x); \
if (q2spi_ptr->dev) \
q2spi_trace_log(q2spi_ptr->dev, x); \
} \
} \
} while (0)
#define Q2SPI_DEBUG(q2spi_ptr, x...) do { \
if (q2spi_ptr) { \
GENI_SE_DBG(q2spi_ptr->ipc, false, q2spi_ptr->dev, x); \
@ -255,6 +275,12 @@ enum q2spi_cr_hdr_type {
CR_HDR_EXT = 3,
};
enum DEBUG_LOG_LVL {
LOG_DBG_LEVEL0 = 0, /* Indicates lowest level debug log level, default log level */
LOG_DBG_LEVEL1 = 1,
LOG_DBG_LEVEL2 = 2,
};
struct q2spi_mc_hrf_entry {
u8 cmd:4;
u8 flow:1;
@ -530,6 +556,7 @@ struct q2spi_dma_transfer {
* @q2spi_cr_hdr_err: reflects CR Header incorrect in CR Header
* @is_start_seq_fail: start sequence fail due to slave not responding
* @wait_comp_start_fail: completion for transfer callback during start sequence failure
* @q2spi_log_lvl: reflects log level in q2spi driver
*/
struct q2spi_geni {
struct device *wrapper_dev;
@ -638,6 +665,7 @@ struct q2spi_geni {
bool q2spi_cr_hdr_err;
bool is_start_seq_fail;
struct completion wait_comp_start_fail;
u32 q2spi_log_lvl;
};
/**

View file

@ -5475,10 +5475,14 @@ static int msm_geni_serial_port_init(struct platform_device *pdev,
if (dev_port->is_console) {
dev_port->handle_rx = handle_rx_console;
dev_port->rx_fifo = devm_kzalloc(uport->dev, sizeof(u32), GFP_KERNEL);
if (!dev_port->rx_fifo)
return -ENOMEM;
} else {
dev_port->handle_rx = handle_rx_hs;
dev_port->rx_fifo = devm_kzalloc(uport->dev,
sizeof(dev_port->rx_fifo_depth * sizeof(u32)), GFP_KERNEL);
if (!dev_port->rx_fifo)
return -ENOMEM;
if (dev_port->pm_auto_suspend_disable) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_forbid(&pdev->dev);

View file

@ -28,6 +28,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/wait.h>
#include <linux/interconnect.h>
#define MSM_UART_MR1 0x0000
@ -179,6 +180,8 @@ struct msm_port {
bool break_detected;
struct msm_dma tx_dma;
struct msm_dma rx_dma;
/* BLSP UART required ICC BUS voting */
struct icc_path *icc_path;
};
static inline struct msm_port *to_msm_port(struct uart_port *up)
@ -186,6 +189,15 @@ static inline struct msm_port *to_msm_port(struct uart_port *up)
return container_of(up, struct msm_port, uart);
}
/* Interconnect path bandwidths (each times 1000 bytes per second) */
#define BLSP_MEMORY_AVG 500
#define BLSP_MEMORY_PEAK 800
static void msm_clk_bus_unprepare(struct msm_port *msm_uport);
static int msm_clk_bus_prepare(struct msm_port *msm_uport);
static int msm_clk_bus_vote(struct msm_port *msm_uport);
static void msm_clk_bus_unvote(struct msm_port *msm_uport);
static
void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
{
@ -1186,15 +1198,6 @@ static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
return baud;
}
static void msm_init_clock(struct uart_port *port)
{
struct msm_port *msm_port = to_msm_port(port);
clk_prepare_enable(msm_port->clk);
clk_prepare_enable(msm_port->pclk);
msm_serial_set_mnd_regs(port);
}
static int msm_startup(struct uart_port *port)
{
struct msm_port *msm_port = to_msm_port(port);
@ -1204,7 +1207,7 @@ static int msm_startup(struct uart_port *port)
snprintf(msm_port->name, sizeof(msm_port->name),
"msm_serial%d", port->line);
msm_init_clock(port);
msm_serial_set_mnd_regs(port);
if (likely(port->fifosize > 12))
rfr_level = port->fifosize - 12;
@ -1241,9 +1244,6 @@ err_irq:
if (msm_port->is_uartdm)
msm_release_dma(msm_port);
clk_disable_unprepare(msm_port->pclk);
clk_disable_unprepare(msm_port->clk);
return ret;
}
@ -1257,8 +1257,6 @@ static void msm_shutdown(struct uart_port *port)
if (msm_port->is_uartdm)
msm_release_dma(msm_port);
clk_disable_unprepare(msm_port->clk);
free_irq(port->irq, port);
}
@ -1420,15 +1418,21 @@ static void msm_power(struct uart_port *port, unsigned int state,
unsigned int oldstate)
{
struct msm_port *msm_port = to_msm_port(port);
int ret;
if (oldstate == state)
return;
switch (state) {
case 0:
clk_prepare_enable(msm_port->clk);
clk_prepare_enable(msm_port->pclk);
case UART_PM_STATE_ON:
ret = msm_clk_bus_prepare(msm_port);
if (ret)
break;
msm_clk_bus_vote(msm_port);
break;
case 3:
clk_disable_unprepare(msm_port->clk);
clk_disable_unprepare(msm_port->pclk);
case UART_PM_STATE_OFF:
msm_clk_bus_unvote(msm_port);
msm_clk_bus_unprepare(msm_port);
break;
default:
pr_err("msm_serial: Unknown PM state %d\n", state);
@ -1536,6 +1540,58 @@ static void msm_poll_put_char(struct uart_port *port, unsigned char c)
}
#endif
static void msm_clk_bus_unprepare(struct msm_port *msm_uport)
{
clk_disable_unprepare(msm_uport->clk);
if (msm_uport->pclk)
clk_disable_unprepare(msm_uport->pclk);
}
static int msm_clk_bus_prepare(struct msm_port *msm_uport)
{
int rc;
/* Turn on core clk and iface clk */
if (msm_uport->pclk) {
rc = clk_prepare_enable(msm_uport->pclk);
if (rc) {
dev_err(msm_uport->uart.dev,
"Could not turn on pclk [%d]\n", rc);
return rc;
}
}
rc = clk_prepare_enable(msm_uport->clk);
if (rc) {
dev_err(msm_uport->uart.dev,
"Could not turn on core clk [%d]\n", rc);
if (msm_uport->pclk)
clk_disable_unprepare(msm_uport->pclk);
}
return rc;
}
static int msm_clk_bus_vote(struct msm_port *msm_uport)
{
int rc;
if (msm_uport->icc_path) {
rc = icc_set_bw(msm_uport->icc_path,
BLSP_MEMORY_AVG, BLSP_MEMORY_PEAK);
if (rc) {
dev_err(msm_uport->uart.dev,
"%s(): Error in seting bw [%d]\n", __func__, rc);
return rc;
}
}
return 0;
}
static void msm_clk_bus_unvote(struct msm_port *msm_uport)
{
if (msm_uport->icc_path)
icc_set_bw(msm_uport->icc_path, 0, 0);
}
static const struct uart_ops msm_uart_pops = {
.tx_empty = msm_tx_empty,
.set_mctrl = msm_set_mctrl,
@ -1700,7 +1756,7 @@ static int msm_console_setup(struct console *co, char *options)
if (unlikely(!port->membase))
return -ENXIO;
msm_init_clock(port);
msm_serial_set_mnd_regs(port);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@ -1794,6 +1850,7 @@ static int msm_serial_probe(struct platform_device *pdev)
struct uart_port *port;
const struct of_device_id *id;
int irq, line;
int ret;
if (pdev->dev.of_node)
line = of_alias_get_id(pdev->dev.of_node, "serial");
@ -1818,6 +1875,15 @@ static int msm_serial_probe(struct platform_device *pdev)
else
msm_port->is_uartdm = 0;
msm_port->icc_path = of_icc_get(&pdev->dev, "blsp-ddr");
if (IS_ERR_OR_NULL(msm_port->icc_path)) {
ret = msm_port->icc_path ?
PTR_ERR(msm_port->icc_path) : -EINVAL;
dev_err(&pdev->dev, "%s(): failed to get ICC path: %d\n", __func__, ret);
msm_port->icc_path = NULL;
return -ENXIO;
}
msm_port->clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(msm_port->clk))
return PTR_ERR(msm_port->clk);
@ -1918,7 +1984,7 @@ static void __exit msm_serial_exit(void)
uart_unregister_driver(&msm_uart_driver);
}
module_init(msm_serial_init);
subsys_initcall(msm_serial_init);
module_exit(msm_serial_exit);
MODULE_AUTHOR("Robert Love <rlove@google.com>");

View file

@ -3125,6 +3125,7 @@ static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
if (assert) {
disable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
disable_irq_wake(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
/* Using asynchronous block reset to the hardware */
dev_dbg(mdwc->dev, "block_reset ASSERT\n");
clk_disable_unprepare(mdwc->utmi_clk);
@ -3144,6 +3145,7 @@ static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
clk_prepare_enable(mdwc->core_clk);
clk_prepare_enable(mdwc->sleep_clk);
clk_prepare_enable(mdwc->utmi_clk);
enable_irq_wake(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
}
@ -4205,6 +4207,7 @@ static void dwc3_msm_suspend_phy(struct dwc3_msm *mdwc)
if (mdwc->lpm_flags & MDWC3_USE_PWR_EVENT_IRQ_FOR_WAKEUP) {
dwc3_msm_set_pwr_events(mdwc, true);
enable_irq_wake(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
}
}
@ -4223,7 +4226,8 @@ static void dwc3_msm_interrupt_enable(struct dwc3_msm *mdwc, bool enable)
}
}
static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool force_power_collapse)
static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool force_power_collapse,
bool enable_wakeup)
{
int ret;
struct dwc3 *dwc = NULL;
@ -4346,11 +4350,13 @@ static int dwc3_msm_suspend(struct dwc3_msm *mdwc, bool force_power_collapse)
* case of platforms with mpm interrupts and snps phy, enable
* dpse hsphy irq and dmse hsphy irq as done for pdc interrupts.
*/
dwc3_msm_interrupt_enable(mdwc, true);
dwc3_msm_interrupt_enable(mdwc, enable_wakeup);
if (mdwc->use_pwr_event_for_wakeup &&
!(mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND))
!(mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND)) {
enable_irq_wake(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
}
dev_info(mdwc->dev, "DWC3 in low power mode\n");
dbg_event(0xFF, "Ctl Sus", atomic_read(&mdwc->in_lpm));
@ -4477,6 +4483,7 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
atomic_set(&mdwc->in_lpm, 0);
/* enable power evt irq for IN P3 detection */
enable_irq_wake(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
enable_irq(mdwc->wakeup_irq[PWR_EVNT_IRQ].irq);
/* Disable HSPHY auto suspend and utmi sleep assert */
@ -6216,6 +6223,7 @@ static int dwc3_msm_probe(struct platform_device *pdev)
struct resource *res;
int ret = 0, i;
u32 val;
bool disable_wakeup;
mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
if (!mdwc)
@ -6390,7 +6398,10 @@ static int dwc3_msm_probe(struct platform_device *pdev)
atomic_set(&mdwc->in_lpm, 1);
pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
pm_runtime_use_autosuspend(mdwc->dev);
device_init_wakeup(mdwc->dev, 1);
disable_wakeup =
device_property_read_bool(mdwc->dev, "qcom,disable-wakeup");
device_init_wakeup(mdwc->dev, !disable_wakeup);
if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
pm_runtime_get_noresume(mdwc->dev);
@ -7465,10 +7476,17 @@ static int dwc3_msm_pm_suspend(struct device *dev)
* Power collapse the core. Hence call dwc3_msm_suspend with
* 'force_power_collapse' set to 'true'.
*/
ret = dwc3_msm_suspend(mdwc, true);
ret = dwc3_msm_suspend(mdwc, true, device_may_wakeup(dev));
if (!ret)
atomic_set(&mdwc->pm_suspended, 1);
/*
* Disable IRQs if not wakeup capable. Wakeup IRQs may sometimes
* be enabled as part of a runtime suspend.
*/
if (!device_may_wakeup(dev))
dwc3_msm_interrupt_enable(mdwc, false);
return ret;
}
@ -7616,7 +7634,7 @@ static int dwc3_msm_runtime_suspend(struct device *dev)
if (dwc)
device_init_wakeup(dwc->dev, false);
return dwc3_msm_suspend(mdwc, false);
return dwc3_msm_suspend(mdwc, false, true);
}
static int dwc3_msm_runtime_resume(struct device *dev)

View file

@ -105,6 +105,51 @@ static unsigned int bitrate(struct usb_gadget *g)
#define RNDIS_STATUS_INTERVAL_MS 32
#define STATUS_BYTECOUNT 8 /* 8 bytes data */
#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(_f_) \
static ssize_t _f_##_opts_wceis_show(struct config_item *item, \
char *page) \
{ \
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
bool wceis; \
\
if (opts->bound == false) { \
pr_err("Gadget function do not bind yet.\n"); \
return -ENODEV; \
} \
\
mutex_lock(&opts->lock); \
wceis = opts->wceis; \
mutex_unlock(&opts->lock); \
return snprintf(page, PAGE_SIZE, "%d", wceis); \
} \
\
static ssize_t _f_##_opts_wceis_store(struct config_item *item, \
const char *page, size_t len)\
{ \
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
bool wceis; \
int ret; \
\
if (opts->bound == false) { \
pr_err("Gadget function do not bind yet.\n"); \
return -ENODEV; \
} \
\
mutex_lock(&opts->lock); \
\
ret = kstrtobool(page, &wceis); \
if (ret) \
goto out; \
\
opts->wceis = wceis; \
ret = len; \
out: \
mutex_unlock(&opts->lock); \
\
return ret; \
} \
\
CONFIGFS_ATTR(_f_##_opts_, wceis)
/* interface descriptor: */
@ -741,6 +786,27 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
rndis_data_intf.bInterfaceNumber = status;
rndis_union_desc.bSlaveInterface0 = status;
if (rndis_opts->wceis) {
/* "Wireless" RNDIS; auto-detected by Windows */
rndis_iad_descriptor.bFunctionClass =
USB_CLASS_WIRELESS_CONTROLLER;
rndis_iad_descriptor.bFunctionSubClass = 0x01;
rndis_iad_descriptor.bFunctionProtocol = 0x03;
rndis_control_intf.bInterfaceClass =
USB_CLASS_WIRELESS_CONTROLLER;
rndis_control_intf.bInterfaceSubClass = 0x01;
rndis_control_intf.bInterfaceProtocol = 0x03;
} else {
rndis_iad_descriptor.bFunctionClass = USB_CLASS_COMM;
rndis_iad_descriptor.bFunctionSubClass =
USB_CDC_SUBCLASS_ETHERNET;
rndis_iad_descriptor.bFunctionProtocol = USB_CDC_PROTO_NONE;
rndis_control_intf.bInterfaceClass = USB_CLASS_COMM;
rndis_control_intf.bInterfaceSubClass = USB_CDC_SUBCLASS_ACM;
rndis_control_intf.bInterfaceProtocol =
USB_CDC_ACM_PROTO_VENDOR;
}
status = -ENODEV;
/* allocate instance-specific endpoints */
@ -878,6 +944,9 @@ USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(rndis, subclass);
/* f_rndis_opts_protocol */
USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(rndis, protocol);
/* f_rndis_opts_wceis */
USB_ETHERNET_CONFIGFS_ITEM_ATTR_WCEIS(rndis);
static struct configfs_attribute *rndis_attrs[] = {
&rndis_opts_attr_dev_addr,
&rndis_opts_attr_host_addr,
@ -886,6 +955,7 @@ static struct configfs_attribute *rndis_attrs[] = {
&rndis_opts_attr_class,
&rndis_opts_attr_subclass,
&rndis_opts_attr_protocol,
&rndis_opts_attr_wceis,
NULL,
};
@ -925,7 +995,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = rndis_free_inst;
opts->net = gether_setup_default();
opts->net = gether_setup_name_default("rndis");
if (IS_ERR(opts->net)) {
struct net_device *net = opts->net;
kfree(opts);
@ -950,6 +1020,9 @@ static struct usb_function_instance *rndis_alloc_inst(void)
}
opts->rndis_interf_group = rndis_interf_group;
/* Enable "Wireless" RNDIS by default */
opts->wceis = true;
return &opts->func_inst;
}

View file

@ -39,6 +39,9 @@ struct f_rndis_opts {
*/
struct mutex lock;
int refcnt;
/* "Wireless" RNDIS; auto-detected by Windows */
bool wceis;
};
void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net);

View file

@ -247,4 +247,18 @@ config USB_MSM_EUSB2_PHY
To compile this driver as a module, choose M here.
config USB_MSM_OTG
tristate "Qualcomm Technologies, Inc. on-chip USB OTG controller support"
depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
depends on RESET_CONTROLLER
select USB_PHY
help
Enable this to support USB OTG transceiver on Qualcomm Technologies, Inc.
chips. It handles PHY initialization, clock management, and workarounds
required after resetting the hardware and power management.
This driver is required even for peripheral only or host only
mode configurations.
This driver is not supported on boards like trout which
has an external PHY.
endmenu

View file

@ -30,3 +30,4 @@ obj-$(CONFIG_USB_MSM_SSPHY_QMP) += phy-msm-ssusb-qmp.o
obj-$(CONFIG_MSM_HSUSB_PHY) += phy-msm-snps-hs.o
obj-$(CONFIG_USB_MSM_EUSB2_PHY) += phy-msm-snps-eusb2.o
obj-$(CONFIG_MSM_QUSB_PHY) += phy-msm-qusb-v2.o phy-msm-qusb.o
obj-$(CONFIG_USB_MSM_OTG) += phy-msm-usb.o

View file

@ -53,6 +53,8 @@ enum core_ldo_levels {
#define SW_PORTSELECT BIT(0)
/* port select mux: 1 - sw control. 0 - HW control*/
#define SW_PORTSELECT_MX BIT(1)
/* port select polarity: 1 - invert polarity of portselect from gpio */
#define PORTSELECT_POLARITY BIT(2)
/* USB3_DP_PHY_USB3_DP_COM_SWI_CTRL bits */
@ -150,6 +152,7 @@ struct msm_ssphy_qmp {
int reg_offset_cnt;
u32 *qmp_phy_init_seq;
int init_seq_len;
bool invert_ps_polarity;
enum qmp_phy_type phy_type;
};
@ -455,6 +458,14 @@ static void usb_qmp_update_portselect_phymode(struct msm_ssphy_qmp *phy)
switch (phy->phy_type) {
case USB3_AND_DP:
/*
* if port select inversion is enabled, enable it only for the input to the PHY.
* The lane selection based on PHY flags will not get affected.
*/
if (val < 0 && phy->invert_ps_polarity)
writel_relaxed(PORTSELECT_POLARITY,
phy->base + phy->phy_reg[USB3_DP_COM_TYPEC_CTRL]);
writel_relaxed(0x01,
phy->base + phy->phy_reg[USB3_DP_COM_SW_RESET]);
writel_relaxed(0x00,
@ -1219,7 +1230,8 @@ static int msm_ssphy_qmp_probe(struct platform_device *pdev)
&phy->vdd_max_uA) || !phy->vdd_max_uA)
phy->vdd_max_uA = USB_SSPHY_HPM_LOAD;
platform_set_drvdata(pdev, phy);
phy->invert_ps_polarity = of_property_read_bool(dev->of_node,
"qcom,invert-ps-polarity");
phy->phy.dev = dev;
phy->phy.init = msm_ssphy_qmp_init;

File diff suppressed because it is too large Load diff

View file

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
*/
@ -67,7 +67,7 @@ int gh_update_vm_prop_table(enum gh_vm_names vm_name,
if (!vm_prop)
return -EINVAL;
if (vm_prop->vmid < 0 || vm_name < GH_SELF_VM || vm_name > GH_VM_MAX)
if (vm_prop->vmid < 0 || vm_name < GH_SELF_VM || vm_name >= GH_VM_MAX)
return -EINVAL;
spin_lock(&gh_vm_table_lock);
@ -129,10 +129,9 @@ int ghd_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid)
gh_vmid_t _vmid;
int ret = 0;
if (vm_name < GH_SELF_VM || vm_name > GH_VM_MAX)
if (vm_name < GH_SELF_VM || vm_name >= GH_VM_MAX)
return -EINVAL;
spin_lock(&gh_vm_table_lock);
_vmid = gh_vm_table[vm_name].vmid;
@ -196,11 +195,10 @@ int gh_rm_get_vminfo(enum gh_vm_names vm_name, struct gh_vminfo *vm)
if (!vm)
return -EINVAL;
spin_lock(&gh_vm_table_lock);
if (vm_name < GH_SELF_VM || vm_name > GH_VM_MAX) {
spin_unlock(&gh_vm_table_lock);
if (vm_name < GH_SELF_VM || vm_name >= GH_VM_MAX)
return -EINVAL;
}
spin_lock(&gh_vm_table_lock);
vm->guid = gh_vm_table[vm_name].guid;
vm->uri = gh_vm_table[vm_name].uri;
@ -980,7 +978,7 @@ int gh_rm_vm_alloc_vmid(enum gh_vm_names vm_name, int *vmid)
/* Look up for the vm_name<->vmid pair if already present.
* If so, return.
*/
if (vm_name < GH_SELF_VM || vm_name > GH_VM_MAX)
if (vm_name < GH_SELF_VM || vm_name >= GH_VM_MAX)
return -EINVAL;
spin_lock(&gh_vm_table_lock);

View file

@ -90,6 +90,7 @@ def define_gen3auto():
"drivers/md/dm-bow.ko",
"drivers/media/platform/msm/npu/msm_npu.ko",
"drivers/mfd/qcom-spmi-pmic.ko",
"drivers/misc/bootmarker_proxy.ko",
"drivers/misc/qseecom_proxy.ko",
"drivers/mmc/host/cqhci.ko",
"drivers/mmc/host/sdhci-msm.ko",

View file

@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __BOOTMARKER_KERNEL_H_
#define __BOOTMARKER_KERNEL_H_
#include <linux/types.h>
int bootmarker_place_marker(const char *name);
#if IS_ENABLED(CONFIG_BOOTMARKER_PROXY)
struct bootmarker_drv_ops {
int (*bootmarker_place_marker)(const char *name);
};
int provide_bootmarker_kernel_fun_ops(const struct bootmarker_drv_ops *ops);
#endif /*CONFIG_BOOTMARKER_PROXY*/
#endif /* __BOOTMARKER_KERNEL_H_ */

View file

@ -4,6 +4,8 @@
#include <linux/power_supply.h>
#define BQ27XXX_RESISTANCE_TABLE_LENGTH 15
enum bq27xxx_chip {
BQ27000 = 1, /* bq27000, bq27200 */
BQ27010, /* bq27010, bq27210 */
@ -78,6 +80,10 @@ struct bq27xxx_device_info {
struct list_head list;
struct mutex lock;
u8 *regs;
#ifdef CONFIG_BATTERY_BQ27XXX_RESIST_TABLE_UPDATES_NVM
u32 qmax_cell0;
u32 resist_table[BQ27XXX_RESISTANCE_TABLE_LENGTH];
#endif
};
void bq27xxx_battery_update(struct bq27xxx_device_info *di);

313
include/linux/qca8337.h Normal file
View file

@ -0,0 +1,313 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright (c) 2017, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __QCA8337_H__
#define __QCA8337_H__
#define BITS(_s, _n) (((1UL << (_n)) - 1) << (_s))
#define QCA8337_PHY_ID 0x004dd036
#define ATH8030_PHY_ID 0x004dd076
#define ATH8031_PHY_ID 0x004dd074
#define ATH8035_PHY_ID 0x004dd072
#define QCA8337_ID_QCA8337 0x13
#define QCA8337_NUM_PORTS 7
/* Make sure that port0 is the cpu port */
#define QCA8337_CPU_PORT 0
/* size of the vlan table */
#define QCA8337_MAX_VLANS 128
#define QCA8337_NUM_PHYS 5
#define ADVERTISE_MULTI_PORT_PREFER 0x0400
#define QCA8337_AT803X_INTR_ENABLE 0x12
#define QCA8337_AT803X_INTR_STATUS 0x13
#define QCA8337_AT803X_SMART_SPEED 0x14
#define QCA8337_AT803X_LED_CONTROL 0x18
#define QCA8337_AT803X_WOL_ENABLE 0x01
#define QCA8337_AT803X_DEVICE_ADDR 0x03
#define QCA8337_AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
#define QCA8337_AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
#define QCA8337_AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
#define QCA8337_AT803X_MMD_ACCESS_CONTROL 0x0D
#define QCA8337_AT803X_MMD_ACCESS_CONTROL_DATA 0x0E
#define QCA8337_AT803X_FUNC_DATA 0x4003
#define QCA8337_AT803X_INER 0x0012
#define QCA8337_AT803X_INER_INIT 0xec00
#define QCA8337_AT803X_INSR 0x0013
#define QCA8337_AT803X_DEBUG_ADDR 0x1D
#define QCA8337_AT803X_DEBUG_DATA 0x1E
#define QCA8337_AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05
#define QCA8337_AT803X_DEBUG_RGMII_TX_CLK_DLY BIT(8)
/* MASK_CTRL */
#define QCA8337_REG_MASK_CTRL 0x0000
#define QCA8337_CTRL_REVISION BITS(0, 8)
#define QCA8337_CTRL_REVISION_S 0
#define QCA8337_CTRL_VERSION BITS(8, 8)
#define QCA8337_CTRL_VERSION_S 8
#define QCA8337_CTRL_RESET BIT(31)
/* PORT0/1_PAD_CTRL */
#define QCA8337_REG_PAD0_CTRL 0x004
#define QCA8337_REG_PAD5_CTRL 0x008
#define QCA8337_REG_PAD6_CTRL 0x00c
#define QCA8337_PAD_MAC_MII_RXCLK_SEL BIT(0)
#define QCA8337_PAD_MAC_MII_TXCLK_SEL BIT(1)
#define QCA8337_PAD_MAC_MII_EN BIT(2)
#define QCA8337_PAD_MAC_GMII_RXCLK_SEL BIT(4)
#define QCA8337_PAD_MAC_GMII_TXCLK_SEL BIT(5)
#define QCA8337_PAD_MAC_GMII_EN BIT(6)
#define QCA8337_PAD_SGMII_EN BIT(7)
#define QCA8337_PAD_PHY_MII_RXCLK_SEL BIT(8)
#define QCA8337_PAD_PHY_MII_TXCLK_SEL BIT(9)
#define QCA8337_PAD_PHY_MII_EN BIT(10)
#define QCA8337_PAD_PHY_GMII_PIPE_RXCLK_SEL BIT(11)
#define QCA8337_PAD_PHY_GMII_RXCLK_SEL BIT(12)
#define QCA8337_PAD_PHY_GMII_TXCLK_SEL BIT(13)
#define QCA8337_PAD_PHY_GMII_EN BIT(14)
#define QCA8337_PAD_PHYX_GMII_EN BIT(16)
#define QCA8337_PAD_PHYX_RGMII_EN BIT(17)
#define QCA8337_PAD_PHYX_MII_EN BIT(18)
#define QCA8337_PAD_RGMII_RXCLK_DELAY_SEL BITS(20, 2)
#define QCA8337_PAD_RGMII_RXCLK_DELAY_SEL_S 20
#define QCA8337_PAD_RGMII_TXCLK_DELAY_SEL BITS(22, 2)
#define QCA8337_PAD_RGMII_TXCLK_DELAY_SEL_S 22
#define QCA8337_PAD_RGMII_RXCLK_DELAY_EN BIT(24)
#define QCA8337_PAD_RGMII_TXCLK_DELAY_EN BIT(25)
#define QCA8337_PAD_RGMII_EN BIT(26)
/* PORT_STATUS */
#define QCA8337_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
#define QCA8337_PORT_STATUS_SPEED BITS(0, 2)
#define QCA8337_PORT_STATUS_SPEED_S 0
#define QCA8337_PORT_STATUS_TXMAC BIT(2)
#define QCA8337_PORT_STATUS_RXMAC BIT(3)
#define QCA8337_PORT_STATUS_TXFLOW BIT(4)
#define QCA8337_PORT_STATUS_RXFLOW BIT(5)
#define QCA8337_PORT_STATUS_DUPLEX BIT(6)
#define QCA8337_PORT_STATUS_LINK_UP BIT(8)
#define QCA8337_PORT_STATUS_LINK_AUTO BIT(9)
#define QCA8337_PORT_STATUS_LINK_PAUSE BIT(10)
/* GLOBAL_FW_CTRL0 */
#define QCA8337_REG_GLOBAL_FW_CTRL0 0x620
#define QCA8337_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10)
/* GLOBAL_FW_CTRL1 */
#define QCA8337_REG_GLOBAL_FW_CTRL1 0x624
#define QCA8337_IGMP_JN_L_DP_SH 24
#define QCA8337_BROAD_DP_SHIFT 16
#define QCA8337_MULTI_FLOOD_DP_SH 8
#define QCA8337_UNI_FLOOD_DP_SHIFT 0
#define QCA8337_IGMP_JOIN_LEAVE_DPALL (0x7f << QCA8337_IGMP_JN_L_DP_SH)
#define QCA8337_BROAD_DPALL (0x7f << QCA8337_BROAD_DP_SHIFT)
#define QCA8337_MULTI_FLOOD_DPALL (0x7f << QCA8337_MULTI_FLOOD_DP_SH)
#define QCA8337_UNI_FLOOD_DPALL (0x7f << QCA8337_UNI_FLOOD_DP_SHIFT)
/* PWS_REG (POWER_ON_STRIP) */
#define QCA8337_REG_POWER_ON_STRIP 0x010
#define QCA8337_REG_POS_VAL 0x261320
#define QCA8337_PWS_POWER_ON_SEL BIT(31)
#define QCA8337_PWS_LED_OPEN_EN BIT(24)
#define QCA8337_PWS_SERDES_AEN BIT(7)
/* MAC_PWR_SEL*/
#define QCA8337_MAC_PWR_SEL 0x0e4
#define QCA8337_MAC_PWR_SEL_VAL 0xaa545
/* SGMII_CTRL */
#define QCA8337_SGMII_CTRL_REG 0x0e0
#define QCA8337_SGMII_CTRL_VAL 0xc74164de
#define QCA8337_SGMII_CTRL_MODE_CTRL BITS(22, 2)
#define QCA8337_SGMII_CTRL_MODE_CTRL_S 22
#define QCA8337_SGMII_EN_LCKDT BIT(0)
#define QCA8337_SGMII_EN_PLL BIT(1)
#define QCA8337_SGMII_EN_RX BIT(2)
#define QCA8337_SGMII_EN_TX BIT(3)
#define QCA8337_SGMII_EN_SD BIT(4)
#define QCA8337_SGMII_BW_HIGH BIT(6)
#define QCA8337_SGMII_SEL_CLK125M BIT(7)
#define QCA8337_SGMII_TXDR_CTRL_600mV BIT(10)
#define QCA8337_SGMII_CDR_BW_8 BIT(13)
#define QCA8337_SGMII_DIS_AUTO_LPI_25M BIT(16)
#define QCA8337_SGMII_MODE_CTRL_SGMII_PHY BIT(22)
#define QCA8337_SGMII_PAUSE_SG_TX_EN_25M BIT(24)
#define QCA8337_SGMII_ASYM_PAUSE_25M BIT(25)
#define QCA8337_SGMII_PAUSE_25M BIT(26)
#define QCA8337_SGMII_HALF_DUPLEX_25M BIT(30)
#define QCA8337_SGMII_FULL_DUPLEX_25M BIT(31)
/* PORT_LOOKUP_CTRL */
#define QCA8337_REG_PORT_LOOKUP(_i) (0x660 + (_i) * 0xc)
#define QCA8337_PORT_LOOKUP_MEMBER BITS(0, 7)
#define QCA8337_PORT_LOOKUP_IN_MODE BITS(8, 2)
#define QCA8337_PORT_LOOKUP_IN_MODE_S 8
#define QCA8337_PORT_LOOKUP_STATE BITS(16, 3)
#define QCA8337_PORT_LOOKUP_STATE_S 16
#define QCA8337_PORT_LOOKUP_LEARN BIT(20)
/* PORT_VLAN_CTRL0 */
#define QCA8337_REG_PORT_VLAN0(_i) (0x420 + (_i) * 0x8)
#define QCA8337_PORT_VLAN0_DEF_SVID BITS(0, 12)
#define QCA8337_PORT_VLAN0_DEF_SVID_S 0
#define QCA8337_PORT_VLAN0_DEF_CVID BITS(16, 12)
#define QCA8337_PORT_VLAN0_DEF_CVID_S 16
/* PORT_VLAN_CTRL1 */
#define QCA8337_REG_PORT_VLAN1(_i) (0x424 + (_i) * 0x8)
#define QCA8337_PORT_VLAN1_PORT_VLAN_PROP BIT(6)
#define QCA8337_PORT_VLAN1_OUT_MODE BITS(12, 2)
#define QCA8337_PORT_VLAN1_OUT_MODE_S 12
#define QCA8337_PORT_VLAN1_OUT_MODE_UNMOD 0
#define QCA8337_PORT_VLAN1_OUT_MODE_UNTAG 1
#define QCA8337_PORT_VLAN1_OUT_MODE_TAG 2
#define QCA8337_PORT_VLAN1_OUT_MODE_UNTOUCH 3
/* MODULE_EN */
#define QCA8337_REG_MODULE_EN 0x030
#define QCA8337_MODULE_EN_MIB BIT(0)
/* MIB */
#define QCA8337_REG_MIB 0x034
#define QCA8337_MIB_FLUSH BIT(24)
#define QCA8337_MIB_CPU_KEEP BIT(20)
#define QCA8337_MIB_BUSY BIT(17)
/* PORT_HEADER_CTRL */
#define QCA8337_REG_PORT_HEADER(_i) (0x09c + (_i) * 4)
#define QCA8337_PORT_HDR_CTRL_RX_S 2
#define QCA8337_PORT_HDR_CTRL_TX_S 0
#define QCA8337_PORT_HDR_CTRL_ALL 2
/* EEE_CTRL */
#define QCA8337_REG_EEE_CTRL 0x100
#define QCA8337_EEE_CTRL_DISABLE 0x0 /*EEE disable*/
/* VTU_FUNC_REG0 */
#define QCA8337_REG_VTU_FUNC0 0x0610
#define QCA8337_VTU_FUNC0_EG_MODE BITS(4, 14)
#define QCA8337_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2)
#define QCA8337_VTU_FUNC0_EG_MODE_KEEP 0
#define QCA8337_VTU_FUNC0_EG_MODE_UNTAG 1
#define QCA8337_VTU_FUNC0_EG_MODE_TAG 2
#define QCA8337_VTU_FUNC0_EG_MODE_NOT 3
#define QCA8337_VTU_FUNC0_IVL BIT(19)
#define QCA8337_VTU_FUNC0_VALID BIT(20)
/* VTU_FUNC_REG1 */
#define QCA8337_REG_VTU_FUNC1 0x0614
#define QCA8337_VTU_FUNC1_OP BITS(0, 3)
#define QCA8337_VTU_FUNC1_OP_NOOP 0
#define QCA8337_VTU_FUNC1_OP_FLUSH 1
#define QCA8337_VTU_FUNC1_OP_LOAD 2
#define QCA8337_VTU_FUNC1_OP_PURGE 3
#define QCA8337_VTU_FUNC1_OP_REMOVE_PORT 4
#define QCA8337_VTU_FUNC1_OP_GET_NEXT 5
#define QCA8337_VTU_FUNC1_OP_GET_ONE 6
#define QCA8337_VTU_FUNC1_FULL BIT(4)
#define QCA8337_VTU_FUNC1_PORT BIT(8, 4)
#define QCA8337_VTU_FUNC1_PORT_S 8
#define QCA8337_VTU_FUNC1_VID BIT(16, 12)
#define QCA8337_VTU_FUNC1_VID_S 16
#define QCA8337_VTU_FUNC1_BUSY BIT(31)
#define QCA8337_REG_ATU_FUNC 0x60c
#define QCA8337_ATU_FUNC_BUSY BIT(31)
#define QCA8337_ATU_FUNC_OP_GET_NEXT 0x6
#define QCA8337_REG_ATU_DATA0 0x600
#define QCA8337_REG_ATU_DATA1 0x604
#define QCA8337_REG_ATU_DATA2 0x608
#define QCA8337_GLOBAL_INT1 0x0024
#define QCA8337_GLOBAL_INT1_MASK 0x002c
/* port speed */
enum {
QCA8337_PORT_SPEED_10M = 0,
QCA8337_PORT_SPEED_100M = 1,
QCA8337_PORT_SPEED_1000M = 2,
QCA8337_PORT_SPEED_ERR = 3,
};
/* ingress 802.1q mode */
enum {
QCA8337_IN_PORT_ONLY = 0,
QCA8337_IN_PORT_FALLBACK = 1,
QCA8337_IN_VLAN_ONLY = 2,
QCA8337_IN_SECURE = 3
};
/* egress 802.1q mode */
enum {
QCA8337_OUT_KEEP = 0,
QCA8337_OUT_STRIP_VLAN = 1,
QCA8337_OUT_ADD_VLAN = 2
};
/* port forwarding state */
enum {
QCA8337_PORT_STATE_DISABLED = 0,
QCA8337_PORT_STATE_BLOCK = 1,
QCA8337_PORT_STATE_LISTEN = 2,
QCA8337_PORT_STATE_LEARN = 3,
QCA8337_PORT_STATE_FORWARD = 4
};
struct qca8337_priv;
struct qca8337_switch_ops {
int (*hw_init)(struct qca8337_priv *priv);
void (*reset_switch)(struct qca8337_priv *priv);
/* Switch internal register read/write function */
u32 (*read)(struct qca8337_priv *priv, u32 reg);
void (*write)(struct qca8337_priv *priv, u32 reg, u32 val);
};
struct port_link_info {
bool link;
int speed;
int duplex;
int aneg;
int rx_flow;
int tx_flow;
};
struct qca8337_priv {
struct device *dev;
struct phy_device *phy;
u8 chip_ver;
u8 chip_rev;
u8 cpu_port;
u8 ports;
u16 vlans;
u8 num_phy;
u32 old_port_status;
char buf[2048];
struct qca8337_switch_ops *ops;
struct regmap *regmap;
};
struct qca8337_mib_desc {
unsigned int size;
unsigned int offset;
const char *name;
};
void qca8337_check(void);
u32 qca8337_read(struct qca8337_priv *priv, u32 reg);
void qca8337_write(struct qca8337_priv *priv, u32 reg, u32 val);
#endif /*__QCA8337_H__*/

View file

@ -308,5 +308,7 @@ struct plat_stmmacenet_data {
bool mdio_op_busy;
atomic_t phy_clks_suspended;
struct completion mdio_op;
int board_type;
int phy_type;
};
#endif

View file

@ -0,0 +1,390 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __ASM_ARCH_MSM_HSUSB_H
#define __ASM_ARCH_MSM_HSUSB_H
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/clk.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
#include <linux/power_supply.h>
#include <linux/cdev.h>
#include <linux/usb_bam.h>
#include <linux/extcon.h>
#include <linux/regulator/driver.h>
#include <linux/interconnect.h>
/**
* Used different VDDCX voltage values
*/
enum usb_vdd_value {
VDD_NONE = 0,
VDD_MIN,
VDD_MAX,
VDD_VAL_MAX,
};
/**
* Requested USB votes for NOC frequency
*
* USB_NOC_NOM_VOTE Vote for NOM set of NOC frequencies
* USB_NOC_SVS_VOTE Vote for SVS set of NOC frequencies
*
*/
enum usb_noc_mode {
USB_NOC_NOM_VOTE = 0,
USB_NOC_SVS_VOTE,
USB_NOC_NUM_VOTE,
};
/**
* Different states involved in USB charger detection.
*
* USB_CHG_STATE_UNDEFINED USB charger is not connected or detection
* process is not yet started.
* USB_CHG_STATE_IN_PROGRESS Charger detection in progress
* USB_CHG_STATE_WAIT_FOR_DCD Waiting for Data pins contact.
* USB_CHG_STATE_DCD_DONE Data pin contact is detected.
* USB_CHG_STATE_PRIMARY_DONE Primary detection is completed (Detects
* between SDP and DCP/CDP).
* USB_CHG_STATE_SECONDARY_DONE Secondary detection is completed (Detects
* between DCP and CDP).
* USB_CHG_STATE_DETECTED USB charger type is determined.
* USB_CHG_STATE_QUEUE_SM_WORK SM work to start/stop gadget is queued.
*
*/
enum usb_chg_state {
USB_CHG_STATE_UNDEFINED = 0,
USB_CHG_STATE_IN_PROGRESS,
USB_CHG_STATE_WAIT_FOR_DCD,
USB_CHG_STATE_DCD_DONE,
USB_CHG_STATE_PRIMARY_DONE,
USB_CHG_STATE_SECONDARY_DONE,
USB_CHG_STATE_DETECTED,
USB_CHG_STATE_QUEUE_SM_WORK,
};
/**
* USB charger types
*
* USB_INVALID_CHARGER Invalid USB charger.
* USB_SDP_CHARGER Standard downstream port. Refers to a downstream port
* on USB2.0 compliant host/hub.
* USB_DCP_CHARGER Dedicated charger port (AC charger/ Wall charger).
* USB_CDP_CHARGER Charging downstream port. Enumeration can happen and
* IDEV_CHG_MAX can be drawn irrespective of USB state.
* USB_NONCOMPLIANT_CHARGER A non-compliant charger pull DP and DM to specific
* voltages between 2.0-3.3v for identification.
*
*/
enum usb_chg_type {
USB_INVALID_CHARGER = 0,
USB_SDP_CHARGER,
USB_DCP_CHARGER,
USB_CDP_CHARGER,
USB_NONCOMPLIANT_CHARGER,
USB_FLOATED_CHARGER,
};
/**
* Maintain state for hvdcp external charger status
* DEFAULT This is used when DCP is detected
* ACTIVE This is used when ioctl is called to block LPM
* INACTIVE This is used when ioctl is called to unblock LPM
*/
enum usb_ext_chg_status {
DEFAULT = 1,
ACTIVE,
INACTIVE,
};
/**
* USB ID state
*/
enum usb_id_state {
USB_ID_GROUND = 0,
USB_ID_FLOAT,
};
#define USB_NUM_BUS_CLOCKS 3
/**
* struct msm_otg: OTG driver data. Shared by HCD and DCD.
* @otg: USB OTG Transceiver structure.
* @pdata: otg device platform data.
* @irq: IRQ number assigned for HSUSB controller.
* @async_irq: IRQ number used by some controllers during low power state
* @phy_irq: IRQ number assigned for PHY to notify events like id and line
state changes.
* @pclk: clock struct of iface_clk.
* @core_clk: clock struct of core_bus_clk.
* @sleep_clk: clock struct of sleep_clk for USB PHY.
* @phy_reset_clk: clock struct of phy_reset_clk for USB PHY. This clock is
a reset only clock and resets the PHY, ULPI bridge and
CSR wrapper.
* @phy_por_clk: clock struct of phy_por_clk for USB PHY. This clock is
a reset only clock and resets only the PHY (POR).
* @phy_csr_clk: clock struct of phy_csr_clk for USB PHY. This clock is
required to access PHY CSR registers via AHB2PHY interface.
* @bus_clks: bimc/snoc/pcnoc clock struct.
* @core_reset: Reset control for core_clk
* @phy_reset: Reset control for phy_reset_clk
* @phy_por_reset: Reset control for phy_por_clk
* @default_noc_mode: default frequency for NOC clocks - SVS or NOM
* @core_clk_rate: core clk max frequency
* @regs: ioremapped register base address.
* @usb_phy_ctrl_reg: relevant PHY_CTRL_REG register base address.
* @inputs: OTG state machine inputs(Id, SessValid etc).
* @sm_work: OTG state machine work.
* @sm_work_pending: OTG state machine work is pending, queued post pm_resume
* @resume_pending: USB h/w lpm_exit pending. Done on next sm_work run
* @pm_suspended: OTG device is system(PM) suspended.
* @pm_notify: Notifier to receive system wide PM transition events.
It is used to defer wakeup events processing until
system is RESUMED.
* @in_lpm: indicates low power mode (LPM) state.
* @async_int: IRQ line on which ASYNC interrupt arrived in LPM.
* @cur_power: The amount of mA available from downstream port.
* @otg_wq: Strict order otg workqueue for OTG works (SM/ID/SUSPEND).
* @chg_work: Charger detection work.
* @chg_state: The state of charger detection process.
* @chg_type: The type of charger attached.
* @chg_detection: True if PHY is doing charger type detection.
* @bus_perf_client: Bus performance client handle to request BUS bandwidth
* @host_bus_suspend: indicates host bus suspend or not.
* @device_bus_suspend: indicates device bus suspend or not.
* @bus_clks_enabled: indicates pcnoc/snoc/bimc clocks are on or not.
* @is_ext_chg_dcp: To indicate whether charger detected by external entity
SMB hardware is DCP charger or not.
* @ext_id_irq: IRQ for ID interrupt.
* @phy_irq_pending: Gets set when PHY IRQ arrives in LPM.
* @id_state: Indicates USBID line status.
* @rm_pulldown: Indicates pulldown status on D+ and D- data lines.
* @dpdm_desc: Regulator descriptor for D+ and D- voting.
* @dpdm_rdev: Regulator class device for dpdm regulator.
* @dbg_idx: Dynamic debug buffer Index.
* @dbg_lock: Dynamic debug buffer Lock.
* @buf: Dynamic Debug Buffer.
* @max_nominal_system_clk_rate: max freq at which system clock can run in
nominal mode.
* @sdp_check: SDP detection work in case of USB_FLOAT power supply
* @notify_charger_work: Charger notification work.
*/
struct msm_otg {
struct usb_phy phy;
struct msm_otg_platform_data *pdata;
struct platform_device *pdev;
struct mutex lock;
int irq;
int async_irq;
int phy_irq;
struct clk *xo_clk;
struct clk *pclk;
struct clk *core_clk;
struct clk *sleep_clk;
struct clk *phy_reset_clk;
struct clk *phy_por_clk;
struct clk *phy_csr_clk;
struct clk *bus_clks[USB_NUM_BUS_CLOCKS];
struct clk *phy_ref_clk;
struct reset_control *core_reset;
struct reset_control *phy_reset;
struct reset_control *phy_por_reset;
long core_clk_rate;
long core_clk_svs_rate;
long core_clk_nominal_rate;
enum usb_noc_mode default_noc_mode;
struct resource *io_res;
void __iomem *regs;
void __iomem *phy_csr_regs;
void __iomem *usb_phy_ctrl_reg;
#define ID 0
#define B_SESS_VLD 1
#define A_BUS_SUSPEND 14
unsigned long inputs;
struct work_struct sm_work;
bool sm_work_pending;
bool resume_pending;
atomic_t pm_suspended;
struct notifier_block pm_notify;
atomic_t in_lpm;
bool err_event_seen;
int async_int;
unsigned int cur_power;
struct workqueue_struct *otg_wq;
struct delayed_work chg_work;
struct delayed_work id_status_work;
enum usb_chg_state chg_state;
enum usb_chg_type chg_type;
bool chg_detection;
unsigned int dcd_time;
unsigned long caps;
uint32_t bus_perf_client;
bool host_bus_suspend;
bool device_bus_suspend;
bool bus_clks_enabled;
/*
* Allowing PHY power collpase turns off the HSUSB 3.3v and 1.8v
* analog regulators while going to low power mode.
* Currently only 28nm PHY has the support to allowing PHY
* power collapse since it doesn't have leakage currents while
* turning off the power rails.
*/
#define ALLOW_PHY_POWER_COLLAPSE BIT(0)
/*
* Allow PHY RETENTION mode before turning off the digital
* voltage regulator(VDDCX).
*/
#define ALLOW_PHY_RETENTION BIT(1)
/*
* Allow putting the core in Low Power mode, when
* USB bus is suspended but cable is connected.
*/
#define ALLOW_LPM_ON_DEV_SUSPEND BIT(2)
/*
* Allowing PHY regulators LPM puts the HSUSB 3.3v and 1.8v
* analog regulators into LPM while going to USB low power mode.
*/
#define ALLOW_PHY_REGULATORS_LPM BIT(3)
/*
* Allow PHY RETENTION mode before turning off the digital
* voltage regulator(VDDCX) during host mode.
*/
#define ALLOW_HOST_PHY_RETENTION BIT(4)
/*
* Allow VDD minimization without putting PHY into retention
* for fixing PHY current leakage issue when LDOs ar turned off.
*/
#define ALLOW_VDD_MIN_WITH_RETENTION_DISABLED BIT(5)
/*
* PHY can keep D+ pull-up during peripheral bus suspend and
* D+/D- pull-down during host bus suspend without any
* re-work. This is possible only when PHY DVDD is supplied
* by a PMIC LDO (unlike VDDCX/VDDMX).
*/
#define ALLOW_BUS_SUSPEND_WITHOUT_REWORK BIT(6)
unsigned long lpm_flags;
#define PHY_PWR_COLLAPSED BIT(0)
#define PHY_RETENTIONED BIT(1)
#define XO_SHUTDOWN BIT(2)
#define CLOCKS_DOWN BIT(3)
#define PHY_REGULATORS_LPM BIT(4)
int reset_counter;
unsigned int online;
dev_t ext_chg_dev;
struct pinctrl *phy_pinctrl;
bool is_ext_chg_dcp;
struct qpnp_vadc_chip *vadc_dev;
int ext_id_irq;
bool phy_irq_pending;
enum usb_id_state id_state;
bool rm_pulldown;
struct regulator_desc dpdm_rdesc;
struct regulator_dev *dpdm_rdev;
/* Maximum debug message length */
#define DEBUG_MSG_LEN 128UL
/* Maximum number of messages */
#define DEBUG_MAX_MSG 256UL
unsigned int dbg_idx;
rwlock_t dbg_lock;
char (buf[DEBUG_MAX_MSG])[DEBUG_MSG_LEN]; /* buffer */
unsigned int vbus_state;
unsigned int usb_irq_count;
int pm_qos_latency;
unsigned int notify_current_mA;
struct pm_qos_request pm_qos_req_dma;
struct delayed_work perf_vote_work;
struct delayed_work sdp_check;
struct work_struct notify_charger_work;
bool enable_sdp_check_timer;
struct icc_path *icc_paths;
};
struct ci13xxx_platform_data {
u8 usb_core_id;
int *tlmm_init_seq;
int tlmm_seq_count;
/*
* value of 2^(log2_itc-1) will be used as the interrupt threshold
* (ITC), when log2_itc is between 1 to 7.
*/
int log2_itc;
bool l1_supported;
bool enable_ahb2ahb_bypass;
bool enable_streaming;
bool enable_axi_prefetch;
};
/**
* struct msm_hsic_host_platform_data - platform device data
* for msm_hsic_host driver.
* @phy_sof_workaround: Enable ALL PHY SOF bug related workarounds for
* SUSPEND, RESET and RESUME.
* @phy_susp_sof_workaround: Enable PHY SOF workaround for
* SUSPEND.
* @phy_reset_sof_workaround: Enable PHY SOF workaround for
* RESET.
* @dis_internal_clk_gating: If set, internal clock gating in controller
* is disabled.
*
*/
struct msm_hsic_host_platform_data {
unsigned int strobe;
unsigned int data;
bool ignore_cal_pad_config;
bool phy_sof_workaround;
bool dis_internal_clk_gating;
bool phy_susp_sof_workaround;
bool phy_reset_sof_workaround;
u32 reset_delay;
int strobe_pad_offset;
int data_pad_offset;
struct msm_bus_scale_pdata *bus_scale_table;
unsigned int log2_irq_thresh;
/* gpio used to resume peripheral */
unsigned int resume_gpio;
int *tlmm_init_seq;
int tlmm_seq_count;
/*swfi latency is required while driving resume on to the bus */
u32 swfi_latency;
/*standalone latency is required when HSCI is active*/
u32 standalone_latency;
bool pool_64_bit_align;
bool enable_hbm;
bool disable_park_mode;
bool consider_ipa_handshake;
bool ahb_async_bridge_bypass;
bool disable_cerr;
};
#ifdef CONFIG_USB_BAM
void msm_bam_set_usb_host_dev(struct device *dev);
int msm_do_bam_disable_enable(enum usb_ctrl ctrl);
#else
static inline void msm_bam_set_usb_host_dev(struct device *dev) {}
static inline int msm_do_bam_disable_enable(enum usb_ctrl ctrl)
{ return true; }
#endif
#ifdef CONFIG_USB_CI13XXX_MSM
void msm_hw_soft_reset(void);
#else
static inline void msm_hw_soft_reset(void)
{
}
#endif
#endif

View file

@ -0,0 +1,177 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __LINUX_USB_GADGET_MSM72K_UDC_H__
#define __LINUX_USB_GADGET_MSM72K_UDC_H__
/* USB phy selector - in TCSR address range */
#define USB2_PHY_SEL 0xfd4ab000
#define USB_AHBBURST (MSM_USB_BASE + 0x0090)
#define USB_AHBMODE (MSM_USB_BASE + 0x0098)
#define USB_GENCONFIG (MSM_USB_BASE + 0x009C)
#define USB_GENCONFIG_2 (MSM_USB_BASE + 0x00a0)
#define USB_HS_GPTIMER_BASE (MSM_USB_BASE + 0x80)
#define ULPI_TX_PKT_EN_CLR_FIX BIT(19)
#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */
#define USB_HS_APF_CTRL (MSM_USB_BASE + 0x0380)
#define APF_CTRL_EN BIT(0)
#define USB_USBCMD (MSM_USB_BASE + 0x0140)
#define USB_USBSTS (MSM_USB_BASE + 0x0144)
#define USB_PORTSC (MSM_USB_BASE + 0x0184)
#define USB_OTGSC (MSM_USB_BASE + 0x01A4)
#define USB_USBMODE (MSM_USB_BASE + 0x01A8)
#define USB_PHY_CTRL (MSM_USB_BASE + 0x0240)
#define USB_PHY_CTRL2 (MSM_USB_BASE + 0x0278)
#define GENCONFIG_2_SESS_VLD_CTRL_EN BIT(7)
#define GENCONFIG_2_LINESTATE_DIFF_WAKEUP_EN BIT(12)
#define GENCONFIG_2_SYS_CLK_HOST_DEV_GATE_EN BIT(13)
#define GENCONFIG_2_DPSE_DMSE_HV_INTR_EN BIT(15)
#define USBCMD_SESS_VLD_CTRL BIT(25)
#define USBCMD_RESET 2
#define USB_USBINTR (MSM_USB_BASE + 0x0148)
#define USB_FRINDEX (MSM_USB_BASE + 0x014C)
#define AHB2AHB_BYPASS BIT(31)
#define AHB2AHB_BYPASS_BIT_MASK BIT(31)
#define AHB2AHB_BYPASS_CLEAR (0 << 31)
#define USB_L1_EP_CTRL (MSM_USB_BASE + 0x0250)
#define USB_L1_CONFIG (MSM_USB_BASE + 0x0254)
#define L1_CONFIG_LPM_EN BIT(4)
#define L1_CONFIG_REMOTE_WAKEUP BIT(5)
#define L1_CONFIG_GATE_SYS_CLK BIT(7)
#define L1_CONFIG_PHY_LPM BIT(10)
#define L1_CONFIG_PLL BIT(11)
#define PORTSC_PHCD (1 << 23) /* phy suspend mode */
#define PORTSC_PTS_MASK (3 << 30)
#define PORTSC_PTS_ULPI (2 << 30)
#define PORTSC_PTS_SERIAL (3 << 30)
#define PORTSC_LS (3 << 10)
#define PORTSC_LS_DM (1 << 10)
#define PORTSC_CCS (1 << 0)
#define USB_ULPI_VIEWPORT (MSM_USB_BASE + 0x0170)
#define ULPI_RUN (1 << 30)
#define ULPI_WRITE (1 << 29)
#define ULPI_READ (0 << 29)
#define ULPI_SYNC_STATE (1 << 27)
#define ULPI_ADDR(n) (((n) & 255) << 16)
#define ULPI_DATA(n) ((n) & 255)
#define ULPI_DATA_READ(n) (((n) >> 8) & 255)
#define GENCONFIG_BAM_DISABLE (1 << 13)
#define GENCONFIG_TXFIFO_IDLE_FORCE_DISABLE (1 << 4)
#define GENCONFIG_ULPI_SERIAL_EN (1 << 5)
/* synopsys 28nm phy registers */
#define ULPI_PWR_CLK_MNG_REG 0x88
#define OTG_COMP_DISABLE BIT(0)
#define ULPI_MISC_A 0x96
#define ULPI_MISC_A_VBUSVLDEXTSEL BIT(1)
#define ULPI_MISC_A_VBUSVLDEXT BIT(0)
#define ASYNC_INTR_CTRL (1 << 29) /* Enable async interrupt */
#define ULPI_STP_CTRL (1 << 30) /* Block communication with PHY */
#define PHY_RETEN (1 << 1) /* PHY retention enable/disable */
#define PHY_IDHV_INTEN (1 << 8) /* PHY ID HV interrupt */
#define PHY_OTGSESSVLDHV_INTEN (1 << 9) /* PHY Session Valid HV int. */
#define PHY_CLAMP_DPDMSE_EN (1 << 21) /* PHY mpm DP DM clamp enable */
#define PHY_POR_BIT_MASK BIT(0)
#define PHY_POR_ASSERT (1 << 0) /* USB2 28nm PHY POR ASSERT */
#define PHY_POR_DEASSERT (0 << 0) /* USB2 28nm PHY POR DEASSERT */
/* OTG definitions */
#define OTGSC_INTSTS_MASK (0x7f << 16)
#define OTGSC_IDPU (1 << 5)
#define OTGSC_ID (1 << 8)
#define OTGSC_BSV (1 << 11)
#define OTGSC_IDIS (1 << 16)
#define OTGSC_BSVIS (1 << 19)
#define OTGSC_IDIE (1 << 24)
#define OTGSC_BSVIE (1 << 27)
/* USB PHY CSR registers and bit definitions */
#define USB_PHY_CSR_PHY_UTMI_CTRL0 (MSM_USB_PHY_CSR_BASE + 0x060)
#define TERM_SEL BIT(6)
#define SLEEP_M BIT(1)
#define PORT_SELECT BIT(2)
#define OP_MODE_MASK 0x30
#define USB_PHY_CSR_PHY_UTMI_CTRL1 (MSM_USB_PHY_CSR_BASE + 0x064)
#define DM_PULLDOWN BIT(3)
#define DP_PULLDOWN BIT(2)
#define XCVR_SEL_MASK 0x3
#define USB_PHY_CSR_PHY_UTMI_CTRL2 (MSM_USB_PHY_CSR_BASE + 0x068)
#define USB_PHY_CSR_PHY_UTMI_CTRL3 (MSM_USB_PHY_CSR_BASE + 0x06c)
#define USB_PHY_CSR_PHY_UTMI_CTRL4 (MSM_USB_PHY_CSR_BASE + 0x070)
#define TX_VALID BIT(0)
#define USB_PHY_CSR_PHY_CTRL_COMMON0 (MSM_USB_PHY_CSR_BASE + 0x078)
#define SIDDQ BIT(2)
#define USB_PHY_CSR_PHY_CTRL1 (MSM_USB_PHY_CSR_BASE + 0x08C)
#define ID_HV_CLAMP_EN_N BIT(1)
#define USB_PHY_CSR_PHY_CTRL2 (MSM_USB_PHY_CSR_BASE + 0x090)
#define USB2_SUSPEND_N BIT(6)
#define USB_PHY_CSR_PHY_CTRL3 (MSM_USB_PHY_CSR_BASE + 0x094)
#define CLAMP_MPM_DPSE_DMSE_EN_N BIT(2)
#define USB_PHY_CSR_PHY_CFG0 (MSM_USB_PHY_CSR_BASE + 0x0c4)
#define USB2_PHY_USB_PHY_IRQ_CMD (MSM_USB_PHY_CSR_BASE + 0x0D0)
#define USB2_PHY_USB_PHY_INTERRUPT_SRC_STATUS (MSM_USB_PHY_CSR_BASE + 0x05C)
#define USB2_PHY_USB_PHY_INTERRUPT_CLEAR0 (MSM_USB_PHY_CSR_BASE + 0x0DC)
#define USB2_PHY_USB_PHY_DPDM_CLEAR_MASK 0x1E
#define USB2_PHY_USB_PHY_INTERRUPT_CLEAR1 (MSM_USB_PHY_CSR_BASE + 0x0E0)
#define USB2_PHY_USB_PHY_INTERRUPT_MASK0 (MSM_USB_PHY_CSR_BASE + 0x0D4)
#define USB2_PHY_USB_PHY_DP_1_0_MASK BIT(4)
#define USB2_PHY_USB_PHY_DP_0_1_MASK BIT(3)
#define USB2_PHY_USB_PHY_DM_1_0_MASK BIT(2)
#define USB2_PHY_USB_PHY_DM_0_1_MASK BIT(1)
#define USB2_PHY_USB_PHY_INTERRUPT_MASK1 (MSM_USB_PHY_CSR_BASE + 0x0D8)
#define USB_PHY_IDDIG_1_0 BIT(7)
#define USB_PHY_IDDIG_RISE_MASK BIT(0)
#define USB_PHY_IDDIG_FALL_MASK BIT(1)
#define USB_PHY_ID_MASK (USB_PHY_IDDIG_RISE_MASK | USB_PHY_IDDIG_FALL_MASK)
#define ENABLE_DP_MANUAL_PULLUP BIT(0)
#define ENABLE_SECONDARY_PHY BIT(1)
#define PHY_SOFT_CONNECT BIT(12)
/*
* The following are bit fields describing the usb_request.udc_priv word.
* These bit fields are set by function drivers that wish to queue
* usb_requests with sps/bam parameters.
*/
#define MSM_TX_PIPE_ID_OFS (16)
#define MSM_SPS_MODE BIT(5)
#define MSM_IS_FINITE_TRANSFER BIT(6)
#define MSM_PRODUCER BIT(7)
#define MSM_DISABLE_WB BIT(8)
#define MSM_ETD_IOC BIT(9)
#define MSM_INTERNAL_MEM BIT(10)
#define MSM_VENDOR_ID BIT(16)
#endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */

Some files were not shown because too many files have changed in this diff Show more