Merge db24726bfe ("Merge tag 'integrity-v5.12-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity") into android-mainline
Steps on the way to 5.12-rc5

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Id09d774493bcb5ccf3a797a200fbf36955f19bea
commit 469c64d064
257 changed files with 2434 additions and 1001 deletions
 .mailmap    |  3 +++
@@ -36,6 +36,7 @@ Andrew Morton <akpm@linux-foundation.org>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
+Andrey Konovalov <andreyknvl@gmail.com> <andreyknvl@google.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
 Andy Adamson <andros@citi.umich.edu>
@@ -65,6 +66,8 @@ Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
+Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
+Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
 Corey Minyard <minyard@acm.org>
@@ -17,12 +17,12 @@ For ACPI on arm64, tables also fall into the following categories:
 
        - Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT
 
-       - Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IORT,
-         MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT, STAO,
-         TCPA, TPM2, UEFI, XENV
+       - Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IBFT,
+         IORT, MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT,
+         STAO, TCPA, TPM2, UEFI, XENV
 
-       - Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IBFT, IVRS, LPIT,
-         MSDM, OEMx, PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
+       - Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, MSDM, OEMx,
+         PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
 
 ====== ========================================================================
 Table  Usage for ARMv8 Linux
@@ -130,6 +130,9 @@ stable kernels.
 | Marvell        | ARM-MMU-500     | #582743         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| NVIDIA         | Carmel Core     | N/A             | NVIDIA_CARMEL_CNP_ERRATUM   |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
@@ -267,7 +267,7 @@ DATA PATH
 Tx
 --
 
-end_start_xmit() is called by the stack. This function does the following:
+ena_start_xmit() is called by the stack. This function does the following:
 
 - Maps data buffers (skb->data and frags).
 - Populates ena_buf for the push buffer (if the driver and device are
@@ -52,7 +52,7 @@ purposes as a standard complementary tool. The system's view from
 ``devlink-dpipe`` should change according to the changes done by the
 standard configuration tools.
 
-For example, it’s quiet common to implement Access Control Lists (ACL)
+For example, it’s quite common to implement Access Control Lists (ACL)
 using Ternary Content Addressable Memory (TCAM). The TCAM memory can be
 divided into TCAM regions. Complex TC filters can have multiple rules with
 different priorities and different lookup keys. On the other hand hardware
@@ -151,7 +151,7 @@ representor netdevice.
 -------------
 A subfunction devlink port is created but it is not active yet. That means the
 entities are created on devlink side, the e-switch port representor is created,
-but the subfunction device itself it not created. A user might use e-switch port
+but the subfunction device itself is not created. A user might use e-switch port
 representor to do settings, putting it into bridge, adding TC rules, etc. A user
 might as well configure the hardware address (such as MAC address) of the
 subfunction while subfunction is inactive.
@@ -173,7 +173,7 @@ Terms and Definitions
    * - Term
      - Definitions
    * - ``PCI device``
-     - A physical PCI device having one or more PCI bus consists of one or
+     - A physical PCI device having one or more PCI buses consists of one or
       more PCI controllers.
    * - ``PCI controller``
      - A controller consists of potentially multiple physical functions,
@@ -50,7 +50,7 @@ Callbacks to implement
 
 The NIC driver offering ipsec offload will need to implement these
 callbacks to make the offload available to the network stack's
-XFRM subsytem. Additionally, the feature bits NETIF_F_HW_ESP and
+XFRM subsystem. Additionally, the feature bits NETIF_F_HW_ESP and
 NETIF_F_HW_ESP_TX_CSUM will signal the availability of the offload.
 
 
 MAINTAINERS | 15 +++++++--------
@@ -8520,6 +8520,7 @@ IBM Power SRIOV Virtual NIC Device Driver
 M:	Dany Madden <drt@linux.ibm.com>
 M:	Lijun Pan <ljp@linux.ibm.com>
+M:	Sukadev Bhattiprolu <sukadev@linux.ibm.com>
 R:	Thomas Falcon <tlfalcon@linux.ibm.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/ethernet/ibm/ibmvnic.*
@@ -12544,7 +12545,7 @@ NETWORKING [MPTCP]
 M:	Mat Martineau <mathew.j.martineau@linux.intel.com>
 M:	Matthieu Baerts <matthieu.baerts@tessares.net>
 L:	netdev@vger.kernel.org
-L:	mptcp@lists.01.org
+L:	mptcp@lists.linux.dev
 S:	Maintained
 W:	https://github.com/multipath-tcp/mptcp_net-next/wiki
 B:	https://github.com/multipath-tcp/mptcp_net-next/issues
@@ -14721,15 +14722,11 @@ F:	drivers/net/ethernet/qlogic/qlcnic/
 QLOGIC QLGE 10Gb ETHERNET DRIVER
 M:	Manish Chopra <manishc@marvell.com>
 M:	GR-Linux-NIC-Dev@marvell.com
-L:	netdev@vger.kernel.org
-S:	Supported
-F:	drivers/staging/qlge/
-
-QLOGIC QLGE 10Gb ETHERNET DRIVER
 M:	Coiby Xu <coiby.xu@gmail.com>
 L:	netdev@vger.kernel.org
-S:	Maintained
+S:	Supported
+F:	Documentation/networking/device_drivers/qlogic/qlge.rst
 F:	drivers/staging/qlge/
 
 QM1D1B0004 MEDIA DRIVER
 M:	Akihiro Tsukada <tskd08@gmail.com>
@@ -16899,8 +16896,10 @@ F:	tools/spi/
 
 SPIDERNET NETWORK DRIVER for CELL
 M:	Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
+M:	Geoff Levand <geoff@infradead.org>
 L:	netdev@vger.kernel.org
-S:	Supported
+L:	linuxppc-dev@lists.ozlabs.org
+S:	Maintained
 F:	Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
 F:	drivers/net/ethernet/toshiba/spider_net*
 
@@ -810,6 +810,16 @@ config QCOM_FALKOR_ERRATUM_E1041
 
 	  If unsure, say Y.
 
+config NVIDIA_CARMEL_CNP_ERRATUM
+	bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+	default y
+	help
+	  If CNP is enabled on Carmel cores, non-sharable TLBIs on a core will not
+	  invalidate shared TLB entries installed by a different core, as it would
+	  on standard ARM cores.
+
+	  If unsure, say Y.
+
 config SOCIONEXT_SYNQUACER_PREITS
 	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
 	default y
@@ -37,7 +37,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 	} while (--n > 0);
 
 	sum += ((sum >> 32) | (sum << 32));
-	return csum_fold((__force u32)(sum >> 32));
+	return csum_fold((__force __wsum)(sum >> 32));
 }
 #define ip_fast_csum ip_fast_csum
 
@@ -66,7 +66,8 @@
 #define ARM64_WORKAROUND_1508412		58
 #define ARM64_HAS_LDAPR				59
 #define ARM64_KVM_PROTECTED_MODE		60
+#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP	61
 
-#define ARM64_NCAPS				61
+#define ARM64_NCAPS				62
 
 #endif /* __ASM_CPUCAPS_H */
@@ -254,6 +254,8 @@ unsigned long get_wchan(struct task_struct *p);
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
 					 struct task_struct *next);
 
+asmlinkage void arm64_preempt_schedule_irq(void);
+
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
 
@@ -55,6 +55,8 @@ void arch_setup_new_exec(void);
 #define arch_setup_new_exec arch_setup_new_exec
 
 void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+			 struct task_struct *src);
 
 #endif
 
@@ -525,6 +525,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 				  0, 0,
 				  1, 0),
 	},
 #endif
+#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
+	{
+		/* NVIDIA Carmel */
+		.desc = "NVIDIA Carmel CNP erratum",
+		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+	},
+#endif
 	{
 	}
@@ -1321,7 +1321,10 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 	 * may share TLB entries with a CPU stuck in the crashed
 	 * kernel.
 	 */
-	 if (is_kdump_kernel())
+	if (is_kdump_kernel())
+		return false;
+
+	if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
 		return false;
 
 	return has_cpuid_feature(entry, scope);
@@ -353,7 +353,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	 * with the CLIDR_EL1 fields to avoid triggering false warnings
 	 * when there is a mismatch across the CPUs. Keep track of the
 	 * effective value of the CTR_EL0 in our internal records for
-	 * acurate sanity check and feature enablement.
+	 * accurate sanity check and feature enablement.
 	 */
 	info->reg_ctr = read_cpuid_effective_cachetype();
 	info->reg_dczid = read_cpuid(DCZID_EL0);
@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
 	memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+	*ppos += count;
+
 	return count;
 }
@@ -59,6 +59,8 @@
 #include <asm/processor.h>
 #include <asm/pointer_auth.h>
+#include <asm/stacktrace.h>
+#include <asm/switch_to.h>
 #include <asm/system_misc.h>
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
@@ -195,8 +195,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 
 #ifdef CONFIG_STACKTRACE
 
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
-		     struct task_struct *task, struct pt_regs *regs)
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+			      void *cookie, struct task_struct *task,
+			      struct pt_regs *regs)
 {
 	struct stackframe frame;
 
@@ -204,8 +205,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
 		start_backtrace(&frame, regs->regs[29], regs->pc);
 	else if (task == current)
 		start_backtrace(&frame,
-				(unsigned long)__builtin_frame_address(0),
-				(unsigned long)arch_stack_walk);
+				(unsigned long)__builtin_frame_address(1),
+				(unsigned long)__builtin_return_address(0));
 	else
 		start_backtrace(&frame, thread_saved_fp(task),
 				thread_saved_pc(task));
@@ -1448,6 +1448,22 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 struct range arch_get_mappable_range(void)
 {
 	struct range mhp_range;
+	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+	u64 end_linear_pa = __pa(PAGE_END - 1);
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		/*
+		 * Check for a wrap, it is possible because of randomized linear
+		 * mapping the start physical address is actually bigger than
+		 * the end physical address. In this case set start to zero
+		 * because [0, end_linear_pa] range must still be able to cover
+		 * all addressable physical addresses.
+		 */
+		if (start_linear_pa > end_linear_pa)
+			start_linear_pa = 0;
+	}
+
+	WARN_ON(start_linear_pa > end_linear_pa);
 
 	/*
 	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
@@ -1455,8 +1471,9 @@ struct range arch_get_mappable_range(void)
 	 * range which can be mapped inside this linear mapping range, must
 	 * also be derived from its end points.
 	 */
-	mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
-	mhp_range.end =  __pa(PAGE_END - 1);
+	mhp_range.start = start_linear_pa;
+	mhp_range.end =  end_linear_pa;
 
 	return mhp_range;
 }
@@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr,	\
 			char *buf)					\
 {									\
 	u32 cpu=dev->id;						\
-	return sprintf(buf, "%lx\n", name[cpu]);			\
+	return sprintf(buf, "%llx\n", name[cpu]);			\
 }
 
 #define store(name)							\
@@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 
 #ifdef ERR_INJ_DEBUG
 	printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
-	printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
-	printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
-	printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
+	printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
+	printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
+	printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n",
 	       err_data_buffer[cpu].data1,
 	       err_data_buffer[cpu].data2,
 	       err_data_buffer[cpu].data3);
@@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 
 #ifdef ERR_INJ_DEBUG
 	printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
-	printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
-	printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
+	printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]);
+	printk(KERN_DEBUG "resources=%llx\n", resources[cpu]);
 #endif
 	return size;
 }
@@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
 	unsigned int cpu=dev->id;
-	return sprintf(buf, "%lx\n", phys_addr[cpu]);
+	return sprintf(buf, "%llx\n", phys_addr[cpu]);
 }
 
 static ssize_t
@@ -145,7 +145,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 	ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
-		printk("Virtual address %lx is not existing.\n",virt_addr);
+		printk("Virtual address %llx is not existing.\n", virt_addr);
 #endif
 		return -EINVAL;
 	}
@@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev,
 {
 	unsigned int cpu=dev->id;
 
-	return sprintf(buf, "%lx, %lx, %lx\n",
+	return sprintf(buf, "%llx, %llx, %llx\n",
 		       err_data_buffer[cpu].data1,
 		       err_data_buffer[cpu].data2,
 		       err_data_buffer[cpu].data3);
@@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev,
 	int ret;
 
 #ifdef ERR_INJ_DEBUG
-	printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
+	printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n",
 	       err_data_buffer[cpu].data1,
 	       err_data_buffer[cpu].data2,
 	       err_data_buffer[cpu].data3,
 	       cpu);
 #endif
-	ret=sscanf(buf, "%lx, %lx, %lx",
+	ret = sscanf(buf, "%llx, %llx, %llx",
 		   &err_data_buffer[cpu].data1,
 		   &err_data_buffer[cpu].data2,
 		   &err_data_buffer[cpu].data3);
@@ -1824,7 +1824,7 @@ ia64_mca_cpu_init(void *cpu_data)
 			data = mca_bootmem();
 			first_time = 0;
 		} else
-			data = (void *)__get_free_pages(GFP_KERNEL,
+			data = (void *)__get_free_pages(GFP_ATOMIC,
 							get_order(sz));
 		if (!data)
 			panic("Could not allocate MCA memory for cpu %d\n",
@@ -1936,7 +1936,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
  * add rsp, 8                      // skip eth_type_trans's frame
  * ret                             // return to its caller
  */
-int arch_prepare_bpf_trampoline(void *image, void *image_end,
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
 				const struct btf_func_model *m, u32 flags,
 				struct bpf_tramp_progs *tprogs,
 				void *orig_call)
@@ -1975,6 +1975,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
 
 	save_regs(m, &prog, nr_args, stack_size);
 
+	if (flags & BPF_TRAMP_F_CALL_ORIG) {
+		/* arg1: mov rdi, im */
+		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
+		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
+	}
+
 	if (fentry->nr_progs)
 		if (invoke_bpf(m, &prog, fentry, stack_size))
 			return -EINVAL;
@@ -1993,8 +2002,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
 	}
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
-		if (fentry->nr_progs || fmod_ret->nr_progs)
-			restore_regs(m, &prog, nr_args, stack_size);
+		restore_regs(m, &prog, nr_args, stack_size);
 
 		/* call original function */
 		if (emit_call(&prog, orig_call, prog)) {
@@ -2003,6 +2011,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
 		}
 		/* remember return value in a stack for bpf prog to access */
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+		im->ip_after_call = prog;
+		memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
+		prog += X86_PATCH_SIZE;
 	}
 
 	if (fmod_ret->nr_progs) {
@@ -2033,9 +2044,17 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
 	 * the return value is only updated on the stack and still needs to be
 	 * restored to R0.
 	 */
-	if (flags & BPF_TRAMP_F_CALL_ORIG)
+	if (flags & BPF_TRAMP_F_CALL_ORIG) {
+		im->ip_epilogue = prog;
+		/* arg1: mov rdi, im */
+		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
+		if (emit_call(&prog, __bpf_tramp_exit, prog)) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
 		/* restore original return value back into RAX */
 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+	}
 
 	EMIT1(0x5B); /* pop rbx */
 	EMIT1(0xC9); /* leave */
@@ -2225,7 +2244,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		padding = true;
 		goto skip_init_addrs;
 	}
-	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
+	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
 	if (!addrs) {
 		prog = orig_prog;
 		goto out_addrs;
@@ -2317,7 +2336,7 @@ out_image:
 		if (image)
 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
 out_addrs:
-		kfree(addrs);
+		kvfree(addrs);
 		kfree(jit_data);
 		prog->aux->jit_data = NULL;
 	}
@@ -730,7 +730,8 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
 	struct clk_rate_request parent_req = { };
 	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
 	struct clk_hw *xo, *p0, *p1, *p2;
-	unsigned long request, p0_rate;
+	unsigned long p0_rate;
+	u8 mux_div = cgfx->div;
 	int ret;
 
 	p0 = cgfx->hws[0];
@@ -750,14 +751,15 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
 		return 0;
 	}
 
-	request = req->rate;
-	if (cgfx->div > 1)
-		parent_req.rate = request = request * cgfx->div;
+	if (mux_div == 0)
+		mux_div = 1;
+
+	parent_req.rate = req->rate * mux_div;
 
 	/* This has to be a fixed rate PLL */
 	p0_rate = clk_hw_get_rate(p0);
 
-	if (request == p0_rate) {
+	if (parent_req.rate == p0_rate) {
 		req->rate = req->best_parent_rate = p0_rate;
 		req->best_parent_hw = p0;
 		return 0;
@@ -765,7 +767,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
 
 	if (req->best_parent_hw == p0) {
 		/* Are we going back to a previously used rate? */
-		if (clk_hw_get_rate(p2) == request)
+		if (clk_hw_get_rate(p2) == parent_req.rate)
 			req->best_parent_hw = p2;
 		else
 			req->best_parent_hw = p1;
@@ -780,8 +782,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
 		return ret;
 
 	req->rate = req->best_parent_rate = parent_req.rate;
-	if (cgfx->div > 1)
-		req->rate /= cgfx->div;
+	req->rate /= mux_div;
 
 	return 0;
 }
@@ -510,9 +510,12 @@ static const struct clk_rpmh_desc clk_rpmh_sm8350 = {
 	.num_clks = ARRAY_SIZE(sm8350_rpmh_clocks),
 };
 
+/* Resource name must match resource id present in cmd-db */
+DEFINE_CLK_RPMH_ARC(sc7280, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 4);
+
 static struct clk_hw *sc7280_rpmh_clocks[] = {
-	[RPMH_CXO_CLK]      = &sdm845_bi_tcxo.hw,
-	[RPMH_CXO_CLK_A]    = &sdm845_bi_tcxo_ao.hw,
+	[RPMH_CXO_CLK]      = &sc7280_bi_tcxo.hw,
+	[RPMH_CXO_CLK_A]    = &sc7280_bi_tcxo_ao.hw,
 	[RPMH_LN_BB_CLK2]   = &sdm845_ln_bb_clk2.hw,
 	[RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
 	[RPMH_RF_CLK1]      = &sdm845_rf_clk1.hw,
@@ -620,7 +620,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
 		.name = "gcc_sdcc1_apps_clk_src",
 		.parent_data = gcc_parent_data_1,
 		.num_parents = 5,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_floor_ops,
 	},
 };
 
@@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
 		.name = "gcc_sdcc1_ice_core_clk_src",
 		.parent_data = gcc_parent_data_0,
 		.num_parents = 4,
-		.ops = &clk_rcg2_floor_ops,
+		.ops = &clk_rcg2_ops,
 	},
 };
 
@@ -3610,13 +3610,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 	    ep->com.local_addr.ss_family == AF_INET) {
 		err = cxgb4_remove_server_filter(
 			ep->com.dev->rdev.lldi.ports[0], ep->stid,
-			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+			ep->com.dev->rdev.lldi.rxq_ids[0], false);
 	} else {
 		struct sockaddr_in6 *sin6;
 		c4iw_init_wr_wait(ep->com.wr_waitp);
 		err = cxgb4_remove_server(
 			ep->com.dev->rdev.lldi.ports[0], ep->stid,
-			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+			ep->com.dev->rdev.lldi.rxq_ids[0], true);
 		if (err)
 			goto done;
 		err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
@@ -1194,8 +1194,10 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
 			   upper_32_bits(dma));
 		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
 			   (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
-		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
+
+		/* Make sure to write tail first and then head */
 		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
+		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
 	} else {
 		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
 		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
@@ -1116,7 +1116,7 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
 	case MLX5_CMD_OP_CREATE_MKEY:
 		MLX5_SET(destroy_mkey_in, din, opcode,
 			 MLX5_CMD_OP_DESTROY_MKEY);
-		MLX5_SET(destroy_mkey_in, in, mkey_index, *obj_id);
+		MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
 		break;
 	case MLX5_CMD_OP_CREATE_CQ:
 		MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
@@ -1078,7 +1078,7 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev,
 
 	qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
 	MLX5_SET(qpc, qpc, uar_page, uar_index);
-	MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT);
+	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
 	MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
 	/* Set "fast registration enabled" for all kernel QPs */
@@ -1188,7 +1188,8 @@ static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
 		}
 		return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
 	}
-	return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
+	return fr_supported ? MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING :
+			      MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
 }
 
 static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
@@ -1206,7 +1207,8 @@ static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
 		}
 		return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
 	}
-	return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
+	return fr_supported ? MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING :
+			      MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
 }
 
 static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
@@ -1217,7 +1219,8 @@ static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
 			  MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
 		  MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
 			  MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
-	int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
+	int ts_format = fr_supported ? MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
+				       MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
 
 	if (recv_cq &&
 	    recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
@@ -1930,6 +1933,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
 		MLX5_SET(qpc, qpc, cd_slave_receive, 1);
 
+	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
 	MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
 	MLX5_SET(qpc, qpc, no_sq, 1);
 	MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
@@ -4873,6 +4877,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 	struct mlx5_ib_dev *dev;
 	int has_net_offloads;
 	__be64 *rq_pas0;
+	int ts_format;
 	void *in;
 	void *rqc;
 	void *wq;
@@ -4881,6 +4886,10 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 
 	dev = to_mdev(pd->device);
 
+	ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq));
+	if (ts_format < 0)
+		return ts_format;
+
 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
@@ -4890,6 +4899,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
 	MLX5_SET(rqc, rqc, mem_rq_type,
 		 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
+	MLX5_SET(rqc, rqc, ts_format, ts_format);
 	MLX5_SET(rqc, rqc, user_index, rwq->user_index);
 	MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
@@ -721,7 +721,7 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
  * Return value: CAPI result code
  */
 
-u16 capi20_get_manufacturer(u32 contr, u8 *buf)
+u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN])
 {
 	struct capi_ctr *ctr;
 	u16 ret;
@@ -787,7 +787,7 @@ u16 capi20_get_version(u32 contr, struct capi_version *verp)
  * Return value: CAPI result code
  */
 
-u16 capi20_get_serial(u32 contr, u8 *serial)
+u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
 {
 	struct capi_ctr *ctr;
 	u16 ret;
@@ -694,7 +694,7 @@ isac_release(struct isac_hw *isac)
 {
 	if (isac->type & IPAC_TYPE_ISACX)
 		WriteISAC(isac, ISACX_MASK, 0xff);
-	else
+	else if (isac->type != 0)
 		WriteISAC(isac, ISAC_MASK, 0xff);
 	if (isac->dch.timer.function != NULL) {
 		del_timer(&isac->dch.timer);
@@ -72,7 +72,8 @@ static const struct dmi_system_id dmi_platform_info[] = {
 	{}
 };
 
-static const struct resource intel_quark_i2c_res[] = {
+/* This is used as a place holder and will be modified at run-time */
+static struct resource intel_quark_i2c_res[] = {
 	[INTEL_QUARK_IORES_MEM] = {
 		.flags = IORESOURCE_MEM,
 	},
@@ -85,7 +86,8 @@ static struct mfd_cell_acpi_match intel_quark_acpi_match_i2c = {
 	.adr = MFD_ACPI_MATCH_I2C,
 };
 
-static const struct resource intel_quark_gpio_res[] = {
+/* This is used as a place holder and will be modified at run-time */
+static struct resource intel_quark_gpio_res[] = {
 	[INTEL_QUARK_IORES_MEM] = {
 		.flags = IORESOURCE_MEM,
 	},
@@ -127,6 +127,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
 	int i, ioaddr, ret;
 	struct resource *r;
 
+	ret = 0;
+
 	if (pci_enable_device(pdev))
 		return -EIO;
 
@@ -139,6 +141,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
 	priv->ci = ci;
 	mm = &ci->misc_map;
 
+	pci_set_drvdata(pdev, priv);
+
 	INIT_LIST_HEAD(&priv->list_dev);
 
 	if (mm->size) {
@@ -161,7 +165,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
 		dev = alloc_arcdev(device);
 		if (!dev) {
 			ret = -ENOMEM;
-			goto out_port;
+			break;
 		}
 		dev->dev_port = i;
 
@@ -178,7 +182,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
 			pr_err("IO region %xh-%xh already allocated\n",
 			       ioaddr, ioaddr + cm->size - 1);
 			ret = -EBUSY;
-			goto out_port;
+			goto err_free_arcdev;
 		}
 
 		/* Dummy access after Reset
@@ -216,18 +220,18 @@ static int com20020pci_probe(struct pci_dev *pdev,
 		if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
 			pr_err("IO address %Xh is empty!\n", ioaddr);
 			ret = -EIO;
-			goto out_port;
+			goto err_free_arcdev;
 		}
 		if (com20020_check(dev)) {
 			ret = -EIO;
-			goto out_port;
+			goto err_free_arcdev;
 		}
 
 		card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
 				    GFP_KERNEL);
 		if (!card) {
 			ret = -ENOMEM;
-			goto out_port;
+			goto err_free_arcdev;
 		}
 
 		card->index = i;
@@ -253,29 +257,29 @@ static int com20020pci_probe(struct pci_dev *pdev,
 
 		ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
 		if (ret)
-			goto out_port;
+			goto err_free_arcdev;
 
 		ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
 		if (ret)
-			goto out_port;
+			goto err_free_arcdev;
 
 		dev_set_drvdata(&dev->dev, card);
 
 		ret = com20020_found(dev, IRQF_SHARED);
 		if (ret)
-			goto out_port;
+			goto err_free_arcdev;
 
 		devm_arcnet_led_init(dev, dev->dev_id, i);
 
 		list_add(&card->list, &priv->list_dev);
+		continue;
+
+err_free_arcdev:
+		free_arcdev(dev);
+		break;
 	}
 
-	pci_set_drvdata(pdev, priv);
-
-	return 0;
-
-out_port:
-	com20020pci_remove(pdev);
+	if (ret)
+		com20020pci_remove(pdev);
+
 	return ret;
 }
@@ -3978,15 +3978,11 @@ static int bond_neigh_init(struct neighbour *n)
 
 	rcu_read_lock();
 	slave = bond_first_slave_rcu(bond);
-	if (!slave) {
-		ret = -EINVAL;
+	if (!slave)
 		goto out;
-	}
 	slave_ops = slave->dev->netdev_ops;
-	if (!slave_ops->ndo_neigh_setup) {
-		ret = -EINVAL;
+	if (!slave_ops->ndo_neigh_setup)
 		goto out;
-	}
 
 	/* TODO: find another way [1] to implement this.
 	 * Passing a zeroed structure is fragile,
@@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = {
 	.brp_inc = 1,
 };
 
-static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
-{
-	if (priv->device)
-		pm_runtime_enable(priv->device);
-}
-
-static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
-{
-	if (priv->device)
-		pm_runtime_disable(priv->device);
-}
-
 static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
 {
 	if (priv->device)
@@ -1335,7 +1323,6 @@ static const struct net_device_ops c_can_netdev_ops = {
 
 int register_c_can_dev(struct net_device *dev)
 {
-	struct c_can_priv *priv = netdev_priv(dev);
 	int err;
 
 	/* Deactivate pins to prevent DRA7 DCAN IP from being
@@ -1345,28 +1332,19 @@ int register_c_can_dev(struct net_device *dev)
 	 */
 	pinctrl_pm_select_sleep_state(dev->dev.parent);
 
-	c_can_pm_runtime_enable(priv);
-
 	dev->flags |= IFF_ECHO;	/* we support local echo */
 	dev->netdev_ops = &c_can_netdev_ops;
 
 	err = register_candev(dev);
-	if (err)
-		c_can_pm_runtime_disable(priv);
-	else
+	if (!err)
 		devm_can_led_init(dev);
 
 	return err;
 }
 EXPORT_SYMBOL_GPL(register_c_can_dev);
 
 void unregister_c_can_dev(struct net_device *dev)
 {
-	struct c_can_priv *priv = netdev_priv(dev);
-
 	unregister_candev(dev);
-
-	c_can_pm_runtime_disable(priv);
 }
 EXPORT_SYMBOL_GPL(unregister_c_can_dev);
 
@@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct c_can_priv *priv = netdev_priv(dev);
+	void __iomem *addr = priv->base;
 
 	unregister_c_can_dev(dev);
 
 	free_c_can_dev(dev);
 
-	pci_iounmap(pdev, priv->base);
+	pci_iounmap(pdev, addr);
 	pci_disable_msi(pdev);
 	pci_clear_master(pdev);
 	pci_release_regions(pdev);
@@ -29,6 +29,7 @@
 #include <linux/list.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/clk.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -386,6 +387,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
 
+	pm_runtime_enable(priv->device);
 	ret = register_c_can_dev(dev);
 	if (ret) {
 		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
@@ -398,6 +400,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
 	return 0;
 
 exit_free_device:
+	pm_runtime_disable(priv->device);
 	free_c_can_dev(dev);
 exit:
 	dev_err(&pdev->dev, "probe failed\n");
@@ -408,9 +411,10 @@ exit:
 static int c_can_plat_remove(struct platform_device *pdev)
 {
 	struct net_device *dev = platform_get_drvdata(pdev);
+	struct c_can_priv *priv = netdev_priv(dev);
 
 	unregister_c_can_dev(dev);
-
+	pm_runtime_disable(priv->device);
 	free_c_can_dev(dev);
 
 	return 0;
@@ -355,6 +355,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head)
 
 struct rtnl_link_ops can_link_ops __read_mostly = {
 	.kind		= "can",
+	.netns_refund	= true,
 	.maxtype	= IFLA_CAN_MAX,
 	.policy		= can_policy,
 	.setup		= can_setup,
@@ -697,9 +697,15 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
 static int flexcan_chip_freeze(struct flexcan_priv *priv)
 {
 	struct flexcan_regs __iomem *regs = priv->regs;
-	unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
+	unsigned int timeout;
+	u32 bitrate = priv->can.bittiming.bitrate;
 	u32 reg;
 
+	if (bitrate)
+		timeout = 1000 * 1000 * 10 / bitrate;
+	else
+		timeout = FLEXCAN_TIMEOUT_US / 10;
+
 	reg = priv->read(&regs->mcr);
 	reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT;
 	priv->write(reg, &regs->mcr);
@@ -57,6 +57,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
 #define KVASER_PCIEFD_KCAN_STAT_REG 0x418
 #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
 #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
+#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430
 /* Loopback control register */
@@ -949,6 +950,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
 		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
 			    0);
 
+		/* Disable Bus load reporting */
+		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
+
 		tx_npackets = ioread32(can->reg_base +
 				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
 		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
@@ -501,9 +501,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
 	}
 
 	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
-		if (rxfs & RXFS_RFL)
-			netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
-
 		m_can_read_fifo(dev, rxfs);
 
 		quota--;
@@ -876,7 +873,7 @@ static int m_can_rx_peripheral(struct net_device *dev)
 {
 	struct m_can_classdev *cdev = netdev_priv(dev);
 
-	m_can_rx_handler(dev, 1);
+	m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
 
 	m_can_enable_all_interrupts(cdev);
 
@@ -73,6 +73,7 @@ config CAN_KVASER_USB
 	    - Kvaser Memorator Pro 5xHS
 	    - Kvaser USBcan Light 4xHS
 	    - Kvaser USBcan Pro 2xHS v2
+	    - Kvaser USBcan Pro 4xHS
 	    - Kvaser USBcan Pro 5xHS
 	    - Kvaser U100
 	    - Kvaser U100P
@@ -86,8 +86,9 @@
 #define USB_U100_PRODUCT_ID			273
 #define USB_U100P_PRODUCT_ID			274
 #define USB_U100S_PRODUCT_ID			275
+#define USB_USBCAN_PRO_4HS_PRODUCT_ID		276
 #define USB_HYDRA_PRODUCT_ID_END \
-	USB_U100S_PRODUCT_ID
+	USB_USBCAN_PRO_4HS_PRODUCT_ID
 
 static inline bool kvaser_is_leaf(const struct usb_device_id *id)
 {
@@ -193,6 +194,7 @@ static const struct usb_device_id kvaser_usb_table[] = {
 	{ USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
 	{ USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
 	{ USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
+	{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) },
 	{ }
 };
 MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -1105,13 +1105,6 @@ static int b53_setup(struct dsa_switch *ds)
 			b53_disable_port(ds, port);
 	}
 
-	/* Let DSA handle the case were multiple bridges span the same switch
-	 * device and different VLAN awareness settings are requested, which
-	 * would be breaking filtering semantics for any of the other bridge
-	 * devices. (not hardware supported)
-	 */
-	ds->vlan_filtering_is_global = true;
-
 	return b53_setup_devlink_resources(ds);
 }
 
@@ -2664,6 +2657,13 @@ struct b53_device *b53_switch_alloc(struct device *base,
 	ds->ops = &b53_switch_ops;
 	ds->untag_bridge_pvid = true;
 	dev->vlan_enabled = true;
+	/* Let DSA handle the case were multiple bridges span the same switch
+	 * device and different VLAN awareness settings are requested, which
+	 * would be breaking filtering semantics for any of the other bridge
+	 * devices. (not hardware supported)
+	 */
+	ds->vlan_filtering_is_global = true;
+
 	mutex_init(&dev->reg_mutex);
 	mutex_init(&dev->stats_mutex);
 
@@ -114,7 +114,10 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 		/* Force link status for IMP port */
 		reg = core_readl(priv, offset);
 		reg |= (MII_SW_OR | LINK_STS);
-		reg &= ~GMII_SPEED_UP_2G;
+		if (priv->type == BCM4908_DEVICE_ID)
+			reg |= GMII_SPEED_UP_2G;
+		else
+			reg &= ~GMII_SPEED_UP_2G;
 		core_writel(priv, reg, offset);
 
 		/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
@@ -585,8 +588,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
 	 * in bits 15:8 and the patch level in bits 7:0 which is exactly what
 	 * the REG_PHY_REVISION register layout is.
 	 */
-
-	return priv->hw_params.gphy_rev;
+	if (priv->int_phy_mask & BIT(port))
+		return priv->hw_params.gphy_rev;
+	else
+		return 0;
 }
 
 static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
@@ -436,34 +436,32 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
 			 TD_DM_DRVP(8) | TD_DM_DRVN(8));
 
 	/* Setup core clock for MT7530 */
-	if (!trgint) {
-		/* Disable MT7530 core clock */
-		core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+	/* Disable MT7530 core clock */
+	core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
 
-		/* Disable PLL, since phy_device has not yet been created
-		 * provided for phy_[read,write]_mmd_indirect is called, we
-		 * provide our own core_write_mmd_indirect to complete this
-		 * function.
-		 */
-		core_write_mmd_indirect(priv,
-					CORE_GSWPLL_GRP1,
-					MDIO_MMD_VEND2,
-					0);
+	/* Disable PLL, since phy_device has not yet been created
+	 * provided for phy_[read,write]_mmd_indirect is called, we
+	 * provide our own core_write_mmd_indirect to complete this
+	 * function.
+	 */
+	core_write_mmd_indirect(priv,
+				CORE_GSWPLL_GRP1,
+				MDIO_MMD_VEND2,
+				0);
 
-		/* Set core clock into 500Mhz */
-		core_write(priv, CORE_GSWPLL_GRP2,
-			   RG_GSWPLL_POSDIV_500M(1) |
-			   RG_GSWPLL_FBKDIV_500M(25));
+	/* Set core clock into 500Mhz */
+	core_write(priv, CORE_GSWPLL_GRP2,
+		   RG_GSWPLL_POSDIV_500M(1) |
+		   RG_GSWPLL_FBKDIV_500M(25));
 
-		/* Enable PLL */
-		core_write(priv, CORE_GSWPLL_GRP1,
-			   RG_GSWPLL_EN_PRE |
-			   RG_GSWPLL_POSDIV_200M(2) |
-			   RG_GSWPLL_FBKDIV_200M(32));
+	/* Enable PLL */
+	core_write(priv, CORE_GSWPLL_GRP1,
+		   RG_GSWPLL_EN_PRE |
+		   RG_GSWPLL_POSDIV_200M(2) |
+		   RG_GSWPLL_FBKDIV_200M(32));
 
-		/* Enable MT7530 core clock */
-		core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
-	}
+	/* Enable MT7530 core clock */
+	core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
 
 	/* Setup the MT7530 TRGMII Tx Clock */
 	core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
@@ -54,7 +54,7 @@ config B44_PCI
 config BCM4908_ENET
 	tristate "Broadcom BCM4908 internal mac support"
 	depends on ARCH_BCM4908 || COMPILE_TEST
-	default y
+	default y if ARCH_BCM4908
 	help
 	  This driver supports Ethernet controller integrated into Broadcom
 	  BCM4908 family SoCs.
@@ -722,7 +722,7 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
 		kvfree(tx_info);
 		return 0;
 	}
-	tx_info->open_state = false;
+	tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
 	spin_unlock(&tx_info->lock);
 
 	complete(&tx_info->completion);
@@ -1337,6 +1337,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
 	 */
 	if (unlikely(priv->need_mac_restart)) {
 		ftgmac100_start_hw(priv);
+		priv->need_mac_restart = false;
 
 		/* Re-enable "bad" interrupts */
 		iowrite32(FTGMAC100_INT_BAD,
@@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
 	} else {
 		data &= ~IGP02E1000_PM_D0_LPLU;
 		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+		if (ret_val)
+			return ret_val;
 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
 		 * during Dx states where the power conservation is most
 		 * important.  During driver activity we should enable
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright(c) 1999 - 2018 Intel Corporation. */
 
-#ifndef _E1000_HW_H_
-#define _E1000_HW_H_
+#ifndef _E1000E_HW_H_
+#define _E1000E_HW_H_
 
 #include "regs.h"
 #include "defines.h"
@@ -714,4 +714,4 @@ struct e1000_hw {
 #include "80003es2lan.h"
 #include "ich8lan.h"
 
-#endif
+#endif /* _E1000E_HW_H_ */
@@ -5974,15 +5974,19 @@ static void e1000_reset_task(struct work_struct *work)
 	struct e1000_adapter *adapter;
 	adapter = container_of(work, struct e1000_adapter, reset_task);
 
+	rtnl_lock();
 	/* don't run the task if already down */
-	if (test_bit(__E1000_DOWN, &adapter->state))
+	if (test_bit(__E1000_DOWN, &adapter->state)) {
+		rtnl_unlock();
 		return;
+	}
 
 	if (!(adapter->flags & FLAG_RESTART_NOW)) {
 		e1000e_dump(adapter);
 		e_err("Reset adapter unexpectedly\n");
 	}
 	e1000e_reinit_locked(adapter);
+	rtnl_unlock();
 }
 
 /**
@@ -3258,6 +3258,17 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
 	return 0;
 }
 
+/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
 /**
  * i40e_configure_rx_ring - Configure a receive ring context
  * @ring: The Rx ring to configure
@@ -3369,6 +3380,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 	else
 		set_ring_build_skb_enabled(ring);
 
+	ring->rx_offset = i40e_rx_offset(ring);
+
 	/* cache tail for quicker writes, and clear the reg before use */
 	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
 	writel(0, ring->tail);
@@ -1569,17 +1569,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 	}
 }
 
-/**
- * i40e_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
-{
-	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
-}
-
 /**
  * i40e_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
@@ -1608,7 +1597,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
-	rx_ring->rx_offset = i40e_rx_offset(rx_ring);
 
 	/* XDP RX-queue info only needed for RX rings exposed to XDP */
 	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
@@ -274,6 +274,22 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
 	tlan_ctx->legacy_int = ICE_TX_LEGACY;
 }
 
+/**
+ * ice_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+{
+	if (ice_ring_uses_build_skb(rx_ring))
+		return ICE_SKB_PAD;
+	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+		return XDP_PACKET_HEADROOM;
+
+	return 0;
+}
+
 /**
  * ice_setup_rx_ctx - Configure a receive ring context
  * @ring: The Rx ring to configure
@@ -413,11 +429,15 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	else
 		ice_set_ring_build_skb_ena(ring);
 
+	ring->rx_offset = ice_rx_offset(ring);
+
 	/* init queue specific tail register */
 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
 	writel(0, ring->tail);
 
 	if (ring->xsk_pool) {
+		bool ok;
+
 		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
 			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
 				 num_bufs, ring->q_index);
@@ -426,8 +446,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 			return 0;
 		}
 
-		err = ice_alloc_rx_bufs_zc(ring, num_bufs);
-		if (err)
+		ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
+		if (!ok)
 			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
 				 ring->q_index, pf_q);
 		return 0;
@@ -443,22 +443,6 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
 	}
 }
 
-/**
- * ice_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
-{
-	if (ice_ring_uses_build_skb(rx_ring))
-		return ICE_SKB_PAD;
-	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
-		return XDP_PACKET_HEADROOM;
-
-	return 0;
-}
-
 /**
  * ice_setup_rx_ring - Allocate the Rx descriptors
  * @rx_ring: the Rx ring to set up
@@ -493,7 +477,6 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
 
 	rx_ring->next_to_use = 0;
 	rx_ring->next_to_clean = 0;
-	rx_ring->rx_offset = ice_rx_offset(rx_ring);
 
 	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
 		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
@@ -358,18 +358,18 @@ xsk_pool_if_up:
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
- * Returns false if all allocations were successful, true if any fail.
+ * Returns true if all allocations were successful, false if any fail.
 */
 bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
 {
 	union ice_32b_rx_flex_desc *rx_desc;
 	u16 ntu = rx_ring->next_to_use;
 	struct ice_rx_buf *rx_buf;
-	bool ret = false;
+	bool ok = true;
 	dma_addr_t dma;
 
 	if (!count)
-		return false;
+		return true;
 
 	rx_desc = ICE_RX_DESC(rx_ring, ntu);
 	rx_buf = &rx_ring->rx_buf[ntu];
@@ -377,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
 	do {
 		rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
 		if (!rx_buf->xdp) {
-			ret = true;
+			ok = false;
 			break;
 		}
 
@@ -402,7 +402,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
 		ice_release_rx_desc(rx_ring, ntu);
 	}
 
-	return ret;
+	return ok;
 }
 
 /**
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright(c) 2007 - 2018 Intel Corporation. */
 
-#ifndef _E1000_HW_H_
-#define _E1000_HW_H_
+#ifndef _E1000_IGB_HW_H_
+#define _E1000_IGB_HW_H_
 
 #include <linux/types.h>
 #include <linux/delay.h>
@@ -551,4 +551,4 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
 
 void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
 void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
-#endif /* _E1000_HW_H_ */
+#endif /* _E1000_IGB_HW_H_ */
@@ -748,8 +748,8 @@ void igb_ptp_suspend(struct igb_adapter *adapter);
 void igb_ptp_rx_hang(struct igb_adapter *adapter);
 void igb_ptp_tx_hang(struct igb_adapter *adapter);
 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-			 struct sk_buff *skb);
+int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+			struct sk_buff *skb);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
@@ -8214,7 +8214,8 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
 }
 
-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+				  int rx_buf_pgcnt)
 {
 	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
 	struct page *page = rx_buffer->page;
@@ -8225,7 +8226,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
 		return false;
 #else
 #define IGB_LAST_OFFSET \
@@ -8301,9 +8302,10 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
 		return NULL;
 
 	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
-		igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb);
-		xdp->data += IGB_TS_HDR_LEN;
-		size -= IGB_TS_HDR_LEN;
+		if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
+			xdp->data += IGB_TS_HDR_LEN;
+			size -= IGB_TS_HDR_LEN;
+		}
 	}
 
 	/* Determine available headroom for copy */
@@ -8364,8 +8366,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
 
 	/* pull timestamp out of packet data */
 	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
-		__skb_pull(skb, IGB_TS_HDR_LEN);
+		if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
+			__skb_pull(skb, IGB_TS_HDR_LEN);
 	}
 
 	/* update buffer offset */
@@ -8614,11 +8616,17 @@ static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
 }
 
 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
-					       const unsigned int size)
+					       const unsigned int size, int *rx_buf_pgcnt)
 {
 	struct igb_rx_buffer *rx_buffer;
 
 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	*rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+		page_count(rx_buffer->page);
+#else
+		0;
+#endif
 	prefetchw(rx_buffer->page);
 
 	/* we are reusing so sync this buffer for CPU use */
@@ -8634,9 +8642,9 @@ static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
 }
 
 static void igb_put_rx_buffer(struct igb_ring *rx_ring,
-			      struct igb_rx_buffer *rx_buffer)
+			      struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
 {
-	if (igb_can_reuse_rx_page(rx_buffer)) {
+	if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
 		/* hand second half of page back to the ring */
 		igb_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
@@ -8664,6 +8672,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 	unsigned int xdp_xmit = 0;
 	struct xdp_buff xdp;
 	u32 frame_sz = 0;
+	int rx_buf_pgcnt;
 
 	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
 #if (PAGE_SIZE < 8192)
@@ -8693,7 +8702,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 		 */
 		dma_rmb();
 
-		rx_buffer = igb_get_rx_buffer(rx_ring, size);
+		rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
 
 		/* retrieve a buffer from the ring */
 		if (!skb) {
@@ -8736,7 +8745,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 			break;
 		}
 
-		igb_put_rx_buffer(rx_ring, rx_buffer);
+		igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
 		cleaned_count++;
 
 		/* fetch next buffer in frame if non-eop */
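Editor's note on the igb_main.c hunks above: the page refcount is now sampled once in igb_get_rx_buffer() (as rx_buf_pgcnt) and passed down, so igb_can_reuse_rx_page() compares against a value read before the XDP program could release the page. A minimal sketch of the reuse rule under that assumption; all names below are illustrative, not the driver's:

#include <assert.h>
#include <stdbool.h>

/* A half-page can be recycled only when the driver is effectively the
 * sole owner: at most one reference beyond the pagecnt_bias it holds.
 */
static bool can_reuse_half_page(int rx_buf_pgcnt, unsigned int pagecnt_bias)
{
	return (rx_buf_pgcnt - (int)pagecnt_bias) <= 1;
}

int main(void)
{
	assert(can_reuse_half_page(2, 1));	/* driver holds both halves */
	assert(!can_reuse_half_page(3, 1));	/* the stack still holds one */
	return 0;
}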
@@ -856,6 +856,9 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
 	dev_kfree_skb_any(skb);
 }
 
+#define IGB_RET_PTP_DISABLED 1
+#define IGB_RET_PTP_INVALID 2
+
 /**
  * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
  * @q_vector: Pointer to interrupt specific structure
@@ -864,19 +867,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
  *
  * This function is meant to retrieve a timestamp from the first buffer of an
  * incoming frame. The value is stored in little endian format starting on
- * byte 8.
+ * byte 8
+ *
+ * Returns: 0 if success, nonzero if failure
  **/
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-			 struct sk_buff *skb)
+int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+			struct sk_buff *skb)
 {
-	__le64 *regval = (__le64 *)va;
 	struct igb_adapter *adapter = q_vector->adapter;
+	__le64 *regval = (__le64 *)va;
 	int adjust = 0;
 
+	if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
+		return IGB_RET_PTP_DISABLED;
+
 	/* The timestamp is recorded in little endian format.
 	 * DWORD: 0        1        2        3
 	 * Field: Reserved Reserved SYSTIML  SYSTIMH
 	 */
+
+	/* check reserved dwords are zero, be/le doesn't matter for zero */
+	if (regval[0])
+		return IGB_RET_PTP_INVALID;
+
 	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
 				   le64_to_cpu(regval[1]));
 
@@ -896,6 +909,8 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
 	}
 	skb_hwtstamps(skb)->hwtstamp =
 		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+
+	return 0;
 }
 
 /**
@@ -906,13 +921,15 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
 * This function is meant to retrieve a timestamp from the internal registers
 * of the adapter and store it in the skb.
 **/
-void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
-			 struct sk_buff *skb)
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
-	u64 regval;
 	int adjust = 0;
+	u64 regval;
 
 	if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
 		return;
 
 	/* If this bit is set, then the RX registers contain the time stamp. No
 	 * other packet will be time stamped until we read these registers, so
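The igb_ptp.c change above makes igb_ptp_rx_pktstamp() report failure instead of blindly consuming IGB_TS_HDR_LEN bytes: it bails out when PTP is disabled, and treats a non-zero reserved dword as "this header is not a timestamp". A standalone sketch of that validation, assuming the same 16-byte layout (two reserved dwords, then SYSTIML/SYSTIMH in little endian); the parsing helper itself is hypothetical:

#include <stdint.h>
#include <string.h>

#define IGB_RET_PTP_INVALID 2

/* Returns 0 and the raw 64-bit SYSTIM value on success. memcpy() is
 * used instead of pointer casts to avoid alignment traps; byte order
 * is assumed little endian to keep the sketch short.
 */
static int parse_rx_pktstamp(const uint8_t va[16], uint64_t *systim)
{
	uint64_t reserved;

	memcpy(&reserved, va, 8);
	if (reserved)			/* reserved dwords must be zero */
		return IGB_RET_PTP_INVALID;
	memcpy(systim, va + 8, 8);
	return 0;
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	uint64_t ts;

	return parse_rx_pktstamp(buf, &ts);	/* 0: all-zero header is valid */
}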
@@ -547,7 +547,7 @@ void igc_ptp_init(struct igc_adapter *adapter);
 void igc_ptp_reset(struct igc_adapter *adapter);
 void igc_ptp_suspend(struct igc_adapter *adapter);
 void igc_ptp_stop(struct igc_adapter *adapter);
-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
 			 struct sk_buff *skb);
 int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
@@ -1711,6 +1711,9 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
 						 Autoneg);
 	}
 
+	/* Set pause flow control settings */
+	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+
 	switch (hw->fc.requested_mode) {
 	case igc_fc_full:
 		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
@@ -1725,9 +1728,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
 						     Asym_Pause);
 		break;
 	default:
-		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
-		ethtool_link_ksettings_add_link_mode(cmd, advertising,
-						     Asym_Pause);
 		break;
 	}
 
 	status = pm_runtime_suspended(&adapter->pdev->dev) ?
@@ -3831,10 +3831,19 @@ static void igc_reset_task(struct work_struct *work)
 
 	adapter = container_of(work, struct igc_adapter, reset_task);
 
+	rtnl_lock();
+	/* If we're already down or resetting, just bail */
+	if (test_bit(__IGC_DOWN, &adapter->state) ||
+	    test_bit(__IGC_RESETTING, &adapter->state)) {
+		rtnl_unlock();
+		return;
+	}
+
 	igc_rings_dump(adapter);
 	igc_regs_dump(adapter);
 	netdev_err(adapter->netdev, "Reset adapter\n");
 	igc_reinit_locked(adapter);
+	rtnl_unlock();
 }
 
 /**
@@ -152,46 +152,54 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
 }
 
 /**
- * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
  * @q_vector: Pointer to interrupt specific structure
  * @va: Pointer to address containing Rx buffer
  * @skb: Buffer containing timestamp and packet
  *
- * This function is meant to retrieve the first timestamp from the
- * first buffer of an incoming frame. The value is stored in little
- * endian format starting on byte 0. There's a second timestamp
- * starting on byte 8.
- **/
-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+ * This function retrieves the timestamp saved in the beginning of packet
+ * buffer. While two timestamps are available, one in timer0 reference and the
+ * other in timer1 reference, this function considers only the timestamp in
+ * timer0 reference.
+ */
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
 			 struct sk_buff *skb)
 {
 	struct igc_adapter *adapter = q_vector->adapter;
-	__le64 *regval = (__le64 *)va;
-	int adjust = 0;
+	u64 regval;
+	int adjust;
 
-	/* The timestamp is recorded in little endian format.
-	 * DWORD: | 0          | 1           | 2          | 3
-	 * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High
+	/* Timestamps are saved in little endian at the beginning of the packet
+	 * buffer following the layout:
+	 *
+	 * DWORD: | 0              | 1              | 2              | 3              |
+	 * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
+	 *
+	 * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
+	 * part of the timestamp.
 	 */
-	igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
-				   le64_to_cpu(regval[0]));
+	regval = le32_to_cpu(va[2]);
+	regval |= (u64)le32_to_cpu(va[3]) << 32;
+	igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
 
-	/* adjust timestamp for the RX latency based on link speed */
-	if (adapter->hw.mac.type == igc_i225) {
-		switch (adapter->link_speed) {
-		case SPEED_10:
-			adjust = IGC_I225_RX_LATENCY_10;
-			break;
-		case SPEED_100:
-			adjust = IGC_I225_RX_LATENCY_100;
-			break;
-		case SPEED_1000:
-			adjust = IGC_I225_RX_LATENCY_1000;
-			break;
-		case SPEED_2500:
-			adjust = IGC_I225_RX_LATENCY_2500;
-			break;
-		}
+	/* Adjust timestamp for the RX latency based on link speed */
+	switch (adapter->link_speed) {
+	case SPEED_10:
+		adjust = IGC_I225_RX_LATENCY_10;
+		break;
+	case SPEED_100:
+		adjust = IGC_I225_RX_LATENCY_100;
+		break;
+	case SPEED_1000:
+		adjust = IGC_I225_RX_LATENCY_1000;
+		break;
+	case SPEED_2500:
+		adjust = IGC_I225_RX_LATENCY_2500;
+		break;
+	default:
+		adjust = 0;
+		netdev_warn_once(adapter->netdev, "Imprecise timestamp\n");
+		break;
 	}
 	skb_hwtstamps(skb)->hwtstamp =
 		ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
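The igc_ptp.c rewrite above stops treating the packet prefix as an array of __le64: it reads the two 32-bit halves of the Timer0 stamp explicitly (dwords 2 and 3) and widens them before combining. A user-space sketch of the same extraction, using glibc's le32toh()/htole32() in place of the kernel's le32_to_cpu(); the function name is illustrative:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t rx_pktstamp_timer0(const uint32_t va[4])
{
	uint64_t regval;

	regval = le32toh(va[2]);			/* Timer0 SYSTIML (ns part) */
	regval |= (uint64_t)le32toh(va[3]) << 32;	/* Timer0 SYSTIMH (s part) */
	return regval;
}

int main(void)
{
	uint32_t prefix[4] = { 0, 0, htole32(0x1234), htole32(0x1) };

	printf("0x%llx\n", (unsigned long long)rx_pktstamp_timer0(prefix));
	return 0;
}

The cast before the shift matters: without it the high word would be shifted inside 32 bits and lost — the same bug class the dr_ste_v1 hunk at the end of this diff fixes.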
@@ -4118,6 +4118,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 #endif
 	}
 
+	ring->rx_offset = ixgbe_rx_offset(ring);
+
 	if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
 		u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
 
@@ -6578,7 +6580,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
-	rx_ring->rx_offset = ixgbe_rx_offset(rx_ring);
 
 	/* XDP RX-queue info */
 	if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
@@ -6,7 +6,7 @@
 config NET_VENDOR_MARVELL
 	bool "Marvell devices"
 	default y
-	depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET || COMPILE_TEST
+	depends on PCI || CPU_PXA168 || PPC32 || PLAT_ORION || INET || COMPILE_TEST
 	help
 	  If you have a network (Ethernet) card belonging to this class, say Y.
 
@@ -19,7 +19,7 @@ if NET_VENDOR_MARVELL
 
 config MV643XX_ETH
 	tristate "Marvell Discovery (643XX) and Orion ethernet support"
-	depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST
+	depends on PPC32 || PLAT_ORION || COMPILE_TEST
 	depends on INET
 	select PHYLIB
 	select MVMDIO
@@ -2684,7 +2684,7 @@ static const struct of_device_id mv643xx_eth_shared_ids[] = {
 MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
 #endif
 
-#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60)
+#ifdef CONFIG_OF_IRQ
 #define mv643xx_eth_property(_np, _name, _v)				\
 	do {								\
 		u32 tmp;						\
@@ -13499,8 +13499,6 @@ static struct npc_mcam_kex npc_mkex_default = {
 	[NPC_LT_LC_IP] = {
 		/* SIP+DIP: 8 bytes, KW2[63:0] */
 		KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
-		/* TOS: 1 byte, KW1[63:56] */
-		KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
 	},
 	/* Layer C: IPv6 */
 	[NPC_LT_LC_IP6] = {
@@ -2462,8 +2462,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
 		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
 
 	for (irq = 0; irq < rvu->num_vec; irq++) {
-		if (rvu->irq_allocated[irq])
+		if (rvu->irq_allocated[irq]) {
 			free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
+			rvu->irq_allocated[irq] = false;
+		}
 	}
 
 	pci_free_irq_vectors(rvu->pdev);
@@ -2975,8 +2977,8 @@ static void rvu_remove(struct pci_dev *pdev)
 	struct rvu *rvu = pci_get_drvdata(pdev);
 
 	rvu_dbg_exit(rvu);
-	rvu_unregister_interrupts(rvu);
 	rvu_unregister_dl(rvu);
+	rvu_unregister_interrupts(rvu);
 	rvu_flr_wq_destroy(rvu);
 	rvu_cgx_exit(rvu);
 	rvu_fwdata_exit(rvu);
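The rvu.c hunk above pairs free_irq() with clearing the irq_allocated[] flag, so a second teardown pass cannot free the same vector twice. A toy model of that bookkeeping pattern — every type and name here is invented for illustration:

#include <stdbool.h>
#include <stddef.h>

struct irq_table {
	bool allocated[64];
	size_t num_vec;
};

static void release_vector(size_t irq) { (void)irq; /* stand-in for free_irq() */ }

static void unregister_interrupts(struct irq_table *t)
{
	for (size_t irq = 0; irq < t->num_vec; irq++) {
		if (t->allocated[irq]) {
			release_vector(irq);
			t->allocated[irq] = false;	/* the added line's job */
		}
	}
}

int main(void)
{
	struct irq_table t = { .allocated = { true, true }, .num_vec = 64 };

	unregister_interrupts(&t);
	unregister_interrupts(&t);	/* second pass is now a no-op */
	return 0;
}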
@@ -678,6 +678,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
 			 u8 *intf, u8 *ena);
 bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
 u32 rvu_cgx_get_fifolen(struct rvu *rvu);
+void *rvu_first_cgx_pdata(struct rvu *rvu);
 
 /* CPT APIs */
 int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
@@ -89,6 +89,21 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
 	return rvu->cgx_idmap[cgx_id];
 }
 
+/* Return first enabled CGX instance if none are enabled then return NULL */
+void *rvu_first_cgx_pdata(struct rvu *rvu)
+{
+	int first_enabled_cgx = 0;
+	void *cgxd = NULL;
+
+	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
+		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
+		if (cgxd)
+			break;
+	}
+
+	return cgxd;
+}
+
 /* Based on P2X connectivity find mapped NIX block for a PF */
 static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
 				  int cgx_id, int lmac_id)
@@ -711,10 +726,9 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
 u32 rvu_cgx_get_fifolen(struct rvu *rvu)
 {
 	struct mac_ops *mac_ops;
-	int rvu_def_cgx_id = 0;
 	u32 fifo_len;
 
-	mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
 	fifo_len = mac_ops ? mac_ops->fifo_len : 0;
 
 	return fifo_len;
@@ -234,12 +234,14 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
 					  char __user *buffer,
 					  size_t count, loff_t *ppos)
 {
-	int index, off = 0, flag = 0, go_back = 0, off_prev;
+	int index, off = 0, flag = 0, go_back = 0, len = 0;
 	struct rvu *rvu = filp->private_data;
 	int lf, pf, vf, pcifunc;
 	struct rvu_block block;
 	int bytes_not_copied;
+	int lf_str_size = 12;
 	int buf_size = 2048;
+	char *lfs;
 	char *buf;
 
 	/* don't allow partial reads */
@@ -249,12 +251,20 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
 	buf = kzalloc(buf_size, GFP_KERNEL);
 	if (!buf)
 		return -ENOSPC;
-	off += scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
+
+	lfs = kzalloc(lf_str_size, GFP_KERNEL);
+	if (!lfs) {
+		kfree(buf);
+		return -ENOMEM;
+	}
+	off += scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
+			 "pcifunc");
 	for (index = 0; index < BLK_COUNT; index++)
-		if (strlen(rvu->hw->block[index].name))
-			off += scnprintf(&buf[off], buf_size - 1 - off,
-					 "%*s\t", (index - 1) * 2,
-					 rvu->hw->block[index].name);
+		if (strlen(rvu->hw->block[index].name)) {
+			off += scnprintf(&buf[off], buf_size - 1 - off,
+					 "%-*s", lf_str_size,
+					 rvu->hw->block[index].name);
+		}
 	off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
 		for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
@@ -263,14 +273,15 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
 				continue;
 
 			if (vf) {
+				sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
 				go_back = scnprintf(&buf[off],
 						    buf_size - 1 - off,
-						    "PF%d:VF%d\t\t", pf,
-						    vf - 1);
+						    "%-*s", lf_str_size, lfs);
 			} else {
+				sprintf(lfs, "PF%d", pf);
 				go_back = scnprintf(&buf[off],
 						    buf_size - 1 - off,
-						    "PF%d\t\t", pf);
+						    "%-*s", lf_str_size, lfs);
 			}
 
 			off += go_back;
@@ -278,20 +289,22 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
 				block = rvu->hw->block[index];
 				if (!strlen(block.name))
 					continue;
-				off_prev = off;
+				len = 0;
+				lfs[len] = '\0';
 				for (lf = 0; lf < block.lf.max; lf++) {
 					if (block.fn_map[lf] != pcifunc)
 						continue;
 					flag = 1;
-					off += scnprintf(&buf[off], buf_size - 1
-							- off, "%3d,", lf);
+					len += sprintf(&lfs[len], "%d,", lf);
 				}
-				if (flag && off_prev != off)
-					off--;
-				else
-					go_back++;
+
+				if (flag)
+					len--;
+				lfs[len] = '\0';
 				off += scnprintf(&buf[off], buf_size - 1 - off,
-						 "\t");
+						 "%-*s", lf_str_size, lfs);
+				if (!strlen(lfs))
+					go_back += lf_str_size;
 			}
 			if (!flag)
 				off -= go_back;
@@ -303,6 +316,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
 	}
 
 	bytes_not_copied = copy_to_user(buffer, buf, off);
+	kfree(lfs);
 	kfree(buf);
 
 	if (bytes_not_copied)
@@ -319,7 +333,6 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
 	struct rvu *rvu = filp->private;
 	struct pci_dev *pdev = NULL;
 	struct mac_ops *mac_ops;
-	int rvu_def_cgx_id = 0;
 	char cgx[10], lmac[10];
 	struct rvu_pfvf *pfvf;
 	int pf, domain, blkid;
@@ -327,7 +340,10 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
 	u16 pcifunc;
 
 	domain = 2;
-	mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+	/* There can be no CGX devices at all */
+	if (!mac_ops)
+		return 0;
 	seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
 		   mac_ops->name);
 	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
@@ -1818,7 +1834,6 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
 {
 	struct mac_ops *mac_ops;
 	unsigned long lmac_bmap;
-	int rvu_def_cgx_id = 0;
 	int i, lmac_id;
 	char dname[20];
 	void *cgx;
@@ -1826,7 +1841,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
 	if (!cgx_get_cgxcnt_max())
 		return;
 
-	mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
 	if (!mac_ops)
 		return;
 
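The debugfs rewrite above replaces ad-hoc tab counting with fixed-width columns: every cell, header and data alike, is printed with "%-*s" into an lf_str_size-wide field. A tiny demonstration of why that keeps columns aligned regardless of cell length (the values are made up):

#include <stdio.h>

int main(void)
{
	const int lf_str_size = 12;	/* one width for every column */

	printf("%-*s%-*s\n", lf_str_size, "pcifunc", lf_str_size, "NPA");
	printf("%-*s%-*s\n", lf_str_size, "PF0:VF0", lf_str_size, "0,1,");
	printf("%-*s%-*s\n", lf_str_size, "PF1", lf_str_size, "2,");
	return 0;
}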
@@ -2629,7 +2629,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
 	struct nix_rx_flowkey_alg *field;
 	struct nix_rx_flowkey_alg tmp;
 	u32 key_type, valid_key;
-	int l4_key_offset;
+	int l4_key_offset = 0;
 
 	if (!alg)
 		return -EINVAL;
@@ -2490,10 +2490,10 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
 		index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
 		if (index >= mcam->bmap_entries)
 			break;
+		entry = index + 1;
 		if (mcam->entry2cntr_map[index] != req->cntr)
 			continue;
 
-		entry = index + 1;
 		npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
 					      index, req->cntr);
 	}
@@ -257,17 +257,19 @@ int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
 int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
 		       u32 *rule_locs)
 {
+	u32 rule_cnt = nfc->rule_cnt;
 	u32 location = 0;
 	int idx = 0;
 	int err = 0;
 
 	nfc->data = pfvf->flow_cfg->ntuple_max_flows;
-	while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) {
+	while ((!err || err == -ENOENT) && idx < rule_cnt) {
 		err = otx2_get_flow(pfvf, nfc, location);
 		if (!err)
 			rule_locs[idx++] = location;
 		location++;
 	}
+	nfc->rule_cnt = rule_cnt;
 
 	return err;
 }
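The otx2_get_all_flows() fix above snapshots the caller's rule_cnt before the loop and restores it afterwards, since the per-rule lookup may scribble on the same field. A simplified model (struct and return codes reduced to bare essentials; -2 stands in for -ENOENT, and MAX_FLOWS is an invented bound):

#include <stdint.h>

#define MAX_FLOWS 8

struct rxnfc { uint32_t rule_cnt; };

static int get_flow(struct rxnfc *nfc, uint32_t location)
{
	(void)nfc;
	return location < 3 ? 0 : -2;	/* pretend rules live at 0..2 */
}

static int get_all_flows(struct rxnfc *nfc, uint32_t *rule_locs)
{
	uint32_t rule_cnt = nfc->rule_cnt;	/* snapshot the loop bound */
	uint32_t location = 0;
	int idx = 0, err = 0;

	while ((!err || err == -2) && idx < (int)rule_cnt &&
	       location < MAX_FLOWS) {
		err = get_flow(nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	nfc->rule_cnt = rule_cnt;		/* restore for the caller */
	return err;
}

int main(void)
{
	struct rxnfc nfc = { .rule_cnt = 3 };
	uint32_t locs[3];

	return get_all_flows(&nfc, locs);	/* 0: found rules 0,1,2 */
}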
@@ -1672,6 +1672,7 @@ int otx2_stop(struct net_device *netdev)
 	struct otx2_nic *pf = netdev_priv(netdev);
 	struct otx2_cq_poll *cq_poll = NULL;
 	struct otx2_qset *qset = &pf->qset;
+	struct otx2_rss_info *rss;
 	int qidx, vec, wrk;
 
 	netif_carrier_off(netdev);
@@ -1684,6 +1685,10 @@ int otx2_stop(struct net_device *netdev)
 	/* First stop packet Rx/Tx */
 	otx2_rxtx_enable(pf, false);
 
+	/* Clear RSS enable flag */
+	rss = &pf->hw.rss_info;
+	rss->enable = false;
+
 	/* Cleanup Queue IRQ */
 	vec = pci_irq_vector(pf->pdev,
 			     pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
@@ -1544,8 +1544,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
 	clk_disable_unprepare(pep->clk);
 	mdiobus_unregister(pep->smi_bus);
 	mdiobus_free(pep->smi_bus);
-	unregister_netdev(dev);
 	cancel_work_sync(&pep->tx_timeout_task);
+	unregister_netdev(dev);
 	free_netdev(dev);
 	return 0;
 }
@@ -92,14 +92,15 @@ struct page_pool;
 	 MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
 #define MLX5_MPWRQ_PAGES_PER_WQE	BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
 
-#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5_ALIGN_MTTS(mtts)		(ALIGN(mtts, 8))
+#define MLX5_ALIGNED_MTTS_OCTW(mtts)	((mtts) / 2)
+#define MLX5_MTT_OCTW(mtts)		(MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
 /* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
  * WQEs, This page will absorb write overflow by the hardware, when
  * receiving packets larger than MTU. These oversize packets are
  * dropped by the driver at a later stage.
  */
-#define MLX5E_REQUIRED_WQE_MTTS		(ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8))
-#define MLX5E_LOG_ALIGNED_MPWQE_PPW	(ilog2(MLX5E_REQUIRED_WQE_MTTS))
+#define MLX5E_REQUIRED_WQE_MTTS		(MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
 #define MLX5E_REQUIRED_MTTS(wqes)	(wqes * MLX5E_REQUIRED_WQE_MTTS)
 #define MLX5E_MAX_RQ_NUM_MTTS	\
 	((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
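The en.h hunk above splits the old MLX5_MTT_OCTW() into an explicit align-then-halve pair so callers can also use the aligned MTT count directly. The arithmetic, reproduced in a compilable form (the ALIGN() here mirrors the kernel macro for power-of-two alignment):

#include <stdio.h>

#define ALIGN(x, a)			(((x) + (a) - 1) & ~((a) - 1))
#define MLX5_ALIGN_MTTS(mtts)		(ALIGN(mtts, 8))
#define MLX5_ALIGNED_MTTS_OCTW(mtts)	((mtts) / 2)
#define MLX5_MTT_OCTW(mtts)		(MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))

int main(void)
{
	/* e.g. 8 pages per WQE plus the overflow page = 9 MTTs; they are
	 * rounded up to 16 before halving, so the octword count stays whole.
	 */
	printf("9 MTTs -> %d aligned -> %d octwords\n",
	       MLX5_ALIGN_MTTS(9), MLX5_MTT_OCTW(9));
	return 0;
}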
@@ -1181,7 +1181,8 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
 
 	mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG,
 					&ctstate, &ctstate_mask);
-	if (ctstate_mask)
+
+	if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT)
 		return -EOPNOTSUPP;
 
 	ctstate_mask |= MLX5_CT_STATE_TRK_BIT;
@@ -685,14 +685,14 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
 	u16 vport_num;
 	int err = 0;
 
-	if (flow_attr->ip_version == 4) {
+	if (flow_attr->tun_ip_version == 4) {
 		/* Addresses are swapped for decap */
 		attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4;
 		attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4;
 		err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr);
 	}
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-	else if (flow_attr->ip_version == 6) {
+	else if (flow_attr->tun_ip_version == 6) {
 		/* Addresses are swapped for decap */
 		attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6;
 		attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6;
@@ -718,10 +718,10 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
 	esw_attr->rx_tun_attr->decap_vport = vport_num;
 
 out:
-	if (flow_attr->ip_version == 4)
+	if (flow_attr->tun_ip_version == 4)
 		mlx5e_route_lookup_ipv4_put(&attr);
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-	else if (flow_attr->ip_version == 6)
+	else if (flow_attr->tun_ip_version == 6)
 		mlx5e_route_lookup_ipv6_put(&attr);
 #endif
 	return err;
@@ -89,6 +89,7 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
 	 * required to establish routing.
 	 */
 	flow_flag_set(flow, TUN_RX);
+	flow->attr->tun_ip_version = ip_version;
 	return 0;
 }
 
@@ -1091,7 +1092,7 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
 	if (err || !esw_attr->rx_tun_attr->decap_vport)
 		goto out;
 
-	key.ip_version = attr->ip_version;
+	key.ip_version = attr->tun_ip_version;
 	if (key.ip_version == 4)
 		key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4;
 	else
@@ -227,6 +227,10 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
 	option_key = (struct geneve_opt *)&enc_opts.key->data[0];
 	option_mask = (struct geneve_opt *)&enc_opts.mask->data[0];
 
+	if (option_mask->opt_class == 0 && option_mask->type == 0 &&
+	    !memchr_inv(option_mask->opt_data, 0, option_mask->length * 4))
+		return 0;
+
 	if (option_key->length > max_tlv_option_data_len) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "Matching on GENEVE options: unsupported option len");
@@ -1887,6 +1887,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
 
 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
 		return -EOPNOTSUPP;
@@ -1896,7 +1897,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 		return -EINVAL;
 	}
 
-	mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+	err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+	if (err)
+		return err;
+
 	priv->channels.params.rx_cqe_compress_def = enable;
 
 	return 0;
@@ -2014,8 +2018,13 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
 	 */
 
 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		struct mlx5e_params old_params;
+
+		old_params = priv->channels.params;
 		priv->channels.params = new_channels.params;
 		err = mlx5e_num_channels_changed(priv);
+		if (err)
+			priv->channels.params = old_params;
 		goto out;
 	}
 
@@ -334,9 +334,9 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
 			       rq->wqe_overflow.addr);
 }
 
-static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
 {
-	return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
+	return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
 }
 
 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
@@ -577,7 +577,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 				mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
 			u32 byte_count =
 				rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
-			u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
+			u64 dma_offset = mlx5e_get_mpwqe_offset(i);
 
 			wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
 			wqe->data[0].byte_count = cpu_to_be32(byte_count);
@@ -2368,8 +2368,9 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
 {
 	switch (params->rq_wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		return order_base_2(MLX5E_UMR_WQEBBS) +
-			mlx5e_get_rq_log_wq_sz(rqp->rqc);
+		return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
+			     order_base_2(MLX5E_UMR_WQEBBS) +
+			     mlx5e_get_rq_log_wq_sz(rqp->rqc));
 	default: /* MLX5_WQ_TYPE_CYCLIC */
 		return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
 	}
@@ -2502,8 +2503,10 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
 {
 	int i;
 
-	if (chs->port_ptp)
+	if (chs->port_ptp) {
 		mlx5e_port_ptp_close(chs->port_ptp);
+		chs->port_ptp = NULL;
+	}
 
 	for (i = 0; i < chs->num; i++)
 		mlx5e_close_channel(chs->c[i]);
@@ -3810,6 +3813,15 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
 		for (j = 0; j < priv->max_opened_tc; j++) {
 			struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
 
+			s->tx_packets    += sq_stats->packets;
+			s->tx_bytes      += sq_stats->bytes;
+			s->tx_dropped    += sq_stats->dropped;
+		}
+	}
+	if (priv->port_ptp_opened) {
+		for (i = 0; i < priv->max_opened_tc; i++) {
+			struct mlx5e_sq_stats *sq_stats = &priv->port_ptp_stats.sq[i];
+
 			s->tx_packets    += sq_stats->packets;
 			s->tx_bytes      += sq_stats->bytes;
 			s->tx_dropped    += sq_stats->dropped;
@@ -3834,10 +3846,17 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	}
 
 	if (mlx5e_is_uplink_rep(priv)) {
+		struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+
 		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
 		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
 		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
 		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
+
+		/* vport multicast also counts packets that are dropped due to steering
+		 * or rx out of buffer
+		 */
+		stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
 	} else {
 		mlx5e_fold_sw_stats64(priv, stats);
 	}
@@ -4683,8 +4702,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 		struct mlx5e_channel *c = priv->channels.c[i];
 
 		mlx5e_rq_replace_xdp_prog(&c->rq, prog);
-		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+		if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) {
+			bpf_prog_inc(prog);
 			mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
+		}
 	}
 
 unlock:
@@ -4958,6 +4979,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
 						   priv->max_nch);
 	params->num_tc       = 1;
 
+	/* Set an initial non-zero value, so that mlx5e_select_queue won't
+	 * divide by zero if called before first activating channels.
+	 */
+	priv->num_tc_x_num_ch = params->num_channels * params->num_tc;
+
 	/* SQ */
 	params->log_sq_size = is_kdump_kernel() ?
 		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
@@ -5474,8 +5500,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
 		    struct net_device *netdev,
 		    struct mlx5_core_dev *mdev)
 {
-	memset(priv, 0, sizeof(*priv));
-
 	/* priv init */
 	priv->mdev = mdev;
 	priv->netdev = netdev;
@@ -5508,12 +5532,18 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
 {
 	int i;
 
+	/* bail if change profile failed and also rollback failed */
+	if (!priv->mdev)
+		return;
+
 	destroy_workqueue(priv->wq);
 	free_cpumask_var(priv->scratchpad.cpumask);
 
 	for (i = 0; i < priv->htb.max_qos_sqs; i++)
 		kfree(priv->htb.qos_sq_stats[i]);
 	kvfree(priv->htb.qos_sq_stats);
+
+	memset(priv, 0, sizeof(*priv));
 }
 
 struct net_device *
@@ -5630,11 +5660,10 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
 }
 
 static int
-mlx5e_netdev_attach_profile(struct mlx5e_priv *priv,
+mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
 			    const struct mlx5e_profile *new_profile, void *new_ppriv)
 {
-	struct net_device *netdev = priv->netdev;
-	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_priv *priv = netdev_priv(netdev);
 	int err;
 
 	err = mlx5e_priv_init(priv, netdev, mdev);
@@ -5647,10 +5676,16 @@ mlx5e_netdev_attach_profile(struct mlx5e_priv *priv,
 	priv->ppriv = new_ppriv;
 	err = new_profile->init(priv->mdev, priv->netdev);
 	if (err)
-		return err;
+		goto priv_cleanup;
 	err = mlx5e_attach_netdev(priv);
 	if (err)
-		new_profile->cleanup(priv);
+		goto profile_cleanup;
 	return err;
+
+profile_cleanup:
+	new_profile->cleanup(priv);
+priv_cleanup:
+	mlx5e_priv_cleanup(priv);
+	return err;
 }
 
@@ -5659,13 +5694,14 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
 {
 	unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
 	const struct mlx5e_profile *orig_profile = priv->profile;
+	struct net_device *netdev = priv->netdev;
+	struct mlx5_core_dev *mdev = priv->mdev;
 	void *orig_ppriv = priv->ppriv;
 	int err, rollback_err;
 
 	/* sanity */
 	if (new_max_nch != priv->max_nch) {
-		netdev_warn(priv->netdev,
-			    "%s: Replacing profile with different max channels\n",
+		netdev_warn(netdev, "%s: Replacing profile with different max channels\n",
 			    __func__);
 		return -EINVAL;
 	}
@@ -5675,22 +5711,19 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
 	priv->profile->cleanup(priv);
 	mlx5e_priv_cleanup(priv);
 
-	err = mlx5e_netdev_attach_profile(priv, new_profile, new_ppriv);
+	err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
 	if (err) { /* roll back to original profile */
-		netdev_warn(priv->netdev, "%s: new profile init failed, %d\n",
-			    __func__, err);
+		netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
 		goto rollback;
 	}
 
 	return 0;
 
 rollback:
-	rollback_err = mlx5e_netdev_attach_profile(priv, orig_profile, orig_ppriv);
-	if (rollback_err) {
-		netdev_err(priv->netdev,
-			   "%s: failed to rollback to orig profile, %d\n",
+	rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
+	if (rollback_err)
+		netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n",
 			   __func__, rollback_err);
-	}
 	return err;
 }
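The profile attach/rollback hunks above convert early returns into a goto-based unwind so that a failed attach also undoes mlx5e_priv_init(). The shape of that idiom, reduced to stubs (everything here is invented for illustration; attach is forced to fail to show the unwind order):

#include <stdio.h>

static int init_priv(void)		{ return 0; }
static void cleanup_priv(void)		{ puts("priv cleanup"); }
static int init_profile(void)		{ return 0; }
static void cleanup_profile(void)	{ puts("profile cleanup"); }
static int attach_netdev(void)		{ return -1; }

int main(void)
{
	int err;

	err = init_priv();
	if (err)
		return err;
	err = init_profile();
	if (err)
		goto priv_cleanup;
	err = attach_netdev();
	if (err)
		goto profile_cleanup;
	return 0;

profile_cleanup:
	cleanup_profile();
priv_cleanup:
	cleanup_priv();
	return err;
}

Each label releases exactly what was acquired before the jump, in reverse order — the property the mlx5e fix restores.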
@@ -500,7 +500,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	struct mlx5e_icosq *sq = rq->icosq;
 	struct mlx5_wq_cyc *wq = &sq->wq;
 	struct mlx5e_umr_wqe *umr_wqe;
-	u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
 	u16 pi;
 	int err;
 	int i;
@@ -531,7 +530,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
 	umr_wqe->ctrl.opmod_idx_opcode =
 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
 			    MLX5_OPCODE_UMR);
-	umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
+	umr_wqe->uctrl.xlt_offset =
+		cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix)));
 
 	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
 		.wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
@@ -2296,6 +2296,16 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 		*match_level = MLX5_MATCH_L4;
 	}
 
+	/* Currenlty supported only for MPLS over UDP */
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
+	    !netif_is_bareudp(filter_dev)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Matching on MPLS is supported only for MPLS over UDP");
+		netdev_err(priv->netdev,
+			   "Matching on MPLS is supported only for MPLS over UDP\n");
+		return -EOPNOTSUPP;
+	}
+
 	return 0;
 }
 
@@ -2899,6 +2909,37 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
 	return 0;
 }
 
+static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
+				   bool ct_flow, struct netlink_ext_ack *extack,
+				   struct mlx5e_priv *priv,
+				   struct mlx5_flow_spec *spec)
+{
+	if (!modify_tuple || ct_clear)
+		return true;
+
+	if (ct_flow) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "can't offload tuple modification with non-clear ct()");
+		netdev_info(priv->netdev,
+			    "can't offload tuple modification with non-clear ct()");
+		return false;
+	}
+
+	/* Add ct_state=-trk match so it will be offloaded for non ct flows
+	 * (or after clear action), as otherwise, since the tuple is changed,
+	 * we can't restore ct state
+	 */
+	if (mlx5_tc_ct_add_no_trk_match(spec)) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "can't offload tuple modification with ct matches and no ct(clear) action");
+		netdev_info(priv->netdev,
+			    "can't offload tuple modification with ct matches and no ct(clear) action");
+		return false;
+	}
+
+	return true;
+}
+
 static bool modify_header_match_supported(struct mlx5e_priv *priv,
 					  struct mlx5_flow_spec *spec,
 					  struct flow_action *flow_action,
@@ -2937,18 +2978,9 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
 		return err;
 	}
 
-	/* Add ct_state=-trk match so it will be offloaded for non ct flows
-	 * (or after clear action), as otherwise, since the tuple is changed,
-	 * we can't restore ct state
-	 */
-	if (!ct_clear && modify_tuple &&
-	    mlx5_tc_ct_add_no_trk_match(spec)) {
-		NL_SET_ERR_MSG_MOD(extack,
-				   "can't offload tuple modify header with ct matches");
-		netdev_info(priv->netdev,
-			    "can't offload tuple modify header with ct matches");
+	if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
+				    priv, spec))
 		return false;
-	}
 
 	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
 	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
@@ -4445,7 +4477,8 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
 	 */
 	if (rate) {
 		rate = (rate * BITS_PER_BYTE) + 500000;
-		rate_mbps = max_t(u64, do_div(rate, 1000000), 1);
+		do_div(rate, 1000000);
+		rate_mbps = max_t(u32, rate, 1);
 	}
 
 	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
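The apply_police_params() hunk above is a classic do_div() misuse: the kernel macro divides its first argument in place and returns the remainder, so the old code stored the remainder as the rate. A user-space reproduction with a stand-in macro (GCC statement expression, illustrative only):

#include <stdint.h>
#include <stdio.h>

/* Mimics the kernel's do_div(): n becomes the quotient, the macro
 * evaluates to the remainder.
 */
#define do_div(n, base) ({ uint32_t __rem = (n) % (base); (n) /= (base); __rem; })

int main(void)
{
	uint64_t rate = 100000000ULL * 8 + 500000; /* bytes/s -> bits/s, rounded */

	/* The buggy pattern keeps the remainder; the division it performs
	 * as a side effect leaves the quotient behind in rate.
	 */
	uint32_t wrong = (uint32_t)do_div(rate, 1000000);
	uint32_t right = rate > 1 ? (uint32_t)rate : 1;

	printf("wrong=%u Mbps, right=%u Mbps\n", wrong, right);
	return 0;
}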
@@ -79,6 +79,7 @@ struct mlx5_flow_attr {
 	u8 inner_match_level;
 	u8 outer_match_level;
 	u8 ip_version;
+	u8 tun_ip_version;
 	u32 flags;
 	union {
 		struct mlx5_esw_flow_attr esw_attr[0];
@@ -551,7 +551,8 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
 
 	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
 	    MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
-	    mlx5_eswitch_vport_match_metadata_enabled(esw))
+	    mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
 		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
 
 	if (attr->dest_ft) {
@@ -575,6 +575,7 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
 	MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
 	MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
 	MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
+	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
 	MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
 	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
 		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
@@ -233,6 +233,7 @@ int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
 	}
 
 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev));
 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 	MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
@@ -694,6 +695,7 @@ static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
 static void mlx5_rdma_netdev_free(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5i_priv *ipriv = priv->ppriv;
 	const struct mlx5e_profile *profile = priv->profile;
 
@@ -702,7 +704,7 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev)
 
 	if (!ipriv->sub_interface) {
 		mlx5i_pkey_qpn_ht_cleanup(netdev);
-		mlx5e_destroy_mdev_resources(priv->mdev);
+		mlx5e_destroy_mdev_resources(mdev);
 	}
 }
 
@@ -495,15 +495,15 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
 		return -EINVAL;
 
 	field_select = MLX5_MTPPS_FS_ENABLE;
+	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
+	if (pin < 0)
+		return -EBUSY;
+
 	if (on) {
 		bool rt_mode = mlx5_real_time_mode(mdev);
 		u32 nsec;
 		s64 sec;
 
-		pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
-		if (pin < 0)
-			return -EBUSY;
-
 		pin_mode = MLX5_PIN_MODE_OUT;
 		pattern = MLX5_OUT_PATTERN_PERIODIC;
 		ts.tv_sec = rq->perout.period.sec;
@@ -181,15 +181,13 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
 	u16 max_functions;
 	u16 function_id;
 	int err = 0;
-	bool ecpu;
 	int i;
 
 	max_functions = mlx5_sf_max_functions(dev);
 	function_id = MLX5_CAP_GEN(dev, sf_base_id);
-	ecpu = mlx5_read_embedded_cpu(dev);
 	/* Arm the vhca context as the vhca event notifier */
 	for (i = 0; i < max_functions; i++) {
-		err = mlx5_vhca_event_arm(dev, function_id, ecpu);
+		err = mlx5_vhca_event_arm(dev, function_id);
 		if (err)
 			return err;
 
@@ -6,7 +6,7 @@
 #include "sf.h"
 #include "mlx5_ifc_vhca_event.h"
 #include "vhca_event.h"
-#include "ecpf.h"
 #include "mlx5_core.h"
 
 struct mlx5_sf_hw {
 	u32 usr_sfnum;
@@ -18,7 +18,6 @@ struct mlx5_sf_hw_table {
 	struct mlx5_core_dev *dev;
 	struct mlx5_sf_hw *sfs;
 	int max_local_functions;
-	u8 ecpu: 1;
 	struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
 	struct notifier_block vhca_nb;
 };
@@ -64,7 +63,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
 	}
 	if (sw_id == -ENOSPC) {
 		err = -ENOSPC;
-		goto err;
+		goto exist_err;
 	}
 
 	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id);
@@ -72,7 +71,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
 	if (err)
 		goto err;
 
-	err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum);
+	err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum);
 	if (err)
 		goto vhca_err;
 
@@ -118,7 +117,7 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
 
 	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
 	mutex_lock(&table->table_lock);
-	err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out));
+	err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out));
 	if (err)
 		goto err;
 	state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
@@ -164,7 +163,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
 	table->dev = dev;
 	table->sfs = sfs;
 	table->max_local_functions = max_functions;
-	table->ecpu = mlx5_read_embedded_cpu(dev);
 	dev->priv.sf_hw_table = table;
 	mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions);
 	return 0;
@@ -20,7 +20,7 @@ struct mlx5_ifc_vhca_state_context_bits {
 
 	u8 sw_function_id[0x20];
 
-	u8 reserved_at_40[0x80];
+	u8 reserved_at_40[0x40];
 };
 
 struct mlx5_ifc_query_vhca_state_out_bits {
@@ -19,52 +19,51 @@ struct mlx5_vhca_event_work {
 	struct mlx5_vhca_state_event event;
 };
 
-int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
-			      bool ecpu, u32 *out, u32 outlen)
+int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen)
 {
 	u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};
 
 	MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE);
 	MLX5_SET(query_vhca_state_in, in, function_id, function_id);
-	MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu);
+	MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, 0);
 
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 
 static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
-				      bool ecpu, u32 *in, u32 inlen)
+				      u32 *in, u32 inlen)
 {
 	u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
 
 	MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
 	MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
-	MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+	MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0);
 
 	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
-int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id)
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id)
 {
 	u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
 	u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
 
 	MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
 	MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
-	MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+	MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0);
 	MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1);
 	MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id);
 
 	return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out);
 }
 
-int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu)
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id)
 {
 	u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
 
 	MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1);
 	MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1);
 
-	return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in));
+	return mlx5_cmd_modify_vhca_state(dev, function_id, in, sizeof(in));
 }
 
 static void
@@ -73,7 +72,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
 	u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
 	int err;
 
-	err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out));
+	err = mlx5_cmd_query_vhca_state(dev, event->function_id, out, sizeof(out));
 	if (err)
 		return;
 
@@ -82,7 +81,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
 	event->new_vhca_state = MLX5_GET(query_vhca_state_out, out,
 					 vhca_state_context.vhca_state);
 
-	mlx5_vhca_event_arm(dev, event->function_id, event->ecpu);
+	mlx5_vhca_event_arm(dev, event->function_id);
 
 	blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
 }
@@ -94,6 +93,7 @@ static void mlx5_vhca_state_work_handler(struct work_struct *_work)
 	struct mlx5_core_dev *dev = notifier->dev;
 
 	mlx5_vhca_event_notify(dev, &work->event);
+	kfree(work);
 }
 
 static int
@@ -110,7 +110,6 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
 	INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
 	work->notifier = notifier;
 	work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
-	work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function);
 	mlx5_events_work_enqueue(notifier->dev, &work->work);
 	return NOTIFY_OK;
 }
@@ -10,7 +10,6 @@ struct mlx5_vhca_state_event {
 	u16 function_id;
 	u16 sw_function_id;
 	u8 new_vhca_state;
-	bool ecpu;
 };
 
 static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
@@ -25,10 +24,10 @@ void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
 void mlx5_vhca_event_stop(struct mlx5_core_dev *dev);
 int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
 void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
-int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id);
-int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu);
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id);
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id);
 int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
-			      bool ecpu, u32 *out, u32 outlen);
+			      u32 *out, u32 outlen);
 #else
 
 static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
@@ -169,6 +169,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
 	MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
 	MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
 	MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
+	MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
 	MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma);
 	if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
 		MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
@@ -264,8 +264,8 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
 static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
 {
 	u64 index =
-		(MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
-		 MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32) << 26);
+		((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
+		 ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);
 
 	return index << 6;
 }
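The dr_ste_v1_get_miss_addr() fix above is a 32-bit shift-truncation bug: MLX5_GET() yields a 32-bit value, and shifting it left by 26 discards the upper bits unless the operand is widened first. A self-contained demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hi = 0x80;	/* miss_address_39_32 */
	uint32_t lo = 0x1;	/* miss_address_31_6 */

	/* hi << 26 overflows 32 bits (0x80 << 26 needs bit 33), so the
	 * unfixed expression silently drops the high address byte.
	 */
	uint64_t wrong = lo | (hi << 26);
	uint64_t right = (uint64_t)lo | ((uint64_t)hi << 26);

	printf("wrong=0x%llx right=0x%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}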
Some files were not shown because too many files have changed in this diff.