android_kernel_msm-6.1_noth.../include/linux/interrupt.h
Greg Kroah-Hartman 2950de8b2d Merge 6.1.56 into android14-6.1-lts
/* SPDX-License-Identifier: GPL-2.0 */
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>
/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                     interrupt handler after suspending interrupts. For
 *                     system wakeup devices users need to implement wakeup
 *                     detection in their interrupt handlers.
 * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
 *                  Users will enable it explicitly by enable_irq() or
 *                  enable_nmi() later.
 * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
 *                 depends on IRQF_PERCPU.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000
#define IRQF_NO_AUTOEN		0x00080000
#define IRQF_NO_DEBUG		0x00100000
#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};
typedef irqreturn_t (*irq_handler_t)(int, void *);
/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;
extern irqreturn_t no_action(int cpl, void *dev_id);
/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED (1U << 31)
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);
/**
 * request_irq - Add a handler for an interrupt line
 * @irq:	The interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 *		Primary handler for threaded interrupts.
 *		If NULL, the default primary handler is installed.
 * @flags:	Handling flags
 * @name:	Name of the device generating this interrupt
 * @dev:	A cookie passed to the handler function
 *
 * This call allocates an interrupt and establishes a handler; see
 * the documentation for request_threaded_irq() for details.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
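/*
 * Illustrative sketch (not part of this header): a driver might request a
 * threaded interrupt as below. The device type, handler names and flag
 * choice are hypothetical.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev)
 *	{
 *		struct my_dev *md = dev;
 *
 *		if (!my_dev_irq_is_ours(md))
 *			return IRQ_NONE;
 *		return IRQ_WAKE_THREAD;		// defer heavy work to the thread
 *	}
 *
 *	static irqreturn_t my_slow_work(int irq, void *dev)
 *	{
 *		// runs in process context; the line stays masked until this
 *		// returns because of IRQF_ONESHOT
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, my_quick_check, my_slow_work,
 *				   IRQF_ONESHOT | IRQF_SHARED, "my_dev", md);
 */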
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);
extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);
extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);
static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}
extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *dev);
extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
struct device;
extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);
static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
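/*
 * Illustrative sketch: in a probe() path the devm_ variant ties the IRQ's
 * lifetime to the device, so no explicit free_irq() is needed on the error
 * or remove paths. "pdev", "my_handler" and "priv" are hypothetical.
 *
 *	ret = devm_request_irq(&pdev->dev, irq, my_handler, 0,
 *			       dev_name(&pdev->dev), priv);
 *	if (ret)
 *		return ret;
 */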
extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);
extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);
extern int irq_inject_interrupt(unsigned int irq);
/* The following three functions are for core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);
/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called or later.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};
#define IRQ_AFFINITY_MAX_SETS 4
/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		The number of interrupt sets for which affinity
 *			spreading is required
 * @set_size:		Array holding the size of each interrupt set
 * @calc_sets:		Callback for calculating the number and size
 *			of interrupt sets
 * @priv:		Private data for usage by @calc_sets, usually a
 *			pointer to driver/device specific data.
 */
struct irq_affinity {
	unsigned int	pre_vectors;
	unsigned int	post_vectors;
	unsigned int	nr_sets;
	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
	void		*priv;
};
/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed:	1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};
#if defined(CONFIG_SMP)
extern cpumask_var_t irq_default_affinity;
extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
				     bool setaffinity);
/**
 * irq_update_affinity_hint - Update the affinity hint
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint, but does not change the affinity of the
 * interrupt.
 */
static inline int
irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, false);
}
/**
 * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
 *			       cpumask to the interrupt
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint and if @m is not NULL it applies it as the
 * affinity of that interrupt.
 */
static inline int
irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, true);
}
/*
 * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
 * instead.
 */
static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return irq_set_affinity_and_hint(irq, m);
}
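/*
 * Illustrative sketch: a multi-queue driver might spread its per-queue
 * interrupts across CPUs and publish the placement as a hint for
 * irqbalance. The loop and the "queue_irq" array are hypothetical.
 *
 *	for (i = 0; i < nr_queues; i++)
 *		irq_set_affinity_and_hint(queue_irq[i],
 *					  cpumask_of(i % num_online_cpus()));
 */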
extern int irq_update_affinity_desc(unsigned int irq,
				    struct irq_affinity_desc *affinity);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd);
#else /* CONFIG_SMP */
static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}
static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}
static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}
static inline int irq_select_affinity(unsigned int irq) { return 0; }
static inline int irq_update_affinity_hint(unsigned int irq,
					   const struct cpumask *m)
{
	return -EINVAL;
}
static inline int irq_set_affinity_and_hint(unsigned int irq,
					    const struct cpumask *m)
{
	return -EINVAL;
}
static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}
static inline int irq_update_affinity_desc(unsigned int irq,
					   struct irq_affinity_desc *affinity)
{
	return -EINVAL;
}
static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}
static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
	return NULL;
}
static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
			  const struct irq_affinity *affd)
{
	return maxvec;
}
#endif /* CONFIG_SMP */
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that a particular
 * irq context is disabled, and which are the only irq-context users of a
 * lock, so that it's safe to take the lock in the irq-disabled section
 * without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}
static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}
static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}
static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}
static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}
static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}
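/*
 * Illustrative sketch: a driver whose interrupt should wake the system
 * typically toggles wake capability in its PM callbacks. The suspend hook
 * and device structure are hypothetical.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct my_dev *md = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(md->irq);
 *		return 0;
 *	}
 */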
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};
extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
# define force_irqthreads() (true)
# else
DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
# define force_irqthreads() (static_branch_unlikely(&force_irqthreads_key))
# endif
#else
#define force_irqthreads() (false)
#endif
#ifndef local_softirq_pending
#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif
#define local_softirq_pending() (__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x) (__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x) (__this_cpu_or(local_softirq_pending_ref, (x)))
/**
 * __cpu_softirq_pending() - Checks to see if softirq is pending on a cpu
 *
 * This helper is inherently racy, as we're accessing per-cpu data w/o locks.
 * But peeking at the flag can still be useful when deciding where to place a
 * task.
 */
static inline u32 __cpu_softirq_pending(int cpu)
{
	return (u32)per_cpu(local_softirq_pending_ref, cpu);
}
#endif /* local_softirq_pending */
/*
 * Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable() do { } while(0)
#endif
/*
 * Please avoid allocating new softirqs unless you really need very
 * high-frequency threaded job scheduling. For almost all purposes
 * tasklets are more than enough; e.g. all serial device BHs et al.
 * should be converted to tasklets, not to softirqs.
 */
enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */
	NR_SOFTIRQS
};
/*
 * The following vectors can be safely ignored after ksoftirqd is parked:
 *
 * _ RCU:
 *	1) rcutree_migrate_callbacks() migrates the queue.
 *	2) rcu_report_dead() reports the final quiescent states.
 *
 * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
 *
 * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
 */
#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
				   BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))
/* Softirqs where the handling might be long: */
#define LONG_SOFTIRQ_MASK (BIT(NET_TX_SOFTIRQ)   | \
			   BIT(NET_RX_SOFTIRQ)   | \
			   BIT(BLOCK_SOFTIRQ)    | \
			   BIT(IRQ_POLL_SOFTIRQ))
/* Map softirq index to softirq name. Update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];
/* softirq mask and active fields moved to irq_cpustat_t in
* asm/hardirq.h to get better cache usage. KAO
*/
struct softirq_action
{
	void	(*action)(struct softirq_action *);
};
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
#ifdef CONFIG_PREEMPT_RT
extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
#else
static inline void do_softirq_post_smp_call_flush(unsigned int unused)
{
	do_softirq();
}
#endif
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
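/*
 * Illustrative sketch of the core-kernel pattern (softirqs cannot be
 * registered from modules): a subsystem opens its vector once during init
 * and later raises it, typically from interrupt context. The vector and
 * action names below are hypothetical placeholders.
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);
 *	...
 *	raise_softirq(MY_SOFTIRQ);	// run my_softirq_action on this CPU soon
 */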
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
#ifdef CONFIG_RT_SOFTIRQ_AWARE_SCHED
DECLARE_PER_CPU(u32, active_softirqs);
#endif
static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}
/* Tasklets --- the multithreaded analogue of BHs.
   This API is deprecated. Please consider using threaded IRQs instead:
   https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de
   The main property distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.
   The main property distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.
   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If a client needs some intertask synchronization,
     it must provide it with spinlocks.
 */
struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	bool use_callback;
	union {
		void (*func)(unsigned long data);
		void (*callback)(struct tasklet_struct *t);
	};
	unsigned long data;
};
#define DECLARE_TASKLET(name, _callback)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.callback = _callback,				\
	.use_callback = true,				\
}
#define DECLARE_TASKLET_DISABLED(name, _callback)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.callback = _callback,				\
	.use_callback = true,				\
}
#define from_tasklet(var, callback_tasklet, tasklet_fieldname)	\
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
#define DECLARE_TASKLET_OLD(name, _func)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.func = _func,					\
}
#define DECLARE_TASKLET_DISABLED_OLD(name, _func)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.func = _func,					\
}
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}
void tasklet_unlock(struct tasklet_struct *t);
void tasklet_unlock_wait(struct tasklet_struct *t);
void tasklet_unlock_spin_wait(struct tasklet_struct *t);
#else
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif
extern void __tasklet_schedule(struct tasklet_struct *t);
static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}
extern void __tasklet_hi_schedule(struct tasklet_struct *t);
static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}
static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}
/*
 * Do not use in new code. Disabling tasklets from atomic contexts is
 * error prone and should be avoided.
 */
static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_spin_wait(t);
	smp_mb();
}
static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}
static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
			  void (*callback)(struct tasklet_struct *));
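/*
 * Illustrative sketch (remember the API is deprecated; prefer threaded
 * IRQs): the callback and device names below are hypothetical.
 *
 *	static void my_bottom_half(struct tasklet_struct *t)
 *	{
 *		struct my_dev *md = from_tasklet(md, t, bh);
 *		// deferred work, runs in softirq context
 *	}
 *
 *	tasklet_setup(&md->bh, my_bottom_half);
 *	// from the hardirq handler:
 *	tasklet_schedule(&md->bh);
 *	// on teardown:
 *	tasklet_kill(&md->bh);
 */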
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void); /* returns 0 on failure */
extern int probe_irq_off(unsigned long); /* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long); /* returns mask of ISA interrupts */
#endif
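/*
 * Illustrative sketch of the probing sequence described above; the
 * device helpers are hypothetical.
 *
 *	my_dev_mask_irq(md);		// 1. mask the device interrupt
 *	local_irq_enable();		// 2. (modern spelling of sti())
 *	mask = probe_irq_on();		// 3. take over idle IRQs
 *	my_dev_trigger_irq(md);		// 4. make the device interrupt
 *	mdelay(10);			// 5. wait for it
 *	irq = probe_irq_off(mask);	// 6. 0 = none, negative = multiple
 *	my_dev_ack_irq(md);		// 7. clear the pending interrupt
 */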
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif
#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif
struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#ifndef __irq_entry
# define __irq_entry __section(".irqentry.text")
#endif
#define __softirq_entry __section(".softirqentry.text")
#endif