android_kernel_msm-6.1_noth.../kernel/time/tick-common.c
Greg Kroah-Hartman a09603eb2f This is the 6.1.36 stable release

Merge 6.1.36 into android14-6.1-lts

Changes in 6.1.36
	drm/amd/display: Use dc_update_planes_and_stream
	drm/amd/display: Add wrapper to call planes and stream update
	drm/amd/display: fix the system hang while disable PSR
	tty: serial: fsl_lpuart: make rx_watermark configurable for different platforms
	tty: serial: fsl_lpuart: reduce RX watermark to 0 on LS1028A
	ata: libata-scsi: Avoid deadlock on rescan after device resume
	mm: Fix copy_from_user_nofault().
	tpm, tpm_tis: Claim locality in interrupt handler
	tpm_crb: Add support for CRB devices based on Pluton
	ksmbd: validate command payload size
	ksmbd: fix out-of-bound read in smb2_write
	ksmbd: validate session id and tree id in the compound request
	tick/common: Align tick period during sched_timer setup
	selftests: mptcp: remove duplicated entries in usage
	selftests: mptcp: join: fix ShellCheck warnings
	selftests: mptcp: lib: skip if missing symbol
	selftests: mptcp: connect: skip transp tests if not supported
	selftests: mptcp: connect: skip disconnect tests if not supported
	selftests: mptcp: pm nl: remove hardcoded default limits
	selftests: mptcp: pm nl: skip fullmesh flag checks if not supported
	selftests: mptcp: sockopt: relax expected returned size
	selftests: mptcp: sockopt: skip getsockopt checks if not supported
	selftests: mptcp: userspace pm: skip if 'ip' tool is unavailable
	selftests: mptcp: userspace pm: skip if not supported
	selftests: mptcp: lib: skip if not below kernel version
	selftests: mptcp: join: use 'iptables-legacy' if available
	selftests: mptcp: join: helpers to skip tests
	selftests: mptcp: join: skip check if MIB counter not supported
	selftests: mptcp: join: support local endpoint being tracked or not
	selftests: mptcp: join: skip Fastclose tests if not supported
	selftests: mptcp: join: support RM_ADDR for used endpoints or not
	selftests: mptcp: join: skip implicit tests if not supported
	selftests: mptcp: join: skip backup if set flag on ID not supported
	selftests: mptcp: join: skip fullmesh flag tests if not supported
	selftests: mptcp: join: skip MPC backups tests if not supported
	selftests/mount_setattr: fix redefine struct mount_attr build error
	selftests: mptcp: diag: skip listen tests if not supported
	selftests: mptcp: sockopt: skip TCP_INQ checks if not supported
	selftests: mptcp: join: skip test if iptables/tc cmds fail
	selftests: mptcp: join: skip userspace PM tests if not supported
	selftests: mptcp: join: skip fail tests if not supported
	selftests: mptcp: join: fix "userspace pm add & remove address"
	writeback: fix dereferencing NULL mapping->host on writeback_page_template
	scripts: fix the gfp flags header path in gfp-translate
	nilfs2: fix buffer corruption due to concurrent device reads
	ACPI: sleep: Avoid breaking S3 wakeup due to might_sleep()
	KVM: Avoid illegal stage2 mapping on invalid memory slot
	Drivers: hv: vmbus: Call hv_synic_free() if hv_synic_alloc() fails
	Drivers: hv: vmbus: Fix vmbus_wait_for_unload() to scan present CPUs
	PCI: hv: Fix a race condition bug in hv_pci_query_relations()
	Revert "PCI: hv: Fix a timing issue which causes kdump to fail occasionally"
	PCI: hv: Remove the useless hv_pcichild_state from struct hv_pci_dev
	PCI: hv: Fix a race condition in hv_irq_unmask() that can cause panic
	PCI: hv: Add a per-bus mutex state_lock
	io_uring/net: clear msg_controllen on partial sendmsg retry
	io_uring/net: disable partial retries for recvmsg with cmsg
	mptcp: handle correctly disconnect() failures
	mptcp: fix possible divide by zero in recvmsg()
	mptcp: fix possible list corruption on passive MPJ
	mptcp: consolidate fallback and non fallback state machine
	cgroup: Do not corrupt task iteration when rebinding subsystem
	cgroup,freezer: hold cpu_hotplug_lock before freezer_mutex in freezer_css_{online,offline}()
	mmc: litex_mmc: set PROBE_PREFER_ASYNCHRONOUS
	mmc: sdhci-msm: Disable broken 64-bit DMA on MSM8916
	mmc: meson-gx: remove redundant mmc_request_done() call from irq context
	mmc: mmci: stm32: fix max busy timeout calculation
	mmc: sdhci-spear: fix deferred probing
	mmc: bcm2835: fix deferred probing
	mmc: sunxi: fix deferred probing
	bpf: ensure main program has an extable
	wifi: iwlwifi: pcie: Handle SO-F device for PCI id 0x7AF0
	spi: spi-geni-qcom: correctly handle -EPROBE_DEFER from dma_request_chan()
	regulator: pca9450: Fix LDO3OUT and LDO4OUT MASK
	regmap: spi-avmm: Fix regmap_bus max_raw_write
	arm64: dts: rockchip: Fix rk356x PCIe register and range mappings
	io_uring/poll: serialize poll linked timer start with poll removal
	nilfs2: prevent general protection fault in nilfs_clear_dirty_page()
	x86/mm: Avoid using set_pgd() outside of real PGD pages
	memfd: check for non-NULL file_seals in memfd_create() syscall
	mmc: meson-gx: fix deferred probing
	ieee802154: hwsim: Fix possible memory leaks
	xfrm: Treat already-verified secpath entries as optional
	xfrm: interface: rename xfrm_interface.c to xfrm_interface_core.c
	xfrm: Ensure policies always checked on XFRM-I input path
	KVM: arm64: PMU: Restore the host's PMUSERENR_EL0
	bpf: track immediate values written to stack by BPF_ST instruction
	bpf: Fix verifier id tracking of scalars on spill
	xfrm: fix inbound ipv4/udp/esp packets to UDPv6 dualstack sockets
	bpf: Fix a bpf_jit_dump issue for x86_64 with sysctl bpf_jit_enable.
	selftests: net: tls: check if FIPS mode is enabled
	selftests: net: vrf-xfrm-tests: change authentication and encryption algos
	selftests: net: fcnal-test: check if FIPS mode is enabled
	xfrm: Linearize the skb after offloading if needed.
	net/mlx5: DR, Fix wrong action data allocation in decap action
	sfc: use budget for TX completions
	net: qca_spi: Avoid high load if QCA7000 is not available
	mmc: mtk-sd: fix deferred probing
	mmc: mvsdio: fix deferred probing
	mmc: omap: fix deferred probing
	mmc: omap_hsmmc: fix deferred probing
	mmc: owl: fix deferred probing
	mmc: sdhci-acpi: fix deferred probing
	mmc: sh_mmcif: fix deferred probing
	mmc: usdhi60rol0: fix deferred probing
	ipvs: align inner_mac_header for encapsulation
	net: dsa: mt7530: fix trapping frames on non-MT7621 SoC MT7530 switch
	net: dsa: mt7530: fix handling of BPDUs on MT7530 switch
	net: dsa: mt7530: fix handling of LLDP frames
	be2net: Extend xmit workaround to BE3 chip
	netfilter: nf_tables: fix chain binding transaction logic
	netfilter: nf_tables: add NFT_TRANS_PREPARE_ERROR to deal with bound set/chain
	netfilter: nf_tables: drop map element references from preparation phase
	netfilter: nft_set_pipapo: .walk does not deal with generations
	netfilter: nf_tables: disallow element updates of bound anonymous sets
	netfilter: nf_tables: reject unbound anonymous set before commit phase
	netfilter: nf_tables: reject unbound chain set before commit phase
	netfilter: nf_tables: disallow updates of anonymous sets
	netfilter: nfnetlink_osf: fix module autoload
	Revert "net: phy: dp83867: perform soft reset and retain established link"
	bpf/btf: Accept function names that contain dots
	bpf: Force kprobe multi expected_attach_type for kprobe_multi link
	io_uring/net: use the correct msghdr union member in io_sendmsg_copy_hdr
	selftests: forwarding: Fix race condition in mirror installation
	platform/x86/amd/pmf: Register notify handler only if SPS is enabled
	sch_netem: acquire qdisc lock in netem_change()
	revert "net: align SO_RCVMARK required privileges with SO_MARK"
	arm64: dts: rockchip: Enable GPU on SOQuartz CM4
	arm64: dts: rockchip: fix nEXTRST on SOQuartz
	gpiolib: Fix GPIO chip IRQ initialization restriction
	gpio: sifive: add missing check for platform_get_irq
	gpiolib: Fix irq_domain resource tracking for gpiochip_irqchip_add_domain()
	scsi: target: iscsi: Prevent login threads from racing between each other
	HID: wacom: Add error check to wacom_parse_and_register()
	arm64: Add missing Set/Way CMO encodings
	smb3: missing null check in SMB2_change_notify
	media: cec: core: disable adapter in cec_devnode_unregister
	media: cec: core: don't set last_initiator if tx in progress
	nfcsim.c: Fix error checking for debugfs_create_dir
	btrfs: fix an uninitialized variable warning in btrfs_log_inode
	usb: gadget: udc: fix NULL dereference in remove()
	nvme: double KA polling frequency to avoid KATO with TBKAS on
	nvme: check IO start time when deciding to defer KA
	nvme: improve handling of long keep alives
	Input: soc_button_array - add invalid acpi_index DMI quirk handling
	arm64: dts: qcom: sc7280-idp: drop incorrect dai-cells from WCD938x SDW
	arm64: dts: qcom: sc7280-qcard: drop incorrect dai-cells from WCD938x SDW
	s390/cio: unregister device when the only path is gone
	spi: lpspi: disable lpspi module irq in DMA mode
	ASoC: codecs: wcd938x-sdw: do not set can_multi_write flag
	ASoC: simple-card: Add missing of_node_put() in case of error
	soundwire: dmi-quirks: add new mapping for HP Spectre x360
	soundwire: qcom: add proper error paths in qcom_swrm_startup()
	ASoC: nau8824: Add quirk to active-high jack-detect
	ASoC: amd: yc: Add Thinkpad Neo14 to quirks list for acp6x
	gfs2: Don't get stuck writing page onto itself under direct I/O
	s390/purgatory: disable branch profiling
	ASoC: fsl_sai: Enable BCI bit if SAI works on synchronous mode with BYP asserted
	ALSA: hda/realtek: Add "Intel Reference board" and "NUC 13" SSID in the ALC256
	i2c: mchp-pci1xxxx: Avoid cast to incompatible function type
	ARM: dts: Fix erroneous ADS touchscreen polarities
	null_blk: Fix: memory release when memory_backed=1
	drm/exynos: vidi: fix a wrong error return
	drm/exynos: fix race condition UAF in exynos_g2d_exec_ioctl
	drm/radeon: fix race condition UAF in radeon_gem_set_domain_ioctl
	vhost_vdpa: tell vqs about the negotiated
	vhost_net: revert upend_idx only on retriable error
	KVM: arm64: Restore GICv2-on-GICv3 functionality
	x86/apic: Fix kernel panic when booting with intremap=off and x2apic_phys
	i2c: imx-lpi2c: fix type char overflow issue when calculating the clock cycle
	smb: move client and server files to common directory fs/smb
	Linux 6.1.36

Note, this "merges away" commit:
	679354bea0 ("KVM: arm64: Restore GICv2-on-GICv3 functionality")
because the merge conflicts were too complex to resolve.  If this is
needed in the android tree it should be brought back as an individual
change in the future.

Change-Id: Iafee89e3fb40c8bd631e6cc22eaaf7453b82f727
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2023-06-28 10:19:08 +00:00

// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains the base functions to manage periodic tick
 * related events.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <trace/events/power.h>
#include <trace/hooks/sched.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"

/*
 * Tick devices
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);

/*
 * Tick next event: keeps track of the tick time. It's updated by the
 * CPU which handles the tick and protected by jiffies_lock. There is
 * no requirement to write hold the jiffies seqcount for it.
 */
ktime_t tick_next_period;
/*
 * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
 * which is responsible for calling do_timer(), i.e. the timekeeping stuff. This
 * variable has two functions:
 *
 * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab the
 *    timekeeping lock all at once. Only the CPU which is assigned to do the
 *    update is handling it.
 *
 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 *    TICK_DO_TIMER_NONE, i.e. a non-existing CPU. So the next cpu which looks
 *    at it will take over and keep the timekeeping alive. The handover
 *    procedure also covers cpu hotplug.
 */
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;

#ifdef CONFIG_NO_HZ_FULL
/*
 * tick_do_timer_boot_cpu indicates the boot CPU temporarily owns
 * tick_do_timer_cpu and it should be taken over by an eligible secondary
 * when one comes online.
 */
static int tick_do_timer_boot_cpu __read_mostly = -1;
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}

/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}

/*
 * Periodic tick
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		raw_spin_lock(&jiffies_lock);
		write_seqcount_begin(&jiffies_seq);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);

		do_timer(1);
		write_seqcount_end(&jiffies_seq);
		raw_spin_unlock(&jiffies_lock);
		update_wall_time();
		trace_android_vh_jiffies_update(NULL);
	}
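
	/*
	 * All CPUs run the CPU-local part of the tick: charge the tick to
	 * the current task, run expired per-CPU timers and let the
	 * scheduler and profiling code do their periodic work.
	 */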
	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}

/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next = dev->next_event;

	tick_periodic(cpu);

#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
	/*
	 * The cpu might have transitioned to HIGHRES or NOHZ mode via
	 * update_process_times() -> run_local_timers() ->
	 * hrtimer_run_queues().
	 */
	if (dev->event_handler != tick_handle_periodic)
		return;
#endif

	if (!clockevent_state_oneshot(dev))
		return;
	for (;;) {
		/*
		 * Setup the next period for devices, which do not have
		 * periodic mode:
		 */
		next = ktime_add_ns(next, TICK_NSEC);

		if (!clockevents_program_event(dev, next, false))
			return;
		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite
		 * loop, as the tick_periodic() increments jiffies,
		 * which then will increment time, possibly causing
		 * the loop to trigger again and again.
		 */
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
	}
}

/*
 * Setup the device for a periodic tick
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
	} else {
		unsigned int seq;
		ktime_t next;

		do {
			seq = read_seqcount_begin(&jiffies_seq);
			next = tick_next_period;
		} while (read_seqcount_retry(&jiffies_seq, seq));

		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
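
		/*
		 * Program the first expiry. If it is already in the past,
		 * advance one tick period at a time until the programming
		 * succeeds.
		 */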
		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add_ns(next, TICK_NSEC);
		}
	}
}

#ifdef CONFIG_NO_HZ_FULL
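/*
 * Called via smp_call_function_single() on the CPU that currently owns
 * do_timer; hands the duty over to the requesting CPU.
 */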
static void giveup_do_timer(void *info)
{
	int cpu = *(unsigned int *)info;

	WARN_ON(tick_do_timer_cpu != smp_processor_id());

	tick_do_timer_cpu = cpu;
}

static void tick_take_do_timer_from_boot(void)
{
	int cpu = smp_processor_id();
	int from = tick_do_timer_boot_cpu;

	if (from >= 0 && from != cpu)
		smp_call_function_single(from, giveup_do_timer, &cpu, 1);
}
#endif

/*
 * Setup the tick device
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	void (*handler)(struct clock_event_device *) = NULL;
	ktime_t next_event = 0;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			tick_do_timer_cpu = cpu;
			tick_next_period = ktime_get();
#ifdef CONFIG_NO_HZ_FULL
			/*
			 * The boot CPU may be nohz_full, in which case set
			 * tick_do_timer_boot_cpu so the first housekeeping
			 * secondary that comes up will take do_timer from
			 * us.
			 */
			if (tick_nohz_full_cpu(cpu))
				tick_do_timer_boot_cpu = cpu;

		} else if (tick_do_timer_boot_cpu != -1 &&
			   !tick_nohz_full_cpu(cpu)) {
			tick_take_do_timer_from_boot();
			tick_do_timer_boot_cpu = -1;

			WARN_ON(tick_do_timer_cpu != cpu);
#endif
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. This function also returns !=0 when we keep the
	 * current active broadcast state for this CPU.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}
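
/*
 * Exchange the tick device of the current CPU for @newdev and set the
 * new device up in the mode the old one was using.
 */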
void tick_install_replacement(struct clock_event_device *newdev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	int cpu = smp_processor_id();

	clockevents_exchange_device(td->evtdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
}
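
/* Can @newdev serve as the per-CPU tick device for @cpu? */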
static bool tick_check_percpu(struct clock_event_device *curdev,
			      struct clock_event_device *newdev, int cpu)
{
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		return false;
	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return true;
	/* Check if irq affinity can be set */
	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
		return false;
	/* Prefer an existing cpu local device */
	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
		return false;
	return true;
}

static bool tick_check_preferred(struct clock_event_device *curdev,
				 struct clock_event_device *newdev)
{
	/* Prefer oneshot capable device */
	if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
		if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
			return false;
		if (tick_oneshot_mode_active())
			return false;
	}

	/*
	 * Use the higher rated one, but prefer a CPU local device with a lower
	 * rating than a non-CPU local device
	 */
	return !curdev ||
	       newdev->rating > curdev->rating ||
	       !cpumask_equal(curdev->cpumask, newdev->cpumask);
}

/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL !
 */
bool tick_check_replacement(struct clock_event_device *curdev,
			    struct clock_event_device *newdev)
{
	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
		return false;

	return tick_check_preferred(curdev, newdev);
}

/*
 * Check whether the newly registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
 */
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;

	if (!tick_check_replacement(curdev, newdev))
		goto out_bc;
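
	/* Take a reference on the module owning @newdev while it is in use. */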
	if (!try_module_get(newdev->owner))
		return;

	/*
	 * Replace the possibly existing device by the new device. If the
	 * current device is the broadcast device, do not give it back to
	 * the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));

	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
	return;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	tick_install_broadcast_device(newdev, cpu);
}

/**
 * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
 * @state: The target state (enter/exit)
 *
 * The system enters/leaves a state, where affected devices might stop.
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	return __tick_broadcast_oneshot_control(state);
}
EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled. No locking required. If
 * tick_do_timer_cpu is owned by this cpu, nothing can change it.
 */
void tick_handover_do_timer(void)
{
	if (tick_do_timer_cpu == smp_processor_id())
		tick_do_timer_cpu = cpumask_first(cpu_online_mask);
}

/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
void tick_shutdown(unsigned int cpu)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
	struct clock_event_device *dev = td->evtdev;

	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent the clock events layer from trying to call
		 * the set mode function!
		 */
		clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
}
#endif

/**
 * tick_suspend_local - Suspend the local tick device
 *
 * Called from the local cpu for freeze with interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	clockevents_shutdown(td->evtdev);
}

/**
 * tick_resume_local - Resume the local tick device
 *
 * Called from the local CPU for unfreeze or XEN resume magic.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool broadcast = tick_resume_check_broadcast();

	clockevents_tick_resume(td->evtdev);
	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}

	/*
	 * Ensure that hrtimers are up to date and the clockevents device
	 * is reprogrammed correctly when high resolution timers are
	 * enabled.
	 */
	hrtimers_resume_local();
}

/**
 * tick_suspend - Suspend the tick and the broadcast device
 *
 * Called from syscore_suspend() via timekeeping_suspend with only one
 * CPU online and interrupts disabled or from tick_unfreeze() under
 * tick_freeze_lock.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_suspend(void)
{
	tick_suspend_local();
	tick_suspend_broadcast();
}

/**
 * tick_resume - Resume the tick and the broadcast device
 *
 * Called from syscore_resume() via timekeeping_resume with only one
 * CPU online and interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */
void tick_resume(void)
{
	tick_resume_broadcast();
	tick_resume_local();
}

#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;

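/*
 * tick_freeze_depth counts the CPUs that have frozen their tick under
 * tick_freeze_lock; the last CPU to freeze also suspends timekeeping,
 * and the first CPU to unfreeze resumes it.
 */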

/**
 * tick_freeze - Suspend the local tick and (possibly) timekeeping.
 *
 * Check if this is the last online CPU executing the function and if so,
 * suspend timekeeping. Otherwise suspend the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_unfreeze().
 * Interrupts must not be enabled before the subsequent %tick_unfreeze().
 */
void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	tick_freeze_depth++;
	if (tick_freeze_depth == num_online_cpus()) {
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), true);
		system_state = SYSTEM_SUSPEND;
		sched_clock_suspend();
		timekeeping_suspend();
	} else {
		tick_suspend_local();
	}

	raw_spin_unlock(&tick_freeze_lock);
}

/**
 * tick_unfreeze - Resume the local tick and (possibly) timekeeping.
 *
 * Check if this is the first CPU executing the function and if so, resume
 * timekeeping. Otherwise resume the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_freeze().
 * Interrupts must not be enabled after the preceding %tick_freeze().
 */
void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	if (tick_freeze_depth == num_online_cpus()) {
		timekeeping_resume();
		sched_clock_resume();
		system_state = SYSTEM_RUNNING;
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), false);
	} else {
		touch_softlockup_watchdog();
		tick_resume_local();
	}

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}
#endif /* CONFIG_SUSPEND */

/**
 * tick_init - initialize the tick control
 */
void __init tick_init(void)
{
	tick_broadcast_init();
	tick_nohz_init();
}