Merge branch 'android14-6.1' into branch 'android14-6.1-lts'
This catches the android14-6.1-lts branch up with the latest changes and abi updates. Included in here are the following commits:07775f9683ANDROID: GKI: Add symbols for rockchip sataf44d373b32ANDROID: sched: Add trace_android_rvh_setschedulerefa8f34b5aANDROID: Update the ABI symbol listcee8ebf7c5ANDROID: GKI: build damon for monitoring virtual address spaces31c59d59c7UPSTREAM: mm/damon/sysfs-schemes: handle tried region directory allocation failure1cedfc05e9UPSTREAM: mm/damon/sysfs-schemes: handle tried regions sysfs directory allocation failure7fbeab3c65UPSTREAM: mm/damon/sysfs: check error from damon_sysfs_update_target()606444fd06UPSTREAM: mm/damon/sysfs: eliminate potential uninitialized variable warningc132d077ebUPSTREAM: mm/damon/sysfs: update monitoring target regions for online input commit6b7c4cc262UPSTREAM: mm/damon/sysfs: remove requested targets when online-commit inputs1e19db10e7UPSTREAM: mm/damon/sysfs: avoid empty scheme tried regions for large apply intervalc194e597cbUPSTREAM: mm/damon/sysfs-schemes: do not update tried regions more than one DAMON snapshotf5a0a8bc43UPSTREAM: mm/damon/sysfs: check DAMOS regions update progress from before_terminate()b46391e092UPSTREAM: mm/damon/sysfs: implement a command for updating only schemes tried total bytes7d48e19f74UPSTREAM: mm/damon/sysfs-schemes: implement DAMOS tried total bytes filea548d90994UPSTREAM: mm/damon/ops-common: refactor to use {pte|pmd}p_clear_young_notify()ea215c9a10UPSTREAM: mm/damon/core: skip apply schemes if empty3ca21ef5faUPSTREAM: mm/damon: use kstrtobool() instead of strtobool()5bf7b56860UPSTREAM: mm/damon/sysfs-schemes: implement DAMOS-tried regions clear command80ccab9b0eUPSTREAM: mm/damon/sysfs: implement DAMOS tried regions update command3421250b35UPSTREAM: mm/damon/sysfs-schemes: implement scheme region directoryb4c34cc168UPSTREAM: mm/damon/sysfs-schemes: implement schemes/tried_regions directoryb5d1f3576bUPSTREAM: mm/damon/core: add a callback for scheme target regions check6547a97f32UPSTREAM: mm/damon/lru_sort: enable and disable synchronously540e9b850dUPSTREAM: mm/damon/reclaim: enable and disable synchronously4e2d3f8e31UPSTREAM: mm/damon/{reclaim,lru_sort}: remove unnecessarily included headers3c0bc73f6eUPSTREAM: mm/damon/modules: deduplicate init steps for DAMON context setup67ef7b0f42UPSTREAM: mm/damon/sysfs: split out schemes directory implementation to separate file0b17df8a4fUPSTREAM: mm/damon/sysfs: split out kdamond-independent schemes stats update logic into a new functiona45dff567cUPSTREAM: mm/damon/sysfs: move unsigned long range directory to common modulec5038d80ceUPSTREAM: mm/damon/sysfs: move sysfs_lock to common moduleb7fc8d59a5UPSTREAM: mm/damon/sysfs: remove parameters of damon_sysfs_region_alloc()19364f11a4UPSTREAM: mm/damon/sysfs: use damon_addr_range for region's start and end valuesb6e6b1dbf8UPSTREAM: mm/damon/core: split out scheme quota adjustment logic into a new function43475d9708UPSTREAM: mm/damon/core: split out scheme stat update logic into a new function0b0a43029eUPSTREAM: mm/damon/core: split damos application logic into a new function6c7495f04aUPSTREAM: mm/damon/core: split out DAMOS-charged region skip logic into a new functionac1031618aANDROID: Snapshot Mainline's version of checkpatch.pl4fa87d4d8fANDROID: KVM: arm64: Skip prefaulting ptes which will be modified laterfbc707442cANDROID: KVM: arm64: Introduce module_change_host_prot_rangefd720ebc6aANDROID: KVM: arm64: Relax checks in module_change_host_page_protf082d22541ANDROID: KVM: arm64: Optimise 
module_change_host_page_prot01dd8c280bANDROID: KVM: arm64: Prefault entries when splitting a block mappingcc653d701fANDROID: virt: gunyah: Zero state_data after vcpu_runcc294d9503ANDROID: Update the ABI symbol list956a0d3998ANDROID: fs: Add vendor hooks for ep_create_wakeup_source & timerfd_created8d2b95fd0ANDROID: ABI: update symbol list for galaxybcc758eed7Reapply "binder: fix UAF caused by faulty buffer cleanup"b2b3a1e6d1UPSTREAM: x86/sev: Check for user-space IOIO pointing to kernel space62b97630d4UPSTREAM: x86/sev: Check IOBM for IOIO exceptions from user-space071c14698cFROMGIT: usb: typec: tcpm: skip checking port->send_discover in PD3.0a9567a35d0ANDROID: arm64: Disable workaround for CPU errata 2441007 and 2441009 Change-Id: Icbda2fae389ea4c2e7230821c59ac0380a35d756 Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
commit
a01e106f08
33 changed files with 3040 additions and 1489 deletions
File diff suppressed because it is too large
Load diff
|
|
@ -274,6 +274,8 @@
|
||||||
sched_clock
|
sched_clock
|
||||||
sched_show_task
|
sched_show_task
|
||||||
scnprintf
|
scnprintf
|
||||||
|
scsi_device_quiesce
|
||||||
|
scsi_device_resume
|
||||||
seq_hex_dump
|
seq_hex_dump
|
||||||
seq_lseek
|
seq_lseek
|
||||||
seq_printf
|
seq_printf
|
||||||
|
|
|
||||||
|
|
@ -544,12 +544,16 @@
|
||||||
dma_fence_array_create
|
dma_fence_array_create
|
||||||
dma_fence_context_alloc
|
dma_fence_context_alloc
|
||||||
dma_fence_default_wait
|
dma_fence_default_wait
|
||||||
|
dma_fence_enable_sw_signaling
|
||||||
dma_fence_get_status
|
dma_fence_get_status
|
||||||
dma_fence_init
|
dma_fence_init
|
||||||
dma_fence_release
|
dma_fence_release
|
||||||
dma_fence_remove_callback
|
dma_fence_remove_callback
|
||||||
dma_fence_signal
|
dma_fence_signal
|
||||||
dma_fence_signal_locked
|
dma_fence_signal_locked
|
||||||
|
dma_fence_unwrap_first
|
||||||
|
__dma_fence_unwrap_merge
|
||||||
|
dma_fence_unwrap_next
|
||||||
dma_fence_wait_timeout
|
dma_fence_wait_timeout
|
||||||
dma_free_attrs
|
dma_free_attrs
|
||||||
dma_free_pages
|
dma_free_pages
|
||||||
|
|
@ -1162,7 +1166,10 @@
|
||||||
kernel_param_lock
|
kernel_param_lock
|
||||||
kernel_param_unlock
|
kernel_param_unlock
|
||||||
kernel_restart
|
kernel_restart
|
||||||
|
kernfs_find_and_get_ns
|
||||||
|
kernfs_notify
|
||||||
kernfs_path_from_node
|
kernfs_path_from_node
|
||||||
|
kernfs_put
|
||||||
key_create_or_update
|
key_create_or_update
|
||||||
key_put
|
key_put
|
||||||
keyring_alloc
|
keyring_alloc
|
||||||
|
|
@ -2150,6 +2157,7 @@
|
||||||
thermal_zone_get_temp
|
thermal_zone_get_temp
|
||||||
thermal_zone_get_zone_by_name
|
thermal_zone_get_zone_by_name
|
||||||
thread_group_cputime_adjusted
|
thread_group_cputime_adjusted
|
||||||
|
tick_nohz_get_idle_calls_cpu
|
||||||
time64_to_tm
|
time64_to_tm
|
||||||
topology_update_thermal_pressure
|
topology_update_thermal_pressure
|
||||||
_totalram_pages
|
_totalram_pages
|
||||||
|
|
@ -2209,6 +2217,7 @@
|
||||||
__traceiter_android_vh_dup_task_struct
|
__traceiter_android_vh_dup_task_struct
|
||||||
__traceiter_android_vh_early_resume_begin
|
__traceiter_android_vh_early_resume_begin
|
||||||
__traceiter_android_vh_enable_thermal_genl_check
|
__traceiter_android_vh_enable_thermal_genl_check
|
||||||
|
__traceiter_android_vh_ep_create_wakeup_source
|
||||||
__traceiter_android_vh_filemap_get_folio
|
__traceiter_android_vh_filemap_get_folio
|
||||||
__traceiter_android_vh_ipi_stop
|
__traceiter_android_vh_ipi_stop
|
||||||
__traceiter_android_vh_meminfo_proc_show
|
__traceiter_android_vh_meminfo_proc_show
|
||||||
|
|
@ -2222,6 +2231,7 @@
|
||||||
__traceiter_android_vh_setscheduler_uclamp
|
__traceiter_android_vh_setscheduler_uclamp
|
||||||
__traceiter_android_vh_si_meminfo_adjust
|
__traceiter_android_vh_si_meminfo_adjust
|
||||||
__traceiter_android_vh_sysrq_crash
|
__traceiter_android_vh_sysrq_crash
|
||||||
|
__traceiter_android_vh_timerfd_create
|
||||||
__traceiter_android_vh_typec_store_partner_src_caps
|
__traceiter_android_vh_typec_store_partner_src_caps
|
||||||
__traceiter_android_vh_typec_tcpci_override_toggling
|
__traceiter_android_vh_typec_tcpci_override_toggling
|
||||||
__traceiter_android_vh_typec_tcpm_get_timer
|
__traceiter_android_vh_typec_tcpm_get_timer
|
||||||
|
|
@ -2316,6 +2326,7 @@
|
||||||
__tracepoint_android_vh_dup_task_struct
|
__tracepoint_android_vh_dup_task_struct
|
||||||
__tracepoint_android_vh_early_resume_begin
|
__tracepoint_android_vh_early_resume_begin
|
||||||
__tracepoint_android_vh_enable_thermal_genl_check
|
__tracepoint_android_vh_enable_thermal_genl_check
|
||||||
|
__tracepoint_android_vh_ep_create_wakeup_source
|
||||||
__tracepoint_android_vh_filemap_get_folio
|
__tracepoint_android_vh_filemap_get_folio
|
||||||
__tracepoint_android_vh_ipi_stop
|
__tracepoint_android_vh_ipi_stop
|
||||||
__tracepoint_android_vh_meminfo_proc_show
|
__tracepoint_android_vh_meminfo_proc_show
|
||||||
|
|
@ -2329,6 +2340,7 @@
|
||||||
__tracepoint_android_vh_setscheduler_uclamp
|
__tracepoint_android_vh_setscheduler_uclamp
|
||||||
__tracepoint_android_vh_si_meminfo_adjust
|
__tracepoint_android_vh_si_meminfo_adjust
|
||||||
__tracepoint_android_vh_sysrq_crash
|
__tracepoint_android_vh_sysrq_crash
|
||||||
|
__tracepoint_android_vh_timerfd_create
|
||||||
__tracepoint_android_vh_typec_store_partner_src_caps
|
__tracepoint_android_vh_typec_store_partner_src_caps
|
||||||
__tracepoint_android_vh_typec_tcpci_override_toggling
|
__tracepoint_android_vh_typec_tcpci_override_toggling
|
||||||
__tracepoint_android_vh_typec_tcpm_get_timer
|
__tracepoint_android_vh_typec_tcpm_get_timer
|
||||||
|
|
|
||||||
|
|
@ -2,6 +2,7 @@
|
||||||
# commonly used symbols
|
# commonly used symbols
|
||||||
add_timer
|
add_timer
|
||||||
alloc_chrdev_region
|
alloc_chrdev_region
|
||||||
|
alloc_etherdev_mqs
|
||||||
alloc_iova_fast
|
alloc_iova_fast
|
||||||
__alloc_pages
|
__alloc_pages
|
||||||
__alloc_skb
|
__alloc_skb
|
||||||
|
|
@ -827,9 +828,25 @@
|
||||||
param_ops_int
|
param_ops_int
|
||||||
param_ops_string
|
param_ops_string
|
||||||
param_ops_uint
|
param_ops_uint
|
||||||
|
param_ops_ulong
|
||||||
|
pci_disable_device
|
||||||
|
pci_disable_link_state
|
||||||
pcie_capability_clear_and_set_word
|
pcie_capability_clear_and_set_word
|
||||||
|
pci_find_capability
|
||||||
|
pcim_enable_device
|
||||||
|
pcim_iomap_table
|
||||||
|
pcim_pin_device
|
||||||
|
pci_read_config_byte
|
||||||
pci_read_config_dword
|
pci_read_config_dword
|
||||||
|
pci_read_config_word
|
||||||
|
__pci_register_driver
|
||||||
|
pci_restore_state
|
||||||
|
pci_save_state
|
||||||
|
pci_set_master
|
||||||
|
pci_set_power_state
|
||||||
|
pci_unregister_driver
|
||||||
pci_write_config_dword
|
pci_write_config_dword
|
||||||
|
pci_write_config_word
|
||||||
__per_cpu_offset
|
__per_cpu_offset
|
||||||
perf_trace_buf_alloc
|
perf_trace_buf_alloc
|
||||||
perf_trace_run_bpf_submit
|
perf_trace_run_bpf_submit
|
||||||
|
|
@ -1023,7 +1040,11 @@
|
||||||
sched_set_fifo
|
sched_set_fifo
|
||||||
schedule
|
schedule
|
||||||
schedule_timeout
|
schedule_timeout
|
||||||
|
schedule_timeout_uninterruptible
|
||||||
scnprintf
|
scnprintf
|
||||||
|
scsi_command_size_tbl
|
||||||
|
scsi_device_get
|
||||||
|
scsi_device_put
|
||||||
__sdhci_add_host
|
__sdhci_add_host
|
||||||
sdhci_cleanup_host
|
sdhci_cleanup_host
|
||||||
sdhci_enable_clk
|
sdhci_enable_clk
|
||||||
|
|
@ -1325,6 +1346,7 @@
|
||||||
vunmap
|
vunmap
|
||||||
vzalloc
|
vzalloc
|
||||||
wait_for_completion
|
wait_for_completion
|
||||||
|
wait_for_completion_interruptible
|
||||||
wait_for_completion_timeout
|
wait_for_completion_timeout
|
||||||
__wake_up
|
__wake_up
|
||||||
wake_up_process
|
wake_up_process
|
||||||
|
|
@ -1346,15 +1368,23 @@
|
||||||
skcipher_walk_aead_decrypt
|
skcipher_walk_aead_decrypt
|
||||||
skcipher_walk_aead_encrypt
|
skcipher_walk_aead_encrypt
|
||||||
|
|
||||||
|
# required by ahci.ko
|
||||||
|
pci_alloc_irq_vectors_affinity
|
||||||
|
pci_free_irq_vectors
|
||||||
|
pci_intx
|
||||||
|
pci_irq_vector
|
||||||
|
pci_match_id
|
||||||
|
pcim_iomap_regions_request_all
|
||||||
|
sysfs_add_file_to_group
|
||||||
|
sysfs_remove_file_from_group
|
||||||
|
|
||||||
# required by analogix_dp.ko
|
# required by analogix_dp.ko
|
||||||
drm_atomic_get_old_connector_for_encoder
|
drm_atomic_get_old_connector_for_encoder
|
||||||
|
|
||||||
# required by aspm_ext.ko
|
# required by aspm_ext.ko
|
||||||
pci_find_capability
|
|
||||||
pci_find_ext_capability
|
pci_find_ext_capability
|
||||||
|
|
||||||
# required by bcmdhd.ko
|
# required by bcmdhd.ko
|
||||||
alloc_etherdev_mqs
|
|
||||||
cpu_bit_bitmap
|
cpu_bit_bitmap
|
||||||
down_interruptible
|
down_interruptible
|
||||||
down_timeout
|
down_timeout
|
||||||
|
|
@ -1873,6 +1903,60 @@
|
||||||
# required by ledtrig-heartbeat.ko
|
# required by ledtrig-heartbeat.ko
|
||||||
avenrun
|
avenrun
|
||||||
|
|
||||||
|
# required by libahci.ko
|
||||||
|
__printk_ratelimit
|
||||||
|
|
||||||
|
# required by libahci_platform.ko
|
||||||
|
reset_control_rearm
|
||||||
|
|
||||||
|
# required by libata.ko
|
||||||
|
async_schedule_node
|
||||||
|
async_synchronize_cookie
|
||||||
|
attribute_container_register
|
||||||
|
attribute_container_unregister
|
||||||
|
autoremove_wake_function
|
||||||
|
blk_abort_request
|
||||||
|
blk_queue_max_hw_sectors
|
||||||
|
blk_queue_max_segments
|
||||||
|
blk_queue_update_dma_alignment
|
||||||
|
blk_queue_update_dma_pad
|
||||||
|
glob_match
|
||||||
|
pci_bus_type
|
||||||
|
pcim_iomap_regions
|
||||||
|
prepare_to_wait
|
||||||
|
__scsi_add_device
|
||||||
|
scsi_add_host_with_dma
|
||||||
|
scsi_build_sense
|
||||||
|
scsi_change_queue_depth
|
||||||
|
scsi_check_sense
|
||||||
|
scsi_device_set_state
|
||||||
|
scsi_done
|
||||||
|
scsi_eh_finish_cmd
|
||||||
|
scsi_eh_flush_done_q
|
||||||
|
scsi_execute_cmd
|
||||||
|
__scsi_format_command
|
||||||
|
scsi_host_alloc
|
||||||
|
scsi_host_put
|
||||||
|
scsi_remove_device
|
||||||
|
scsi_remove_host
|
||||||
|
scsi_rescan_device
|
||||||
|
scsi_schedule_eh
|
||||||
|
scsi_sense_desc_find
|
||||||
|
scsi_set_sense_field_pointer
|
||||||
|
scsi_set_sense_information
|
||||||
|
sdev_evt_send_simple
|
||||||
|
system_entering_hibernation
|
||||||
|
trace_seq_printf
|
||||||
|
trace_seq_putc
|
||||||
|
transport_add_device
|
||||||
|
transport_class_register
|
||||||
|
transport_class_unregister
|
||||||
|
transport_configure_device
|
||||||
|
transport_destroy_device
|
||||||
|
transport_remove_device
|
||||||
|
transport_setup_device
|
||||||
|
vscnprintf
|
||||||
|
|
||||||
# required by mac80211.ko
|
# required by mac80211.ko
|
||||||
alloc_netdev_mqs
|
alloc_netdev_mqs
|
||||||
__alloc_percpu_gfp
|
__alloc_percpu_gfp
|
||||||
|
|
|
||||||
|
|
@ -642,7 +642,6 @@ config ARM64_WORKAROUND_REPEAT_TLBI
|
||||||
|
|
||||||
config ARM64_ERRATUM_2441007
|
config ARM64_ERRATUM_2441007
|
||||||
bool "Cortex-A55: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
|
bool "Cortex-A55: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
|
||||||
default y
|
|
||||||
select ARM64_WORKAROUND_REPEAT_TLBI
|
select ARM64_WORKAROUND_REPEAT_TLBI
|
||||||
help
|
help
|
||||||
This option adds a workaround for ARM Cortex-A55 erratum #2441007.
|
This option adds a workaround for ARM Cortex-A55 erratum #2441007.
|
||||||
|
|
@ -881,7 +880,6 @@ config ARM64_ERRATUM_2224489
|
||||||
|
|
||||||
config ARM64_ERRATUM_2441009
|
config ARM64_ERRATUM_2441009
|
||||||
bool "Cortex-A510: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
|
bool "Cortex-A510: Completion of affected memory accesses might not be guaranteed by completion of a TLBI"
|
||||||
default y
|
|
||||||
select ARM64_WORKAROUND_REPEAT_TLBI
|
select ARM64_WORKAROUND_REPEAT_TLBI
|
||||||
help
|
help
|
||||||
This option adds a workaround for ARM Cortex-A510 erratum #2441009.
|
This option adds a workaround for ARM Cortex-A510 erratum #2441009.
|
||||||
|
|
|
||||||
|
|
@ -123,6 +123,9 @@ CONFIG_ANON_VMA_NAME=y
|
||||||
CONFIG_USERFAULTFD=y
|
CONFIG_USERFAULTFD=y
|
||||||
CONFIG_LRU_GEN=y
|
CONFIG_LRU_GEN=y
|
||||||
CONFIG_LRU_GEN_ENABLED=y
|
CONFIG_LRU_GEN_ENABLED=y
|
||||||
|
CONFIG_DAMON=y
|
||||||
|
CONFIG_DAMON_VADDR=y
|
||||||
|
CONFIG_DAMON_SYSFS=y
|
||||||
CONFIG_NET=y
|
CONFIG_NET=y
|
||||||
CONFIG_PACKET=y
|
CONFIG_PACKET=y
|
||||||
CONFIG_UNIX=y
|
CONFIG_UNIX=y
|
||||||
|
|
|
||||||
|
|
@ -153,7 +153,8 @@ struct pkvm_module_ops {
|
||||||
void* (*hyp_va)(phys_addr_t phys);
|
void* (*hyp_va)(phys_addr_t phys);
|
||||||
unsigned long (*kern_hyp_va)(unsigned long x);
|
unsigned long (*kern_hyp_va)(unsigned long x);
|
||||||
|
|
||||||
ANDROID_KABI_RESERVE(1);
|
ANDROID_KABI_USE(1, int (*host_stage2_mod_prot_range)(u64 pfn, enum kvm_pgtable_prot prot, u64 nr_pages));
|
||||||
|
|
||||||
ANDROID_KABI_RESERVE(2);
|
ANDROID_KABI_RESERVE(2);
|
||||||
ANDROID_KABI_RESERVE(3);
|
ANDROID_KABI_RESERVE(3);
|
||||||
ANDROID_KABI_RESERVE(4);
|
ANDROID_KABI_RESERVE(4);
|
||||||
|
|
|
||||||
|
|
@ -104,6 +104,7 @@ int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
|
||||||
struct kvm_hyp_memcache *host_mc);
|
struct kvm_hyp_memcache *host_mc);
|
||||||
|
|
||||||
int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot);
|
int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot);
|
||||||
|
int module_change_host_page_prot_range(u64 pfn, enum kvm_pgtable_prot prot, u64 nr_pages);
|
||||||
|
|
||||||
void destroy_hyp_vm_pgt(struct pkvm_hyp_vm *vm);
|
void destroy_hyp_vm_pgt(struct pkvm_hyp_vm *vm);
|
||||||
void drain_hyp_pool(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
|
void drain_hyp_pool(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
|
||||||
|
|
|
||||||
|
|
@ -2008,77 +2008,80 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int restrict_host_page_perms(u64 addr, kvm_pte_t pte, u32 level, enum kvm_pgtable_prot prot)
|
|
||||||
{
|
|
||||||
int ret = 0;
|
|
||||||
|
|
||||||
/* XXX: optimize ... */
|
|
||||||
if (kvm_pte_valid(pte) && (level == KVM_PGTABLE_MAX_LEVELS - 1))
|
|
||||||
ret = kvm_pgtable_stage2_unmap(&host_mmu.pgt, addr, PAGE_SIZE);
|
|
||||||
if (!ret)
|
|
||||||
ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot, false);
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define MODULE_PROT_ALLOWLIST (KVM_PGTABLE_PROT_RWX | \
|
#define MODULE_PROT_ALLOWLIST (KVM_PGTABLE_PROT_RWX | \
|
||||||
KVM_PGTABLE_PROT_DEVICE |\
|
KVM_PGTABLE_PROT_DEVICE |\
|
||||||
KVM_PGTABLE_PROT_NC | \
|
KVM_PGTABLE_PROT_NC | \
|
||||||
KVM_PGTABLE_PROT_PXN | \
|
KVM_PGTABLE_PROT_PXN | \
|
||||||
KVM_PGTABLE_PROT_UXN)
|
KVM_PGTABLE_PROT_UXN)
|
||||||
int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
|
|
||||||
|
int module_change_host_page_prot_range(u64 pfn, enum kvm_pgtable_prot prot, u64 nr_pages)
|
||||||
{
|
{
|
||||||
u64 addr = hyp_pfn_to_phys(pfn);
|
u64 i, addr = hyp_pfn_to_phys(pfn);
|
||||||
|
u64 end = addr + nr_pages * PAGE_SIZE;
|
||||||
struct hyp_page *page = NULL;
|
struct hyp_page *page = NULL;
|
||||||
kvm_pte_t pte;
|
struct kvm_mem_range range;
|
||||||
u32 level;
|
bool is_mmio;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if ((prot & MODULE_PROT_ALLOWLIST) != prot)
|
if ((prot & MODULE_PROT_ALLOWLIST) != prot)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
is_mmio = !find_mem_range(addr, &range);
|
||||||
|
if (end > range.end) {
|
||||||
|
/* Specified range not in a single mmio or memory block. */
|
||||||
|
return -EPERM;
|
||||||
|
}
|
||||||
|
|
||||||
host_lock_component();
|
host_lock_component();
|
||||||
ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
|
|
||||||
if (ret)
|
|
||||||
goto unlock;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* There is no hyp_vmemmap covering MMIO regions, which makes tracking
|
* There is no hyp_vmemmap covering MMIO regions, which makes tracking
|
||||||
* of module-owned MMIO regions hard, so we trust the modules not to
|
* of module-owned MMIO regions hard, so we trust the modules not to
|
||||||
* mess things up.
|
* mess things up.
|
||||||
*/
|
*/
|
||||||
if (!addr_is_memory(addr))
|
if (is_mmio)
|
||||||
goto update;
|
goto update;
|
||||||
|
|
||||||
ret = -EPERM;
|
/* Range is memory: we can track module ownership. */
|
||||||
page = hyp_phys_to_page(addr);
|
page = hyp_phys_to_page(addr);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Modules can only relax permissions of pages they own, and restrict
|
* Modules can only modify pages they already own, and pristine host
|
||||||
* permissions of pristine pages.
|
* pages. The entire range must be consistently one or the other.
|
||||||
*/
|
*/
|
||||||
if (prot == KVM_PGTABLE_PROT_RWX) {
|
if (page->flags & MODULE_OWNED_PAGE) {
|
||||||
if (!(page->flags & MODULE_OWNED_PAGE))
|
/* The entire range must be module-owned. */
|
||||||
|
ret = -EPERM;
|
||||||
|
for (i = 1; i < nr_pages; i++) {
|
||||||
|
if (!(page[i].flags & MODULE_OWNED_PAGE))
|
||||||
|
goto unlock;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
/* The entire range must be pristine. */
|
||||||
|
ret = __host_check_page_state_range(
|
||||||
|
addr, nr_pages << PAGE_SHIFT, PKVM_PAGE_OWNED);
|
||||||
|
if (ret)
|
||||||
goto unlock;
|
goto unlock;
|
||||||
} else if (host_get_page_state(pte, addr) != PKVM_PAGE_OWNED) {
|
|
||||||
goto unlock;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
update:
|
update:
|
||||||
if (prot == default_host_prot(!!page))
|
if (!prot) {
|
||||||
ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, PKVM_ID_HOST);
|
ret = host_stage2_set_owner_locked(
|
||||||
else if (!prot)
|
addr, nr_pages << PAGE_SHIFT, PKVM_ID_PROTECTED);
|
||||||
ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, PKVM_ID_PROTECTED);
|
} else {
|
||||||
else
|
ret = host_stage2_idmap_locked(
|
||||||
ret = restrict_host_page_perms(addr, pte, level, prot);
|
addr, nr_pages << PAGE_SHIFT, prot, false);
|
||||||
|
}
|
||||||
|
|
||||||
if (ret || !page)
|
if (WARN_ON(ret) || !page)
|
||||||
goto unlock;
|
goto unlock;
|
||||||
|
|
||||||
if (prot != KVM_PGTABLE_PROT_RWX)
|
for (i = 0; i < nr_pages; i++) {
|
||||||
hyp_phys_to_page(addr)->flags |= MODULE_OWNED_PAGE;
|
if (prot != KVM_PGTABLE_PROT_RWX)
|
||||||
else
|
page[i].flags |= MODULE_OWNED_PAGE;
|
||||||
hyp_phys_to_page(addr)->flags &= ~MODULE_OWNED_PAGE;
|
else
|
||||||
|
page[i].flags &= ~MODULE_OWNED_PAGE;
|
||||||
|
}
|
||||||
|
|
||||||
unlock:
|
unlock:
|
||||||
host_unlock_component();
|
host_unlock_component();
|
||||||
|
|
@ -2086,6 +2089,11 @@ unlock:
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
|
||||||
|
{
|
||||||
|
return module_change_host_page_prot_range(pfn, prot, 1);
|
||||||
|
}
|
||||||
|
|
||||||
int hyp_pin_shared_mem(void *from, void *to)
|
int hyp_pin_shared_mem(void *from, void *to)
|
||||||
{
|
{
|
||||||
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
|
u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
|
||||||
|
|
|
||||||
|
|
@ -115,6 +115,7 @@ const struct pkvm_module_ops module_ops = {
|
||||||
.hyp_pa = hyp_virt_to_phys,
|
.hyp_pa = hyp_virt_to_phys,
|
||||||
.hyp_va = hyp_phys_to_virt,
|
.hyp_va = hyp_phys_to_virt,
|
||||||
.kern_hyp_va = __kern_hyp_va,
|
.kern_hyp_va = __kern_hyp_va,
|
||||||
|
.host_stage2_mod_prot_range = module_change_host_page_prot_range,
|
||||||
};
|
};
|
||||||
|
|
||||||
int __pkvm_init_module(void *module_init)
|
int __pkvm_init_module(void *module_init)
|
||||||
|
|
|
||||||
|
|
@ -645,8 +645,13 @@ enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
|
||||||
return prot;
|
return prot;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
|
static bool stage2_pte_needs_update(struct kvm_pgtable *pgt,
|
||||||
|
kvm_pte_t old, kvm_pte_t new)
|
||||||
{
|
{
|
||||||
|
/* Following filter logic applies only to guest stage-2 entries. */
|
||||||
|
if (pgt->flags & KVM_PGTABLE_S2_IDMAP)
|
||||||
|
return true;
|
||||||
|
|
||||||
if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
|
if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
|
|
@ -715,12 +720,15 @@ static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
|
||||||
new = data->annotation;
|
new = data->annotation;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Skip updating the PTE if we are trying to recreate the exact
|
* Skip updating a guest PTE if we are trying to recreate the exact
|
||||||
* same mapping or only change the access permissions. Instead,
|
* same mapping or change only the access permissions. Instead,
|
||||||
* the vCPU will exit one more time from guest if still needed
|
* the vCPU will exit one more time from the guest if still needed
|
||||||
* and then go through the path of relaxing permissions.
|
* and then go through the path of relaxing permissions. This applies
|
||||||
|
* only to guest PTEs; Host PTEs are unconditionally updated. The
|
||||||
|
* host cannot livelock because the abort handler has done prior
|
||||||
|
* checks before calling here.
|
||||||
*/
|
*/
|
||||||
if (!stage2_pte_needs_update(old, new))
|
if (!stage2_pte_needs_update(pgt, old, new))
|
||||||
return -EAGAIN;
|
return -EAGAIN;
|
||||||
|
|
||||||
if (pte_ops->pte_is_counted_cb(old, level))
|
if (pte_ops->pte_is_counted_cb(old, level))
|
||||||
|
|
@ -775,6 +783,30 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void stage2_map_prefault_idmap(struct kvm_pgtable_pte_ops *pte_ops,
|
||||||
|
u64 addr, u64 end, u32 level,
|
||||||
|
kvm_pte_t *ptep, kvm_pte_t block_pte)
|
||||||
|
{
|
||||||
|
u64 pa, granule;
|
||||||
|
int i;
|
||||||
|
|
||||||
|
WARN_ON(pte_ops->pte_is_counted_cb(block_pte, level-1));
|
||||||
|
|
||||||
|
if (!kvm_pte_valid(block_pte))
|
||||||
|
return;
|
||||||
|
|
||||||
|
pa = ALIGN_DOWN(addr, kvm_granule_size(level-1));
|
||||||
|
granule = kvm_granule_size(level);
|
||||||
|
for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep, pa += granule) {
|
||||||
|
kvm_pte_t pte = kvm_init_valid_leaf_pte(pa, block_pte, level);
|
||||||
|
/* Skip ptes in the range being modified by the caller. */
|
||||||
|
if ((pa < addr) || (pa >= end)) {
|
||||||
|
/* We can write non-atomically: ptep isn't yet live. */
|
||||||
|
*ptep = pte;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
|
static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
|
||||||
struct stage2_map_data *data)
|
struct stage2_map_data *data)
|
||||||
{
|
{
|
||||||
|
|
@ -805,6 +837,11 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
|
||||||
if (!childp)
|
if (!childp)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
|
if (pgt->flags & KVM_PGTABLE_S2_IDMAP) {
|
||||||
|
stage2_map_prefault_idmap(pte_ops, addr, end, level + 1,
|
||||||
|
childp, pte);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* If we've run into an existing block mapping then replace it with
|
* If we've run into an existing block mapping then replace it with
|
||||||
* a table. Accesses beyond 'end' that fall within the new table
|
* a table. Accesses beyond 'end' that fall within the new table
|
||||||
|
|
|
||||||
|
|
@ -118,6 +118,9 @@ CONFIG_ANON_VMA_NAME=y
|
||||||
CONFIG_USERFAULTFD=y
|
CONFIG_USERFAULTFD=y
|
||||||
CONFIG_LRU_GEN=y
|
CONFIG_LRU_GEN=y
|
||||||
CONFIG_LRU_GEN_ENABLED=y
|
CONFIG_LRU_GEN_ENABLED=y
|
||||||
|
CONFIG_DAMON=y
|
||||||
|
CONFIG_DAMON_VADDR=y
|
||||||
|
CONFIG_DAMON_SYSFS=y
|
||||||
CONFIG_NET=y
|
CONFIG_NET=y
|
||||||
CONFIG_PACKET=y
|
CONFIG_PACKET=y
|
||||||
CONFIG_UNIX=y
|
CONFIG_UNIX=y
|
||||||
|
|
|
||||||
|
|
@ -2127,24 +2127,23 @@ static void binder_deferred_fd_close(int fd)
|
||||||
static void binder_transaction_buffer_release(struct binder_proc *proc,
|
static void binder_transaction_buffer_release(struct binder_proc *proc,
|
||||||
struct binder_thread *thread,
|
struct binder_thread *thread,
|
||||||
struct binder_buffer *buffer,
|
struct binder_buffer *buffer,
|
||||||
binder_size_t failed_at,
|
binder_size_t off_end_offset,
|
||||||
bool is_failure)
|
bool is_failure)
|
||||||
{
|
{
|
||||||
int debug_id = buffer->debug_id;
|
int debug_id = buffer->debug_id;
|
||||||
binder_size_t off_start_offset, buffer_offset, off_end_offset;
|
binder_size_t off_start_offset, buffer_offset;
|
||||||
|
|
||||||
binder_debug(BINDER_DEBUG_TRANSACTION,
|
binder_debug(BINDER_DEBUG_TRANSACTION,
|
||||||
"%d buffer release %d, size %zd-%zd, failed at %llx\n",
|
"%d buffer release %d, size %zd-%zd, failed at %llx\n",
|
||||||
proc->pid, buffer->debug_id,
|
proc->pid, buffer->debug_id,
|
||||||
buffer->data_size, buffer->offsets_size,
|
buffer->data_size, buffer->offsets_size,
|
||||||
(unsigned long long)failed_at);
|
(unsigned long long)off_end_offset);
|
||||||
|
|
||||||
if (buffer->target_node)
|
if (buffer->target_node)
|
||||||
binder_dec_node(buffer->target_node, 1, 0);
|
binder_dec_node(buffer->target_node, 1, 0);
|
||||||
|
|
||||||
off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
|
off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
|
||||||
off_end_offset = is_failure && failed_at ? failed_at :
|
|
||||||
off_start_offset + buffer->offsets_size;
|
|
||||||
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
|
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
|
||||||
buffer_offset += sizeof(binder_size_t)) {
|
buffer_offset += sizeof(binder_size_t)) {
|
||||||
struct binder_object_header *hdr;
|
struct binder_object_header *hdr;
|
||||||
|
|
@ -2304,6 +2303,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Clean up all the objects in the buffer */
|
||||||
|
static inline void binder_release_entire_buffer(struct binder_proc *proc,
|
||||||
|
struct binder_thread *thread,
|
||||||
|
struct binder_buffer *buffer,
|
||||||
|
bool is_failure)
|
||||||
|
{
|
||||||
|
binder_size_t off_end_offset;
|
||||||
|
|
||||||
|
off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
|
||||||
|
off_end_offset += buffer->offsets_size;
|
||||||
|
|
||||||
|
binder_transaction_buffer_release(proc, thread, buffer,
|
||||||
|
off_end_offset, is_failure);
|
||||||
|
}
|
||||||
|
|
||||||
static int binder_translate_binder(struct flat_binder_object *fp,
|
static int binder_translate_binder(struct flat_binder_object *fp,
|
||||||
struct binder_transaction *t,
|
struct binder_transaction *t,
|
||||||
struct binder_thread *thread)
|
struct binder_thread *thread)
|
||||||
|
|
@ -3013,7 +3027,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
|
||||||
t_outdated->buffer = NULL;
|
t_outdated->buffer = NULL;
|
||||||
buffer->transaction = NULL;
|
buffer->transaction = NULL;
|
||||||
trace_binder_transaction_update_buffer_release(buffer);
|
trace_binder_transaction_update_buffer_release(buffer);
|
||||||
binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
|
binder_release_entire_buffer(proc, NULL, buffer, false);
|
||||||
binder_alloc_free_buf(&proc->alloc, buffer);
|
binder_alloc_free_buf(&proc->alloc, buffer);
|
||||||
kfree(t_outdated);
|
kfree(t_outdated);
|
||||||
binder_stats_deleted(BINDER_STAT_TRANSACTION);
|
binder_stats_deleted(BINDER_STAT_TRANSACTION);
|
||||||
|
|
@ -4004,7 +4018,7 @@ binder_free_buf(struct binder_proc *proc,
|
||||||
binder_node_inner_unlock(buf_node);
|
binder_node_inner_unlock(buf_node);
|
||||||
}
|
}
|
||||||
trace_binder_transaction_buffer_release(buffer);
|
trace_binder_transaction_buffer_release(buffer);
|
||||||
binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
|
binder_release_entire_buffer(proc, thread, buffer, is_failure);
|
||||||
binder_alloc_free_buf(&proc->alloc, buffer);
|
binder_alloc_free_buf(&proc->alloc, buffer);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -26,6 +26,7 @@
|
||||||
#include <trace/hooks/printk.h>
|
#include <trace/hooks/printk.h>
|
||||||
#include <trace/hooks/epoch.h>
|
#include <trace/hooks/epoch.h>
|
||||||
#include <trace/hooks/cpufreq.h>
|
#include <trace/hooks/cpufreq.h>
|
||||||
|
#include <trace/hooks/fs.h>
|
||||||
#include <trace/hooks/preemptirq.h>
|
#include <trace/hooks/preemptirq.h>
|
||||||
#include <trace/hooks/ftrace_dump.h>
|
#include <trace/hooks/ftrace_dump.h>
|
||||||
#include <trace/hooks/ufshcd.h>
|
#include <trace/hooks/ufshcd.h>
|
||||||
|
|
@ -365,3 +366,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sd_update_bus_speed_mode);
|
||||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_slab_folio_alloced);
|
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_slab_folio_alloced);
|
||||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kmalloc_large_alloced);
|
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kmalloc_large_alloced);
|
||||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_netlink_poll);
|
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_netlink_poll);
|
||||||
|
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ep_create_wakeup_source);
|
||||||
|
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_timerfd_create);
|
||||||
|
|
|
||||||
|
|
@ -2855,7 +2855,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
|
||||||
PD_MSG_CTRL_NOT_SUPP,
|
PD_MSG_CTRL_NOT_SUPP,
|
||||||
NONE_AMS);
|
NONE_AMS);
|
||||||
} else {
|
} else {
|
||||||
if (port->send_discover) {
|
if (port->send_discover && port->negotiated_rev < PD_REV30) {
|
||||||
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
|
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
@ -2871,7 +2871,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
|
||||||
PD_MSG_CTRL_NOT_SUPP,
|
PD_MSG_CTRL_NOT_SUPP,
|
||||||
NONE_AMS);
|
NONE_AMS);
|
||||||
} else {
|
} else {
|
||||||
if (port->send_discover) {
|
if (port->send_discover && port->negotiated_rev < PD_REV30) {
|
||||||
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
|
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
@ -2880,7 +2880,7 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case PD_CTRL_VCONN_SWAP:
|
case PD_CTRL_VCONN_SWAP:
|
||||||
if (port->send_discover) {
|
if (port->send_discover && port->negotiated_rev < PD_REV30) {
|
||||||
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
|
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -196,6 +196,7 @@ static int gh_vcpu_run(struct gh_vcpu *vcpu)
|
||||||
}
|
}
|
||||||
|
|
||||||
gh_error = gh_hypercall_vcpu_run(vcpu->rsc->capid, state_data, &vcpu_run_resp);
|
gh_error = gh_hypercall_vcpu_run(vcpu->rsc->capid, state_data, &vcpu_run_resp);
|
||||||
|
memset(state_data, 0, sizeof(state_data));
|
||||||
if (gh_error == GH_ERROR_OK) {
|
if (gh_error == GH_ERROR_OK) {
|
||||||
switch (vcpu_run_resp.state) {
|
switch (vcpu_run_resp.state) {
|
||||||
case GH_VCPU_STATE_READY:
|
case GH_VCPU_STATE_READY:
|
||||||
|
|
|
||||||
|
|
@ -39,6 +39,8 @@
|
||||||
#include <linux/rculist.h>
|
#include <linux/rculist.h>
|
||||||
#include <net/busy_poll.h>
|
#include <net/busy_poll.h>
|
||||||
|
|
||||||
|
#include <trace/hooks/fs.h>
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* LOCKING:
|
* LOCKING:
|
||||||
* There are three level of locking required by epoll :
|
* There are three level of locking required by epoll :
|
||||||
|
|
@ -1373,15 +1375,20 @@ static int ep_create_wakeup_source(struct epitem *epi)
|
||||||
{
|
{
|
||||||
struct name_snapshot n;
|
struct name_snapshot n;
|
||||||
struct wakeup_source *ws;
|
struct wakeup_source *ws;
|
||||||
|
char ws_name[64];
|
||||||
|
|
||||||
|
strlcpy(ws_name, "eventpoll", sizeof(ws_name));
|
||||||
|
trace_android_vh_ep_create_wakeup_source(ws_name, sizeof(ws_name));
|
||||||
if (!epi->ep->ws) {
|
if (!epi->ep->ws) {
|
||||||
epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
|
epi->ep->ws = wakeup_source_register(NULL, ws_name);
|
||||||
if (!epi->ep->ws)
|
if (!epi->ep->ws)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
|
take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);
|
||||||
ws = wakeup_source_register(NULL, n.name.name);
|
strlcpy(ws_name, n.name.name, sizeof(ws_name));
|
||||||
|
trace_android_vh_ep_create_wakeup_source(ws_name, sizeof(ws_name));
|
||||||
|
ws = wakeup_source_register(NULL, ws_name);
|
||||||
release_dentry_name_snapshot(&n);
|
release_dentry_name_snapshot(&n);
|
||||||
|
|
||||||
if (!ws)
|
if (!ws)
|
||||||
|
|
|
||||||
|
|
@ -28,6 +28,8 @@
|
||||||
#include <linux/rcupdate.h>
|
#include <linux/rcupdate.h>
|
||||||
#include <linux/time_namespace.h>
|
#include <linux/time_namespace.h>
|
||||||
|
|
||||||
|
#include <trace/hooks/fs.h>
|
||||||
|
|
||||||
struct timerfd_ctx {
|
struct timerfd_ctx {
|
||||||
union {
|
union {
|
||||||
struct hrtimer tmr;
|
struct hrtimer tmr;
|
||||||
|
|
@ -407,6 +409,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
|
||||||
{
|
{
|
||||||
int ufd;
|
int ufd;
|
||||||
struct timerfd_ctx *ctx;
|
struct timerfd_ctx *ctx;
|
||||||
|
char file_name_buf[32];
|
||||||
|
|
||||||
/* Check the TFD_* constants for consistency. */
|
/* Check the TFD_* constants for consistency. */
|
||||||
BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
|
BUILD_BUG_ON(TFD_CLOEXEC != O_CLOEXEC);
|
||||||
|
|
@ -443,7 +446,9 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
|
||||||
|
|
||||||
ctx->moffs = ktime_mono_to_real(0);
|
ctx->moffs = ktime_mono_to_real(0);
|
||||||
|
|
||||||
ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx,
|
strlcpy(file_name_buf, "[timerfd]", sizeof(file_name_buf));
|
||||||
|
trace_android_vh_timerfd_create(file_name_buf, sizeof(file_name_buf));
|
||||||
|
ufd = anon_inode_getfd(file_name_buf, &timerfd_fops, ctx,
|
||||||
O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
|
O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
|
||||||
if (ufd < 0)
|
if (ufd < 0)
|
||||||
kfree(ctx);
|
kfree(ctx);
|
||||||
|
|
@ -451,7 +456,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
|
||||||
return ufd;
|
return ufd;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int do_timerfd_settime(int ufd, int flags,
|
static int do_timerfd_settime(int ufd, int flags,
|
||||||
const struct itimerspec64 *new,
|
const struct itimerspec64 *new,
|
||||||
struct itimerspec64 *old)
|
struct itimerspec64 *old)
|
||||||
{
|
{
|
||||||
|
|
|
||||||
|
|
@ -357,6 +357,7 @@ struct damon_operations {
|
||||||
* @after_wmarks_check: Called after each schemes' watermarks check.
|
* @after_wmarks_check: Called after each schemes' watermarks check.
|
||||||
* @after_sampling: Called after each sampling.
|
* @after_sampling: Called after each sampling.
|
||||||
* @after_aggregation: Called after each aggregation.
|
* @after_aggregation: Called after each aggregation.
|
||||||
|
* @before_damos_apply: Called before applying DAMOS action.
|
||||||
* @before_terminate: Called before terminating the monitoring.
|
* @before_terminate: Called before terminating the monitoring.
|
||||||
* @private: User private data.
|
* @private: User private data.
|
||||||
*
|
*
|
||||||
|
|
@ -385,6 +386,10 @@ struct damon_callback {
|
||||||
int (*after_wmarks_check)(struct damon_ctx *context);
|
int (*after_wmarks_check)(struct damon_ctx *context);
|
||||||
int (*after_sampling)(struct damon_ctx *context);
|
int (*after_sampling)(struct damon_ctx *context);
|
||||||
int (*after_aggregation)(struct damon_ctx *context);
|
int (*after_aggregation)(struct damon_ctx *context);
|
||||||
|
int (*before_damos_apply)(struct damon_ctx *context,
|
||||||
|
struct damon_target *target,
|
||||||
|
struct damon_region *region,
|
||||||
|
struct damos *scheme);
|
||||||
void (*before_terminate)(struct damon_ctx *context);
|
void (*before_terminate)(struct damon_ctx *context);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
||||||
23
include/trace/hooks/fs.h
Normal file
23
include/trace/hooks/fs.h
Normal file
|
|
@ -0,0 +1,23 @@
|
||||||
|
/* SPDX-License-Identifier: GPL-2.0 */
|
||||||
|
#undef TRACE_SYSTEM
|
||||||
|
#define TRACE_SYSTEM fs
|
||||||
|
|
||||||
|
#undef TRACE_INCLUDE_PATH
|
||||||
|
#define TRACE_INCLUDE_PATH trace/hooks
|
||||||
|
|
||||||
|
#if !defined(_TRACE_HOOK_FS_H) || defined(TRACE_HEADER_MULTI_READ)
|
||||||
|
#define _TRACE_HOOK_FS_H
|
||||||
|
|
||||||
|
#include <trace/hooks/vendor_hooks.h>
|
||||||
|
|
||||||
|
DECLARE_HOOK(android_vh_ep_create_wakeup_source,
|
||||||
|
TP_PROTO(char *name, int len),
|
||||||
|
TP_ARGS(name, len));
|
||||||
|
|
||||||
|
DECLARE_HOOK(android_vh_timerfd_create,
|
||||||
|
TP_PROTO(char *name, int len),
|
||||||
|
TP_ARGS(name, len));
|
||||||
|
#endif /* _TRACE_HOOK_FS_H */
|
||||||
|
|
||||||
|
/* This part must be outside protection */
|
||||||
|
#include <trace/define_trace.h>
|
||||||
|
|
@ -7841,6 +7841,7 @@ change:
|
||||||
if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
|
if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
|
||||||
__setscheduler_params(p, attr);
|
__setscheduler_params(p, attr);
|
||||||
__setscheduler_prio(p, newprio);
|
__setscheduler_prio(p, newprio);
|
||||||
|
trace_android_rvh_setscheduler(p);
|
||||||
}
|
}
|
||||||
__setscheduler_uclamp(p, attr);
|
__setscheduler_uclamp(p, attr);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,7 @@
|
||||||
obj-y := core.o
|
obj-y := core.o
|
||||||
obj-$(CONFIG_DAMON_VADDR) += ops-common.o vaddr.o
|
obj-$(CONFIG_DAMON_VADDR) += ops-common.o vaddr.o
|
||||||
obj-$(CONFIG_DAMON_PADDR) += ops-common.o paddr.o
|
obj-$(CONFIG_DAMON_PADDR) += ops-common.o paddr.o
|
||||||
obj-$(CONFIG_DAMON_SYSFS) += sysfs.o
|
obj-$(CONFIG_DAMON_SYSFS) += sysfs-common.o sysfs-schemes.o sysfs.o
|
||||||
obj-$(CONFIG_DAMON_DBGFS) += dbgfs.o
|
obj-$(CONFIG_DAMON_DBGFS) += dbgfs.o
|
||||||
obj-$(CONFIG_DAMON_RECLAIM) += reclaim.o
|
obj-$(CONFIG_DAMON_RECLAIM) += modules-common.o reclaim.o
|
||||||
obj-$(CONFIG_DAMON_LRU_SORT) += lru_sort.o
|
obj-$(CONFIG_DAMON_LRU_SORT) += modules-common.o lru_sort.o
|
||||||
|
|
|
||||||
269
mm/damon/core.c
269
mm/damon/core.c
|
|
@ -694,6 +694,115 @@ static bool damos_valid_target(struct damon_ctx *c, struct damon_target *t,
|
||||||
return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
|
return c->ops.get_scheme_score(c, t, r, s) >= s->quota.min_score;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* damos_skip_charged_region() - Check if the given region or starting part of
|
||||||
|
* it is already charged for the DAMOS quota.
|
||||||
|
* @t: The target of the region.
|
||||||
|
* @rp: The pointer to the region.
|
||||||
|
* @s: The scheme to be applied.
|
||||||
|
*
|
||||||
|
* If a quota of a scheme has exceeded in a quota charge window, the scheme's
|
||||||
|
* action would applied to only a part of the target access pattern fulfilling
|
||||||
|
* regions. To avoid applying the scheme action to only already applied
|
||||||
|
* regions, DAMON skips applying the scheme action to the regions that charged
|
||||||
|
* in the previous charge window.
|
||||||
|
*
|
||||||
|
* This function checks if a given region should be skipped or not for the
|
||||||
|
* reason. If only the starting part of the region has previously charged,
|
||||||
|
* this function splits the region into two so that the second one covers the
|
||||||
|
* area that not charged in the previous charge widnow and saves the second
|
||||||
|
* region in *rp and returns false, so that the caller can apply DAMON action
|
||||||
|
* to the second one.
|
||||||
|
*
|
||||||
|
* Return: true if the region should be entirely skipped, false otherwise.
|
||||||
|
*/
|
||||||
|
static bool damos_skip_charged_region(struct damon_target *t,
|
||||||
|
struct damon_region **rp, struct damos *s)
|
||||||
|
{
|
||||||
|
struct damon_region *r = *rp;
|
||||||
|
struct damos_quota *quota = &s->quota;
|
||||||
|
unsigned long sz_to_skip;
|
||||||
|
|
||||||
|
/* Skip previously charged regions */
|
||||||
|
if (quota->charge_target_from) {
|
||||||
|
if (t != quota->charge_target_from)
|
||||||
|
return true;
|
||||||
|
if (r == damon_last_region(t)) {
|
||||||
|
quota->charge_target_from = NULL;
|
||||||
|
quota->charge_addr_from = 0;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (quota->charge_addr_from &&
|
||||||
|
r->ar.end <= quota->charge_addr_from)
|
||||||
|
return true;
|
||||||
|
|
||||||
|
if (quota->charge_addr_from && r->ar.start <
|
||||||
|
quota->charge_addr_from) {
|
||||||
|
sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
|
||||||
|
r->ar.start, DAMON_MIN_REGION);
|
||||||
|
if (!sz_to_skip) {
|
||||||
|
if (damon_sz_region(r) <= DAMON_MIN_REGION)
|
||||||
|
return true;
|
||||||
|
sz_to_skip = DAMON_MIN_REGION;
|
||||||
|
}
|
||||||
|
damon_split_region_at(t, r, sz_to_skip);
|
||||||
|
r = damon_next_region(r);
|
||||||
|
*rp = r;
|
||||||
|
}
|
||||||
|
quota->charge_target_from = NULL;
|
||||||
|
quota->charge_addr_from = 0;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void damos_update_stat(struct damos *s,
|
||||||
|
unsigned long sz_tried, unsigned long sz_applied)
|
||||||
|
{
|
||||||
|
s->stat.nr_tried++;
|
||||||
|
s->stat.sz_tried += sz_tried;
|
||||||
|
if (sz_applied)
|
||||||
|
s->stat.nr_applied++;
|
||||||
|
s->stat.sz_applied += sz_applied;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t,
|
||||||
|
struct damon_region *r, struct damos *s)
|
||||||
|
{
|
||||||
|
struct damos_quota *quota = &s->quota;
|
||||||
|
unsigned long sz = damon_sz_region(r);
|
||||||
|
struct timespec64 begin, end;
|
||||||
|
unsigned long sz_applied = 0;
|
||||||
|
int err = 0;
|
||||||
|
|
||||||
|
if (c->ops.apply_scheme) {
|
||||||
|
if (quota->esz && quota->charged_sz + sz > quota->esz) {
|
||||||
|
sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
|
||||||
|
DAMON_MIN_REGION);
|
||||||
|
if (!sz)
|
||||||
|
goto update_stat;
|
||||||
|
damon_split_region_at(t, r, sz);
|
||||||
|
}
|
||||||
|
ktime_get_coarse_ts64(&begin);
|
||||||
|
if (c->callback.before_damos_apply)
|
||||||
|
err = c->callback.before_damos_apply(c, t, r, s);
|
||||||
|
if (!err)
|
||||||
|
sz_applied = c->ops.apply_scheme(c, t, r, s);
|
||||||
|
ktime_get_coarse_ts64(&end);
|
||||||
|
quota->total_charged_ns += timespec64_to_ns(&end) -
|
||||||
|
timespec64_to_ns(&begin);
|
||||||
|
quota->charged_sz += sz;
|
||||||
|
if (quota->esz && quota->charged_sz >= quota->esz) {
|
||||||
|
quota->charge_target_from = t;
|
||||||
|
quota->charge_addr_from = r->ar.end + 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (s->action != DAMOS_STAT)
|
||||||
|
r->age = 0;
|
||||||
|
|
||||||
|
update_stat:
|
||||||
|
damos_update_stat(s, sz, sz_applied);
|
||||||
|
}
|
||||||
|
|
||||||
static void damon_do_apply_schemes(struct damon_ctx *c,
|
static void damon_do_apply_schemes(struct damon_ctx *c,
|
||||||
struct damon_target *t,
|
struct damon_target *t,
|
||||||
struct damon_region *r)
|
struct damon_region *r)
|
||||||
|
|
@ -702,9 +811,6 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
|
||||||
|
|
||||||
damon_for_each_scheme(s, c) {
|
damon_for_each_scheme(s, c) {
|
||||||
struct damos_quota *quota = &s->quota;
|
struct damos_quota *quota = &s->quota;
|
||||||
unsigned long sz = damon_sz_region(r);
|
|
||||||
struct timespec64 begin, end;
|
|
||||||
unsigned long sz_applied = 0;
|
|
||||||
|
|
||||||
if (!s->wmarks.activated)
|
if (!s->wmarks.activated)
|
||||||
continue;
|
continue;
|
||||||
|
|
@ -713,70 +819,13 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
|
||||||
if (quota->esz && quota->charged_sz >= quota->esz)
|
if (quota->esz && quota->charged_sz >= quota->esz)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* Skip previously charged regions */
|
if (damos_skip_charged_region(t, &r, s))
|
||||||
if (quota->charge_target_from) {
|
continue;
|
||||||
if (t != quota->charge_target_from)
|
|
||||||
continue;
|
|
||||||
if (r == damon_last_region(t)) {
|
|
||||||
quota->charge_target_from = NULL;
|
|
||||||
quota->charge_addr_from = 0;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
if (quota->charge_addr_from &&
|
|
||||||
r->ar.end <= quota->charge_addr_from)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
if (quota->charge_addr_from && r->ar.start <
|
|
||||||
quota->charge_addr_from) {
|
|
||||||
sz = ALIGN_DOWN(quota->charge_addr_from -
|
|
||||||
r->ar.start, DAMON_MIN_REGION);
|
|
||||||
if (!sz) {
|
|
||||||
if (damon_sz_region(r) <=
|
|
||||||
DAMON_MIN_REGION)
|
|
||||||
continue;
|
|
||||||
sz = DAMON_MIN_REGION;
|
|
||||||
}
|
|
||||||
damon_split_region_at(t, r, sz);
|
|
||||||
r = damon_next_region(r);
|
|
||||||
sz = damon_sz_region(r);
|
|
||||||
}
|
|
||||||
quota->charge_target_from = NULL;
|
|
||||||
quota->charge_addr_from = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!damos_valid_target(c, t, r, s))
|
if (!damos_valid_target(c, t, r, s))
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* Apply the scheme */
|
damos_apply_scheme(c, t, r, s);
|
||||||
if (c->ops.apply_scheme) {
|
|
||||||
if (quota->esz &&
|
|
||||||
quota->charged_sz + sz > quota->esz) {
|
|
||||||
sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
|
|
||||||
DAMON_MIN_REGION);
|
|
||||||
if (!sz)
|
|
||||||
goto update_stat;
|
|
||||||
damon_split_region_at(t, r, sz);
|
|
||||||
}
|
|
||||||
ktime_get_coarse_ts64(&begin);
|
|
||||||
sz_applied = c->ops.apply_scheme(c, t, r, s);
|
|
||||||
ktime_get_coarse_ts64(&end);
|
|
||||||
quota->total_charged_ns += timespec64_to_ns(&end) -
|
|
||||||
timespec64_to_ns(&begin);
|
|
||||||
quota->charged_sz += sz;
|
|
||||||
if (quota->esz && quota->charged_sz >= quota->esz) {
|
|
||||||
quota->charge_target_from = t;
|
|
||||||
quota->charge_addr_from = r->ar.end + 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (s->action != DAMOS_STAT)
|
|
||||||
r->age = 0;
|
|
||||||
|
|
||||||
update_stat:
|
|
||||||
s->stat.nr_tried++;
|
|
||||||
s->stat.sz_tried += sz;
|
|
||||||
if (sz_applied)
|
|
||||||
s->stat.nr_applied++;
|
|
||||||
s->stat.sz_applied += sz_applied;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -803,6 +852,53 @@ static void damos_set_effective_quota(struct damos_quota *quota)
|
||||||
quota->esz = esz;
|
quota->esz = esz;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void damos_adjust_quota(struct damon_ctx *c, struct damos *s)
|
||||||
|
{
|
||||||
|
struct damos_quota *quota = &s->quota;
|
||||||
|
struct damon_target *t;
|
||||||
|
struct damon_region *r;
|
||||||
|
unsigned long cumulated_sz;
|
||||||
|
unsigned int score, max_score = 0;
|
||||||
|
|
||||||
|
if (!quota->ms && !quota->sz)
|
||||||
|
return;
|
||||||
|
|
||||||
|
/* New charge window starts */
|
||||||
|
if (time_after_eq(jiffies, quota->charged_from +
|
||||||
|
msecs_to_jiffies(quota->reset_interval))) {
|
||||||
|
if (quota->esz && quota->charged_sz >= quota->esz)
|
||||||
|
s->stat.qt_exceeds++;
|
||||||
|
quota->total_charged_sz += quota->charged_sz;
|
||||||
|
quota->charged_from = jiffies;
|
||||||
|
quota->charged_sz = 0;
|
||||||
|
damos_set_effective_quota(quota);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!c->ops.get_scheme_score)
|
||||||
|
return;
|
||||||
|
|
||||||
|
/* Fill up the score histogram */
|
||||||
|
memset(quota->histogram, 0, sizeof(quota->histogram));
|
||||||
|
damon_for_each_target(t, c) {
|
||||||
|
damon_for_each_region(r, t) {
|
||||||
|
if (!__damos_valid_target(r, s))
|
||||||
|
continue;
|
||||||
|
score = c->ops.get_scheme_score(c, t, r, s);
|
||||||
|
quota->histogram[score] += damon_sz_region(r);
|
||||||
|
if (score > max_score)
|
||||||
|
max_score = score;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Set the min score limit */
|
||||||
|
for (cumulated_sz = 0, score = max_score; ; score--) {
|
||||||
|
cumulated_sz += quota->histogram[score];
|
||||||
|
if (cumulated_sz >= quota->esz || !score)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
quota->min_score = score;
|
||||||
|
}
|
||||||
|
|
||||||
static void kdamond_apply_schemes(struct damon_ctx *c)
|
static void kdamond_apply_schemes(struct damon_ctx *c)
|
||||||
{
|
{
|
||||||
struct damon_target *t;
|
struct damon_target *t;
|
||||||
|
|
@ -810,52 +906,10 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
|
||||||
struct damos *s;
|
struct damos *s;
|
||||||
|
|
||||||
damon_for_each_scheme(s, c) {
|
damon_for_each_scheme(s, c) {
|
||||||
struct damos_quota *quota = &s->quota;
|
|
||||||
unsigned long cumulated_sz;
|
|
||||||
unsigned int score, max_score = 0;
|
|
||||||
|
|
||||||
if (!s->wmarks.activated)
|
if (!s->wmarks.activated)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
if (!quota->ms && !quota->sz)
|
damos_adjust_quota(c, s);
|
||||||
continue;
|
|
||||||
|
|
||||||
/* New charge window starts */
|
|
||||||
if (time_after_eq(jiffies, quota->charged_from +
|
|
||||||
msecs_to_jiffies(
|
|
||||||
quota->reset_interval))) {
|
|
||||||
if (quota->esz && quota->charged_sz >= quota->esz)
|
|
||||||
s->stat.qt_exceeds++;
|
|
||||||
quota->total_charged_sz += quota->charged_sz;
|
|
||||||
quota->charged_from = jiffies;
|
|
||||||
quota->charged_sz = 0;
|
|
||||||
damos_set_effective_quota(quota);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!c->ops.get_scheme_score)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
/* Fill up the score histogram */
|
|
||||||
memset(quota->histogram, 0, sizeof(quota->histogram));
|
|
||||||
damon_for_each_target(t, c) {
|
|
||||||
damon_for_each_region(r, t) {
|
|
||||||
if (!__damos_valid_target(r, s))
|
|
||||||
continue;
|
|
||||||
score = c->ops.get_scheme_score(
|
|
||||||
c, t, r, s);
|
|
||||||
quota->histogram[score] += damon_sz_region(r);
|
|
||||||
if (score > max_score)
|
|
||||||
max_score = score;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Set the min score limit */
|
|
||||||
for (cumulated_sz = 0, score = max_score; ; score--) {
|
|
||||||
cumulated_sz += quota->histogram[score];
|
|
||||||
if (cumulated_sz >= quota->esz || !score)
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
quota->min_score = score;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
damon_for_each_target(t, c) {
|
damon_for_each_target(t, c) {
|
||||||
|
|
@ -1176,7 +1230,8 @@ static int kdamond_fn(void *data)
|
||||||
if (ctx->callback.after_aggregation &&
|
if (ctx->callback.after_aggregation &&
|
||||||
ctx->callback.after_aggregation(ctx))
|
ctx->callback.after_aggregation(ctx))
|
||||||
break;
|
break;
|
||||||
kdamond_apply_schemes(ctx);
|
if (!list_empty(&ctx->schemes))
|
||||||
|
kdamond_apply_schemes(ctx);
|
||||||
kdamond_reset_aggregated(ctx);
|
kdamond_reset_aggregated(ctx);
|
||||||
kdamond_split_regions(ctx);
|
kdamond_split_regions(ctx);
|
||||||
if (ctx->ops.reset_aggregated)
|
if (ctx->ops.reset_aggregated)
|
||||||
|
|
|
||||||
|
|
@ -8,10 +8,8 @@
|
||||||
#define pr_fmt(fmt) "damon-lru-sort: " fmt
|
#define pr_fmt(fmt) "damon-lru-sort: " fmt
|
||||||
|
|
||||||
#include <linux/damon.h>
|
#include <linux/damon.h>
|
||||||
#include <linux/ioport.h>
|
#include <linux/kstrtox.h>
|
||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
#include <linux/sched.h>
|
|
||||||
#include <linux/workqueue.h>
|
|
||||||
|
|
||||||
#include "modules-common.h"
|
#include "modules-common.h"
|
||||||
|
|
||||||
|
|
@@ -235,38 +233,31 @@ static int damon_lru_sort_turn(bool on)
 	return 0;
 }
 
-static struct delayed_work damon_lru_sort_timer;
-static void damon_lru_sort_timer_fn(struct work_struct *work)
-{
-	static bool last_enabled;
-	bool now_enabled;
-
-	now_enabled = enabled;
-	if (last_enabled != now_enabled) {
-		if (!damon_lru_sort_turn(now_enabled))
-			last_enabled = now_enabled;
-		else
-			enabled = last_enabled;
-	}
-}
-static DECLARE_DELAYED_WORK(damon_lru_sort_timer, damon_lru_sort_timer_fn);
-
-static bool damon_lru_sort_initialized;
-
 static int damon_lru_sort_enabled_store(const char *val,
 		const struct kernel_param *kp)
 {
-	int rc = param_set_bool(val, kp);
+	bool is_enabled = enabled;
+	bool enable;
+	int err;
 
-	if (rc < 0)
-		return rc;
+	err = kstrtobool(val, &enable);
+	if (err)
+		return err;
 
-	if (!damon_lru_sort_initialized)
-		return rc;
+	if (is_enabled == enable)
+		return 0;
 
-	schedule_delayed_work(&damon_lru_sort_timer, 0);
+	/* Called before init function. The function will handle this. */
+	if (!ctx)
+		goto set_param_out;
 
-	return 0;
+	err = damon_lru_sort_turn(enable);
+	if (err)
+		return err;
+
+set_param_out:
+	enabled = enable;
+	return err;
 }
 
 static const struct kernel_param_ops enabled_param_ops = {
@@ -312,29 +303,19 @@ static int damon_lru_sort_after_wmarks_check(struct damon_ctx *c)
 
 static int __init damon_lru_sort_init(void)
 {
-	ctx = damon_new_ctx();
-	if (!ctx)
-		return -ENOMEM;
+	int err = damon_modules_new_paddr_ctx_target(&ctx, &target);
 
-	if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
-		damon_destroy_ctx(ctx);
-		return -EINVAL;
-	}
+	if (err)
+		return err;
 
 	ctx->callback.after_wmarks_check = damon_lru_sort_after_wmarks_check;
 	ctx->callback.after_aggregation = damon_lru_sort_after_aggregation;
 
-	target = damon_new_target();
-	if (!target) {
-		damon_destroy_ctx(ctx);
-		return -ENOMEM;
-	}
-	damon_add_target(ctx, target);
+	/* 'enabled' has set before this function, probably via command line */
+	if (enabled)
+		err = damon_lru_sort_turn(true);
 
-	schedule_delayed_work(&damon_lru_sort_timer, 0);
-
-	damon_lru_sort_initialized = true;
-	return 0;
+	return err;
 }
 
 module_init(damon_lru_sort_init);
mm/damon/modules-common.c (new file, 42 lines):
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common Primitives for DAMON Modules
+ *
+ * Author: SeongJae Park <sjpark@amazon.de>
+ */
+
+#include <linux/damon.h>
+
+#include "modules-common.h"
+
+/*
+ * Allocate, set, and return a DAMON context for the physical address space.
+ * @ctxp:	Pointer to save the point to the newly created context
+ * @targetp:	Pointer to save the point to the newly created target
+ */
+int damon_modules_new_paddr_ctx_target(struct damon_ctx **ctxp,
+		struct damon_target **targetp)
+{
+	struct damon_ctx *ctx;
+	struct damon_target *target;
+
+	ctx = damon_new_ctx();
+	if (!ctx)
+		return -ENOMEM;
+
+	if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
+		damon_destroy_ctx(ctx);
+		return -EINVAL;
+	}
+
+	target = damon_new_target();
+	if (!target) {
+		damon_destroy_ctx(ctx);
+		return -ENOMEM;
+	}
+	damon_add_target(ctx, target);
+
+	*ctxp = ctx;
+	*targetp = target;
+	return 0;
+}
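Note: the following is a minimal sketch, not part of the merge, of how a DAMON module init function can consume the helper added above; it mirrors the lru_sort and reclaim rework earlier in this diff. The module name damon_sample, its module-local ctx/target pointers, and the absence of callbacks are placeholders for illustration only.

/* Illustrative only: a module init mirroring the reclaim/lru_sort pattern. */
#include <linux/damon.h>
#include <linux/module.h>

#include "modules-common.h"

static struct damon_ctx *ctx;		/* placeholder module-local context */
static struct damon_target *target;	/* placeholder module-local target */

static int __init damon_sample_init(void)
{
	/* One call replaces the old new_ctx/select_ops/new_target boilerplate. */
	int err = damon_modules_new_paddr_ctx_target(&ctx, &target);

	if (err)
		return err;

	/* Module-specific callbacks and schemes would be set on ctx here. */
	return 0;
}
module_init(damon_sample_init);

MODULE_LICENSE("GPL");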
@@ -44,3 +44,6 @@
 			0400);						\
 	module_param_named(nr_##qt_exceed_name, stat.qt_exceeds, ulong, \
 			0400);
+
+int damon_modules_new_paddr_ctx_target(struct damon_ctx **ctxp,
+		struct damon_target **targetp);
@@ -35,21 +35,12 @@ struct page *damon_get_page(unsigned long pfn)
 
 void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr)
 {
-	bool referenced = false;
 	struct page *page = damon_get_page(pte_pfn(*pte));
 
 	if (!page)
 		return;
 
-	if (ptep_test_and_clear_young(vma, addr, pte))
-		referenced = true;
-
-#ifdef CONFIG_MMU_NOTIFIER
-	if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE))
-		referenced = true;
-#endif /* CONFIG_MMU_NOTIFIER */
-
-	if (referenced)
+	if (ptep_clear_young_notify(vma, addr, pte))
 		set_page_young(page);
 
 	set_page_idle(page);
@@ -59,21 +50,12 @@ void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr
 void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	bool referenced = false;
 	struct page *page = damon_get_page(pmd_pfn(*pmd));
 
 	if (!page)
 		return;
 
-	if (pmdp_test_and_clear_young(vma, addr, pmd))
-		referenced = true;
-
-#ifdef CONFIG_MMU_NOTIFIER
-	if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + HPAGE_PMD_SIZE))
-		referenced = true;
-#endif /* CONFIG_MMU_NOTIFIER */
-
-	if (referenced)
+	if (pmdp_clear_young_notify(vma, addr, pmd))
 		set_page_young(page);
 
 	set_page_idle(page);
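Note: as a reading aid, the sketch below (not part of the merge) restates the open-coded logic that the two hunks above delete, packaged as a standalone helper for comparison. ptep_clear_young_notify() folds the page-table young-bit clear and the MMU-notifier young clear into one call, which is why the local referenced flag disappears; the helper name here is hypothetical.

/* Illustrative only: the open-coded form that ptep_clear_young_notify() replaces. */
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static bool damon_old_style_clear_young(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	bool referenced = false;

	/* Clear the accessed bit in the page table entry. */
	if (ptep_test_and_clear_young(vma, addr, pte))
		referenced = true;
#ifdef CONFIG_MMU_NOTIFIER
	/* Also clear the young state tracked by secondary MMUs. */
	if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE))
		referenced = true;
#endif
	/* ptep_clear_young_notify(vma, addr, pte) reports the same result. */
	return referenced;
}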
@@ -8,10 +8,8 @@
 #define pr_fmt(fmt) "damon-reclaim: " fmt
 
 #include <linux/damon.h>
-#include <linux/ioport.h>
+#include <linux/kstrtox.h>
 #include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
 
 #include "modules-common.h"
 
@@ -183,38 +181,31 @@ static int damon_reclaim_turn(bool on)
 	return 0;
 }
 
-static struct delayed_work damon_reclaim_timer;
-static void damon_reclaim_timer_fn(struct work_struct *work)
-{
-	static bool last_enabled;
-	bool now_enabled;
-
-	now_enabled = enabled;
-	if (last_enabled != now_enabled) {
-		if (!damon_reclaim_turn(now_enabled))
-			last_enabled = now_enabled;
-		else
-			enabled = last_enabled;
-	}
-}
-static DECLARE_DELAYED_WORK(damon_reclaim_timer, damon_reclaim_timer_fn);
-
-static bool damon_reclaim_initialized;
-
 static int damon_reclaim_enabled_store(const char *val,
 		const struct kernel_param *kp)
 {
-	int rc = param_set_bool(val, kp);
+	bool is_enabled = enabled;
+	bool enable;
+	int err;
 
-	if (rc < 0)
-		return rc;
+	err = kstrtobool(val, &enable);
+	if (err)
+		return err;
 
-	/* system_wq might not initialized yet */
-	if (!damon_reclaim_initialized)
-		return rc;
+	if (is_enabled == enable)
+		return 0;
 
-	schedule_delayed_work(&damon_reclaim_timer, 0);
+	/* Called before init function. The function will handle this. */
+	if (!ctx)
+		goto set_param_out;
 
-	return 0;
+	err = damon_reclaim_turn(enable);
+	if (err)
+		return err;
+
+set_param_out:
+	enabled = enable;
+	return err;
 }
 
 static const struct kernel_param_ops enabled_param_ops = {
@@ -256,29 +247,19 @@ static int damon_reclaim_after_wmarks_check(struct damon_ctx *c)
 
 static int __init damon_reclaim_init(void)
 {
-	ctx = damon_new_ctx();
-	if (!ctx)
-		return -ENOMEM;
+	int err = damon_modules_new_paddr_ctx_target(&ctx, &target);
 
-	if (damon_select_ops(ctx, DAMON_OPS_PADDR)) {
-		damon_destroy_ctx(ctx);
-		return -EINVAL;
-	}
+	if (err)
+		return err;
 
 	ctx->callback.after_wmarks_check = damon_reclaim_after_wmarks_check;
 	ctx->callback.after_aggregation = damon_reclaim_after_aggregation;
 
-	target = damon_new_target();
-	if (!target) {
-		damon_destroy_ctx(ctx);
-		return -ENOMEM;
-	}
-	damon_add_target(ctx, target);
+	/* 'enabled' has set before this function, probably via command line */
+	if (enabled)
+		err = damon_reclaim_turn(true);
 
-	schedule_delayed_work(&damon_reclaim_timer, 0);
-
-	damon_reclaim_initialized = true;
-	return 0;
+	return err;
 }
 
 module_init(damon_reclaim_init);
mm/damon/sysfs-common.c (new file, 107 lines):
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common Primitives for DAMON Sysfs Interface
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#include <linux/slab.h>
+
+#include "sysfs-common.h"
+
+DEFINE_MUTEX(damon_sysfs_lock);
+
+/*
+ * unsigned long range directory
+ */
+
+struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
+		unsigned long min,
+		unsigned long max)
+{
+	struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),
+			GFP_KERNEL);
+
+	if (!range)
+		return NULL;
+	range->kobj = (struct kobject){};
+	range->min = min;
+	range->max = max;
+
+	return range;
+}
+
+static ssize_t min_show(struct kobject *kobj, struct kobj_attribute *attr,
+		char *buf)
+{
+	struct damon_sysfs_ul_range *range = container_of(kobj,
+			struct damon_sysfs_ul_range, kobj);
+
+	return sysfs_emit(buf, "%lu\n", range->min);
+}
+
+static ssize_t min_store(struct kobject *kobj, struct kobj_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct damon_sysfs_ul_range *range = container_of(kobj,
+			struct damon_sysfs_ul_range, kobj);
+	unsigned long min;
+	int err;
+
+	err = kstrtoul(buf, 0, &min);
+	if (err)
+		return err;
+
+	range->min = min;
+	return count;
+}
+
+static ssize_t max_show(struct kobject *kobj, struct kobj_attribute *attr,
+		char *buf)
+{
+	struct damon_sysfs_ul_range *range = container_of(kobj,
+			struct damon_sysfs_ul_range, kobj);
+
+	return sysfs_emit(buf, "%lu\n", range->max);
+}
+
+static ssize_t max_store(struct kobject *kobj, struct kobj_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct damon_sysfs_ul_range *range = container_of(kobj,
+			struct damon_sysfs_ul_range, kobj);
+	unsigned long max;
+	int err;
+
+	err = kstrtoul(buf, 0, &max);
+	if (err)
+		return err;
+
+	range->max = max;
+	return count;
+}
+
+void damon_sysfs_ul_range_release(struct kobject *kobj)
+{
+	kfree(container_of(kobj, struct damon_sysfs_ul_range, kobj));
+}
+
+static struct kobj_attribute damon_sysfs_ul_range_min_attr =
+		__ATTR_RW_MODE(min, 0600);
+
+static struct kobj_attribute damon_sysfs_ul_range_max_attr =
+		__ATTR_RW_MODE(max, 0600);
+
+static struct attribute *damon_sysfs_ul_range_attrs[] = {
+	&damon_sysfs_ul_range_min_attr.attr,
+	&damon_sysfs_ul_range_max_attr.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(damon_sysfs_ul_range);
+
+struct kobj_type damon_sysfs_ul_range_ktype = {
+	.release = damon_sysfs_ul_range_release,
+	.sysfs_ops = &kobj_sysfs_ops,
+	.default_groups = damon_sysfs_ul_range_groups,
+};
mm/damon/sysfs-common.h (new file, 58 lines):
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common Primitives for DAMON Sysfs Interface
+ *
+ * Author: SeongJae Park <sj@kernel.org>
+ */
+
+#include <linux/damon.h>
+#include <linux/kobject.h>
+
+extern struct mutex damon_sysfs_lock;
+
+struct damon_sysfs_ul_range {
+	struct kobject kobj;
+	unsigned long min;
+	unsigned long max;
+};
+
+struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
+		unsigned long min,
+		unsigned long max);
+void damon_sysfs_ul_range_release(struct kobject *kobj);
+
+extern struct kobj_type damon_sysfs_ul_range_ktype;
+
+/*
+ * schemes directory
+ */
+
+struct damon_sysfs_schemes {
+	struct kobject kobj;
+	struct damon_sysfs_scheme **schemes_arr;
+	int nr;
+};
+
+struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void);
+void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes);
+
+extern struct kobj_type damon_sysfs_schemes_ktype;
+
+int damon_sysfs_set_schemes(struct damon_ctx *ctx,
+		struct damon_sysfs_schemes *sysfs_schemes);
+
+void damon_sysfs_schemes_update_stats(
+		struct damon_sysfs_schemes *sysfs_schemes,
+		struct damon_ctx *ctx);
+
+int damon_sysfs_schemes_update_regions_start(
+		struct damon_sysfs_schemes *sysfs_schemes,
+		struct damon_ctx *ctx, bool total_bytes_only);
+
+bool damos_sysfs_regions_upd_done(void);
+
+int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx);
+
+int damon_sysfs_schemes_clear_regions(
+		struct damon_sysfs_schemes *sysfs_schemes,
+		struct damon_ctx *ctx);
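Note: for orientation, a minimal sketch (not part of the merge) of how a caller can wire one damon_sysfs_ul_range directory into sysfs using the alloc helper and ktype declared above. The parent kobject, the "intervals" directory name, the example bounds, and the function name are placeholders; error handling is reduced to the essentials.

/* Illustrative only: creating one min/max range directory under a parent. */
#include <linux/kobject.h>

#include "sysfs-common.h"

static int damon_sysfs_example_add_range(struct kobject *parent)
{
	struct damon_sysfs_ul_range *range;
	int err;

	range = damon_sysfs_ul_range_alloc(0, 1000);	/* placeholder bounds */
	if (!range)
		return -ENOMEM;

	/* The ktype's release() frees the range when the kobject is dropped. */
	err = kobject_init_and_add(&range->kobj,
			&damon_sysfs_ul_range_ktype, parent, "intervals");
	if (err)
		kobject_put(&range->kobj);
	return err;
}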
mm/damon/sysfs-schemes.c (new file, 1458 lines): diff suppressed because it is too large.
mm/damon/sysfs.c (1326 lines changed): diff suppressed because it is too large.
scripts/checkpatch.pl:
@@ -74,6 +74,8 @@ my $git_command ='export LANGUAGE=en_US.UTF-8; git';
 my $tabsize = 8;
 my ${CONFIG_} = "CONFIG_";
 
+my %maybe_linker_symbol; # for externs in c exceptions, when seen in *vmlinux.lds.h
+
 sub help {
 	my ($exitcode) = @_;
 
@@ -620,6 +622,22 @@ our $signature_tags = qr{(?xi:
 	Cc:
 )};
 
+our @link_tags = qw(Link Closes);
+
+#Create a search and print patterns for all these strings to be used directly below
+our $link_tags_search = "";
+our $link_tags_print = "";
+foreach my $entry (@link_tags) {
+	if ($link_tags_search ne "") {
+		$link_tags_search .= '|';
+		$link_tags_print .= ' or ';
+	}
+	$entry .= ':';
+	$link_tags_search .= $entry;
+	$link_tags_print .= "'$entry'";
+}
+$link_tags_search = "(?:${link_tags_search})";
+
 our $tracing_logging_tags = qr{(?xi:
 	[=-]*> |
 	<[=-]* |
@@ -702,6 +720,17 @@ sub find_standard_signature {
 	return "";
 }
 
+our $obsolete_archives = qr{(?xi:
+	\Qfreedesktop.org/archives/dri-devel\E |
+	\Qlists.infradead.org\E |
+	\Qlkml.org\E |
+	\Qmail-archive.com\E |
+	\Qmailman.alsa-project.org/pipermail\E |
+	\Qmarc.info\E |
+	\Qozlabs.org/pipermail\E |
+	\Qspinics.net\E
+)};
+
 our @typeListMisordered = (
 	qr{char\s+(?:un)?signed},
 	qr{int\s+(?:(?:un)?signed\s+)?short\s},
@@ -812,7 +841,9 @@ our %deprecated_apis = (
 	"get_state_synchronize_sched" => "get_state_synchronize_rcu",
 	"cond_synchronize_sched" => "cond_synchronize_rcu",
 	"kmap" => "kmap_local_page",
+	"kunmap" => "kunmap_local",
 	"kmap_atomic" => "kmap_local_page",
+	"kunmap_atomic" => "kunmap_local",
 );
 
 #Create a search pattern for all these strings to speed up a loop below
@@ -3131,21 +3162,33 @@ sub process {
 			if ($sign_off =~ /^co-developed-by:$/i) {
 				if ($email eq $author) {
 					WARN("BAD_SIGN_OFF",
-					     "Co-developed-by: should not be used to attribute nominal patch author '$author'\n" . "$here\n" . $rawline);
+					     "Co-developed-by: should not be used to attribute nominal patch author '$author'\n" . $herecurr);
 				}
 				if (!defined $lines[$linenr]) {
 					WARN("BAD_SIGN_OFF",
-					     "Co-developed-by: must be immediately followed by Signed-off-by:\n" . "$here\n" . $rawline);
-				} elsif ($rawlines[$linenr] !~ /^\s*signed-off-by:\s*(.*)/i) {
+					     "Co-developed-by: must be immediately followed by Signed-off-by:\n" . $herecurr);
+				} elsif ($rawlines[$linenr] !~ /^signed-off-by:\s*(.*)/i) {
 					WARN("BAD_SIGN_OFF",
-					     "Co-developed-by: must be immediately followed by Signed-off-by:\n" . "$here\n" . $rawline . "\n" . $rawlines[$linenr]);
+					     "Co-developed-by: must be immediately followed by Signed-off-by:\n" . $herecurr . $rawlines[$linenr] . "\n");
 				} elsif ($1 ne $email) {
 					WARN("BAD_SIGN_OFF",
-					     "Co-developed-by and Signed-off-by: name/email do not match \n" . "$here\n" . $rawline . "\n" . $rawlines[$linenr]);
+					     "Co-developed-by and Signed-off-by: name/email do not match\n" . $herecurr . $rawlines[$linenr] . "\n");
+				}
+			}
+
+# check if Reported-by: is followed by a Closes: tag
+			if ($sign_off =~ /^reported(?:|-and-tested)-by:$/i) {
+				if (!defined $lines[$linenr]) {
+					WARN("BAD_REPORTED_BY_LINK",
+					     "Reported-by: should be immediately followed by Closes: with a URL to the report\n" . $herecurr . "\n");
+				} elsif ($rawlines[$linenr] !~ /^closes:\s*/i) {
+					WARN("BAD_REPORTED_BY_LINK",
+					     "Reported-by: should be immediately followed by Closes: with a URL to the report\n" . $herecurr . $rawlines[$linenr] . "\n");
 				}
 			}
 		}
 
# Check Fixes: styles is correct
 		if (!$in_header_lines &&
 		    $line =~ /^\s*fixes:?\s*(?:commit\s*)?[0-9a-f]{5,}\b/i) {
@@ -3225,11 +3268,11 @@ sub process {
# file delta changes
 		    $line =~ /^\s*(?:[\w\.\-\+]*\/)++[\w\.\-\+]+:/ ||
# filename then :
-		    $line =~ /^\s*(?:Fixes:|Link:|$signature_tags)/i ||
-# A Fixes: or Link: line or signature tag line
+		    $line =~ /^\s*(?:Fixes:|$link_tags_search|$signature_tags)/i ||
+# A Fixes:, link or signature tag line
 		      $commit_log_possible_stack_dump)) {
 			WARN("COMMIT_LOG_LONG_LINE",
-			     "Possible unwrapped commit description (prefer a maximum 75 chars per line)\n" . $herecurr);
+			     "Prefer a maximum 75 chars per line (possible unwrapped commit description?)\n" . $herecurr);
 			$commit_log_long_line = 1;
 		}
@@ -3239,6 +3282,29 @@ sub process {
 			$commit_log_possible_stack_dump = 0;
 		}
 
+# Check for odd tags before a URI/URL
+		if ($in_commit_log &&
+		    $line =~ /^\s*(\w+:)\s*http/ && $1 !~ /^$link_tags_search$/) {
+			if ($1 =~ /^v(?:ersion)?\d+/i) {
+				WARN("COMMIT_LOG_VERSIONING",
+				     "Patch version information should be after the --- line\n" . $herecurr);
+			} else {
+				WARN("COMMIT_LOG_USE_LINK",
+				     "Unknown link reference '$1', use $link_tags_print instead\n" . $herecurr);
+			}
+		}
+
+# Check for misuse of the link tags
+		if ($in_commit_log &&
+		    $line =~ /^\s*(\w+:)\s*(\S+)/) {
+			my $tag = $1;
+			my $value = $2;
+			if ($tag =~ /^$link_tags_search$/ && $value !~ m{^https?://}) {
+				WARN("COMMIT_LOG_WRONG_LINK",
+				     "'$tag' should be followed by a public http(s) link\n" . $herecurr);
+			}
+		}
+
# Check for lines starting with a #
 		if ($in_commit_log && $line =~ /^#/) {
 			if (WARN("COMMIT_COMMENT_SYMBOL",
@@ -3324,6 +3390,12 @@ sub process {
 			$last_git_commit_id_linenr = $linenr if ($line =~ /\bcommit\s*$/i);
 		}
 
+# Check for mailing list archives other than lore.kernel.org
+		if ($rawline =~ m{http.*\b$obsolete_archives}) {
+			WARN("PREFER_LORE_ARCHIVE",
+			     "Use lore.kernel.org archive links when possible - see https://lore.kernel.org/lists.html\n" . $herecurr);
+		}
+
# Check for added, moved or deleted files
 		if (!$reported_maintainer_file && !$in_commit_log &&
 		    ($line =~ /^(?:new|deleted) file mode\s*\d+\s*$/ ||
@@ -3693,7 +3765,7 @@ sub process {
 					     "'$spdx_license' is not supported in LICENSES/...\n" . $herecurr);
 				}
 				if ($realfile =~ m@^Documentation/devicetree/bindings/@ &&
-				    not $spdx_license =~ /GPL-2\.0.*BSD-2-Clause/) {
+				    $spdx_license !~ /GPL-2\.0(?:-only)? OR BSD-2-Clause/) {
 					my $msg_level = \&WARN;
 					$msg_level = \&CHK if ($file);
 					if (&{$msg_level}("SPDX_LICENSE_TAG",
@@ -3703,12 +3775,17 @@ sub process {
 						$fixed[$fixlinenr] =~ s/SPDX-License-Identifier: .*/SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)/;
 					}
 				}
+				if ($realfile =~ m@^include/dt-bindings/@ &&
+				    $spdx_license !~ /GPL-2\.0(?:-only)? OR \S+/) {
+					WARN("SPDX_LICENSE_TAG",
+					     "DT binding headers should be licensed (GPL-2.0-only OR .*)\n" . $herecurr);
+				}
 			}
 		}
 	}
 
# check for embedded filenames
-		if ($rawline =~ /^\+.*\Q$realfile\E/) {
+		if ($rawline =~ /^\+.*\b\Q$realfile\E\b/) {
 			WARN("EMBEDDED_FILENAME",
 			     "It's generally not useful to have the filename in the file\n" . $herecurr);
 		}
@@ -4971,7 +5048,7 @@ sub process {
 			    if|for|while|switch|return|case|
 			    volatile|__volatile__|
 			    __attribute__|format|__extension__|
-			    asm|__asm__)$/x)
+			    asm|__asm__|scoped_guard)$/x)
 			{
 			# cpp #define statements have non-optional spaces, ie
 			# if there is a space between the name and the open
@@ -5766,6 +5843,8 @@ sub process {
 		    $var !~ /^(?:[A-Z]+_){1,5}[A-Z]{1,3}[a-z]/ &&
#Ignore Page<foo> variants
 		    $var !~ /^(?:Clear|Set|TestClear|TestSet|)Page[A-Z]/ &&
+#Ignore ETHTOOL_LINK_MODE_<foo> variants
+		    $var !~ /^ETHTOOL_LINK_MODE_/ &&
#Ignore SI style variants like nS, mV and dB
#(ie: max_uV, regulator_min_uA_show, RANGE_mA_VALUE)
 		    $var !~ /^(?:[a-z0-9_]*|[A-Z0-9_]*)?_?[a-z][A-Z](?:_[a-z0-9_]+|_[A-Z0-9_]+)?$/ &&
@@ -5901,6 +5980,7 @@ sub process {
 			$dstat !~ /$exceptions/ &&
 			$dstat !~ /^\.$Ident\s*=/ &&				# .foo =
 			$dstat !~ /^(?:\#\s*$Ident|\#\s*$Constant)\s*$/ &&	# stringification #foo
+			$dstat !~ /^case\b/ &&					# case ...
 			$dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ &&	# do {...} while (...); // do {...} while (...)
 			$dstat !~ /^while\s*$Constant\s*$Constant\s*$/ &&	# while (...) {...}
 			$dstat !~ /^for\s*$Constant$/ &&			# for (...)
@@ -5973,6 +6053,9 @@ sub process {
 
# check for line continuations outside of #defines, preprocessor #, and asm
 
+		} elsif ($realfile =~ m@/vmlinux.lds.h$@) {
+		    $line =~ s/(\w+)/$maybe_linker_symbol{$1}++/ge;
+		    #print "REAL: $realfile\nln: $line\nkeys:", sort keys %maybe_linker_symbol;
 		} else {
 			if ($prevline !~ /^..*\\$/ &&
 			    $line !~ /^\+\s*\#.*\\$/ &&			# preprocessor
@@ -6910,10 +6993,22 @@ sub process {
#		}
#	}
 
+# strcpy uses that should likely be strscpy
+		if ($line =~ /\bstrcpy\s*\(/) {
+			WARN("STRCPY",
+			     "Prefer strscpy over strcpy - see: https://github.com/KSPP/linux/issues/88\n" . $herecurr);
+		}
+
# strlcpy uses that should likely be strscpy
 		if ($line =~ /\bstrlcpy\s*\(/) {
 			WARN("STRLCPY",
-			     "Prefer strscpy over strlcpy - see: https://lore.kernel.org/r/CAHk-=wgfRnXz0W3D37d01q3JFkr_i_uTL=V6A6G1oUZcprmknw\@mail.gmail.com/\n" . $herecurr);
+			     "Prefer strscpy over strlcpy - see: https://github.com/KSPP/linux/issues/89\n" . $herecurr);
+		}
+
+# strncpy uses that should likely be strscpy or strscpy_pad
+		if ($line =~ /\bstrncpy\s*\(/) {
+			WARN("STRNCPY",
+			     "Prefer strscpy, strscpy_pad, or __nonstring over strncpy - see: https://github.com/KSPP/linux/issues/90\n" . $herecurr);
 		}
 
# typecasts on min/max could be min_t/max_t
@@ -7020,6 +7115,21 @@ sub process {
 				     "arguments for function declarations should follow identifier\n" . $herecurr);
 			}
 
+		} elsif ($realfile =~ /\.c$/ && defined $stat &&
+		    $stat =~ /^\+extern struct\s+(\w+)\s+(\w+)\[\];/)
+		{
+			my ($st_type, $st_name) = ($1, $2);
+
+			for my $s (keys %maybe_linker_symbol) {
+				#print "Linker symbol? $st_name : $s\n";
+				goto LIKELY_LINKER_SYMBOL
+					if $st_name =~ /$s/;
+			}
+			WARN("AVOID_EXTERNS",
+			     "found a file-scoped extern type:$st_type name:$st_name in .c file\n"
+			     . "is this a linker symbol ?\n" . $herecurr);
+		  LIKELY_LINKER_SYMBOL:
+
 		} elsif ($realfile =~ /\.c$/ && defined $stat &&
 		    $stat =~ /^.\s*extern\s+/)
 		{
@@ -7128,7 +7238,7 @@ sub process {
 		}
 
# check for alloc argument mismatch
-		if ($line =~ /\b((?:devm_)?(?:kcalloc|kmalloc_array))\s*\(\s*sizeof\b/) {
+		if ($line =~ /\b((?:devm_)?((?:k|kv)?(calloc|malloc_array)(?:_node)?))\s*\(\s*sizeof\b/) {
 			WARN("ALLOC_ARRAY_ARGS",
 			     "$1 uses number as first arg, sizeof is generally wrong\n" . $herecurr);
 		}
@@ -7331,6 +7441,16 @@ sub process {
 			}
 		}
 
+# check for array definition/declarations that should use flexible arrays instead
+		if ($sline =~ /^[\+ ]\s*\}(?:\s*__packed)?\s*;\s*$/ &&
+		    $prevline =~ /^\+\s*(?:\}(?:\s*__packed\s*)?|$Type)\s*$Ident\s*\[\s*(0|1)\s*\]\s*;\s*$/) {
+			if (ERROR("FLEXIBLE_ARRAY",
+				  "Use C99 flexible arrays - see https://docs.kernel.org/process/deprecated.html#zero-length-and-one-element-arrays\n" . $hereprev) &&
+			    $1 == '0' && $fix) {
+				$fixed[$fixlinenr - 1] =~ s/\[\s*0\s*\]/[]/;
+			}
+		}
+
# nested likely/unlikely calls
 		if ($line =~ /\b(?:(?:un)?likely)\s*\(\s*!?\s*(IS_ERR(?:_OR_NULL|_VALUE)?|WARN)/) {
 			WARN("LIKELY_MISUSE",
@@ -7348,6 +7468,30 @@ sub process {
 			}
 		}
 
+# Complain about RCU Tasks Trace used outside of BPF (and of course, RCU).
+		our $rcu_trace_funcs = qr{(?x:
+			rcu_read_lock_trace |
+			rcu_read_lock_trace_held |
+			rcu_read_unlock_trace |
+			call_rcu_tasks_trace |
+			synchronize_rcu_tasks_trace |
+			rcu_barrier_tasks_trace |
+			rcu_request_urgent_qs_task
+		)};
+		our $rcu_trace_paths = qr{(?x:
+			kernel/bpf/ |
+			include/linux/bpf |
+			net/bpf/ |
+			kernel/rcu/ |
+			include/linux/rcu
+		)};
+		if ($line =~ /\b($rcu_trace_funcs)\s*\(/) {
+			if ($realfile !~ m{^$rcu_trace_paths}) {
+				WARN("RCU_TASKS_TRACE",
+				     "use of RCU tasks trace is incorrect outside BPF or core RCU code\n" . $herecurr);
+			}
+		}
+
# check for lockdep_set_novalidate_class
 		if ($line =~ /^.\s*lockdep_set_novalidate_class\s*\(/ ||
 		    $line =~ /__lockdep_no_validate__\s*\)/ ) {