Merge branch 'android14-6.1' into branch 'android14-6.1-lts'

This merges the latest changes from the android14-6.1 branch into the
android14-6.1-lts branch to catch up on bugfixes and to pick up some new
symbols to track.

Included in here are the following commits:

*   dc61d0ccd6 Merge "Merge tag 'android14-6.1.57_r00' into branch 'android14-6.1'" into android14-6.1
|\
| * bf9a785d04 Merge tag 'android14-6.1.57_r00' into branch 'android14-6.1'
* ceb6ff1a69 ANDROID: Update the ABI symbol list
* 0d97bca80a ANDROID: sched: Add vendor hook for update_load_sum
* eba89bbb6f FROMGIT: freezer,sched: clean saved_state when restoring it during thaw
* 2a5c5d7c47 FROMGIT: freezer,sched: do not restore saved_state of a thawed task
* 6e3127c7ba ANDROID: GKI: add allowed list for Exynosauto SoC
* af85ead8ce ANDROID: KVM: arm64: pkvm_module_ops documentation
* c331f5b7af ANDROID: Update the ABI symbol list
* bcb7dfe013 UPSTREAM: usb: typec: tcpm: Fix NULL pointer dereference in tcpm_pd_svdm()
* 61ca1246d9 ANDROID: GKI: Update oplus symbol list
* d3787b952a UPSTREAM: drm/qxl: fix UAF on handle creation
* a2377cc135 FROMGIT: usb:gadget:uvc Do not use worker thread to pump isoc usb requests
* 82a411cec6 FROMGIT: usb: gadget: uvc: Fix use-after-free for inflight usb_requests
* 3c26a5d92f FROMGIT: usb: gadget: uvc: move video disable logic to its own function
* 20853add09 FROMGIT: usb: gadget: uvc: Allocate uvc_requests one at a time
* 5f3550218b FROMGIT: usb: gadget: uvc: prevent use of disabled endpoint
* 9673df54cd UPSTREAM: drm/fourcc: Add NV20 and NV30 YUV formats
* 3ee517981d FROMLIST: virt: geniezone: Add memory relinquish support
* c57b152c45 FROMGIT: Input: uinput - allow injecting event times
* df6e6fc38f UPSTREAM: PM: hibernate: Fix copying the zero bitmap to safe pages
* 7181d45e36 UPSTREAM: PM: hibernate: don't store zero pages in the image file
* 7385b83107 UPSTREAM: PM: hibernate: Complain about memory map mismatches during resume

Change-Id: I41e7aa4c8a3ab86f9421e32a0acd0fb8f991bb81
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>


@ -304646,6 +304646,11 @@ function {
parameter_id: 0x31066a10
parameter_id: 0x32f690bf
}
function {
id: 0x910bc03d
return_type_id: 0x6720d32f
parameter_id: 0x322c8c4b
}
function {
id: 0x910c3195
return_type_id: 0x6720d32f
@ -304719,6 +304724,12 @@ function {
parameter_id: 0x0c0dfa25
parameter_id: 0x3382a638
}
function {
id: 0x9115c860
return_type_id: 0x6720d32f
parameter_id: 0x31b5a66f
parameter_id: 0x3e10b518
}
function {
id: 0x9115faa6
return_type_id: 0x6720d32f
@ -316408,6 +316419,14 @@ function {
parameter_id: 0x0400f16a
parameter_id: 0x4585663f
}
function {
id: 0x9b555663
return_type_id: 0x6720d32f
parameter_id: 0x18bd6530
parameter_id: 0x3c417e01
parameter_id: 0x2e18f543
parameter_id: 0x1bf16028
}
function {
id: 0x9b555e72
return_type_id: 0x6720d32f
@ -334616,6 +334635,15 @@ elf_symbol {
type_id: 0x99e4a009
full_name: "__traceiter_android_rvh_update_load_avg"
}
elf_symbol {
id: 0x6fdb7fd2
name: "__traceiter_android_rvh_update_load_sum"
is_defined: true
symbol_type: FUNCTION
crc: 0x2e9c5a7a
type_id: 0x9b555663
full_name: "__traceiter_android_rvh_update_load_sum"
}
elf_symbol {
id: 0x16809a5a
name: "__traceiter_android_rvh_update_misfit_status"
@ -338558,6 +338586,15 @@ elf_symbol {
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_rvh_update_load_avg"
}
elf_symbol {
id: 0x51c819cc
name: "__tracepoint_android_rvh_update_load_sum"
is_defined: true
symbol_type: OBJECT
crc: 0x885815ef
type_id: 0x18ccbd2c
full_name: "__tracepoint_android_rvh_update_load_sum"
}
elf_symbol {
id: 0x1362c5b0
name: "__tracepoint_android_rvh_update_misfit_status"
@ -354740,6 +354777,15 @@ elf_symbol {
type_id: 0x93e51922
full_name: "drm_connector_set_panel_orientation"
}
elf_symbol {
id: 0xcf3c9405
name: "drm_connector_set_path_property"
is_defined: true
symbol_type: FUNCTION
crc: 0x68f96c58
type_id: 0x9115c860
full_name: "drm_connector_set_path_property"
}
elf_symbol {
id: 0xe4a41c47
name: "drm_connector_unregister"
@ -356054,6 +356100,15 @@ elf_symbol {
type_id: 0x1e597e38
full_name: "drm_helper_move_panel_connectors_to_head"
}
elf_symbol {
id: 0x06afbf8f
name: "drm_helper_probe_detect"
is_defined: true
symbol_type: FUNCTION
crc: 0xfbd3d75f
type_id: 0x91f8e2fb
full_name: "drm_helper_probe_detect"
}
elf_symbol {
id: 0x25989156
name: "drm_helper_probe_single_connector_modes"
@ -372002,6 +372057,15 @@ elf_symbol {
type_id: 0x7bfdb6bc
full_name: "of_graph_get_endpoint_by_regs"
}
elf_symbol {
id: 0xc5a4cedf
name: "of_graph_get_endpoint_count"
is_defined: true
symbol_type: FUNCTION
crc: 0x1396a5ec
type_id: 0x910bc03d
full_name: "of_graph_get_endpoint_count"
}
elf_symbol {
id: 0x377ef2e6
name: "of_graph_get_next_endpoint"
@ -397074,6 +397138,7 @@ interface {
symbol_id: 0xa63eb82a
symbol_id: 0xcd824552
symbol_id: 0xb6d1fa25
symbol_id: 0x6fdb7fd2
symbol_id: 0x16809a5a
symbol_id: 0xfaa2e0a4
symbol_id: 0xc1ba0eb6
@ -397512,6 +397577,7 @@ interface {
symbol_id: 0xc3add2dc
symbol_id: 0xdc2af26c
symbol_id: 0xcc5017b7
symbol_id: 0x51c819cc
symbol_id: 0x1362c5b0
symbol_id: 0xaa2da792
symbol_id: 0x0e614ab0
@ -399309,6 +399375,7 @@ interface {
symbol_id: 0x8069ccc3
symbol_id: 0xbe9b3f22
symbol_id: 0xb9cc9c24
symbol_id: 0xcf3c9405
symbol_id: 0xe4a41c47
symbol_id: 0x33f2cc93
symbol_id: 0x1773ebf6
@ -399455,6 +399522,7 @@ interface {
symbol_id: 0x213004ed
symbol_id: 0xc62eba2d
symbol_id: 0x0869dd10
symbol_id: 0x06afbf8f
symbol_id: 0x25989156
symbol_id: 0x3a6e27e9
symbol_id: 0xec79cf1c
@ -401227,6 +401295,7 @@ interface {
symbol_id: 0xcbfc5627
symbol_id: 0xee3e4c4b
symbol_id: 0x083944d7
symbol_id: 0xc5a4cedf
symbol_id: 0x377ef2e6
symbol_id: 0xd87751ae
symbol_id: 0xd3bfa538


@ -1,6 +1,5 @@
[abi_symbol_list]
# commonly used symbols
add_wait_queue
__alloc_pages
__alloc_percpu
alloc_workqueue
@ -34,6 +33,7 @@
class_destroy
clk_disable
clk_enable
clk_get
__clk_get_hw
clk_get_rate
clk_hw_get_parent
@ -44,13 +44,13 @@
clk_register_fixed_rate
clk_set_rate
clk_unprepare
clk_unregister
cma_alloc
cma_release
complete
complete_all
__const_udelay
__copy_overflow
cpu_bit_bitmap
__cpuhp_setup_state
cpu_hwcaps
cpu_number
@ -58,13 +58,11 @@
cpu_pm_register_notifier
__cpu_possible_mask
crc32_le
current_work
debugfs_create_dir
debugfs_create_file
debugfs_create_u32
debugfs_remove
default_llseek
default_wake_function
delayed_work_timer_fn
del_timer
del_timer_sync
@ -119,7 +117,6 @@
devm_regulator_register
devm_request_threaded_irq
__devm_reset_control_get
devm_rtc_device_register
devm_snd_soc_register_card
devm_snd_soc_register_component
devm_thermal_of_zone_register
@ -164,6 +161,7 @@
dma_resv_iter_next
dma_set_coherent_mask
dma_set_mask
dma_sync_sg_for_cpu
dma_sync_sg_for_device
dma_sync_single_for_cpu
dma_sync_single_for_device
@ -179,6 +177,8 @@
drm_atomic_helper_connector_destroy_state
drm_atomic_helper_connector_duplicate_state
drm_atomic_helper_connector_reset
__drm_atomic_helper_private_obj_duplicate_state
drm_atomic_private_obj_init
drm_bridge_add
drm_bridge_remove
drm_compat_ioctl
@ -242,6 +242,7 @@
drm_modeset_unlock
drm_mode_vrefresh
drm_object_attach_property
drm_object_property_set_value
drm_open
drm_poll
drm_prime_gem_destroy
@ -279,6 +280,7 @@
gen_pool_size
get_device
get_random_bytes
get_random_u32
get_unused_fd_flags
gic_nonsecure_priorities
gpiod_direction_input
@ -286,6 +288,8 @@
gpiod_get_raw_value
gpiod_set_raw_value
gpiod_set_value_cansleep
gpio_free
gpio_request
gpio_request_one
gpio_to_desc
handle_edge_irq
@ -297,6 +301,8 @@
i2c_add_numbered_adapter
i2c_del_adapter
i2c_del_driver
i2c_get_adapter
i2c_new_client_device
i2c_register_driver
i2c_transfer
i2c_unregister_device
@ -350,26 +356,31 @@
kmem_cache_free
kmemdup
kobject_create_and_add
kobject_init_and_add
kobject_put
krealloc
kstrdup
kstrtobool
kstrtoint
kstrtoll
kstrtou16
kstrtou8
kstrtouint
kstrtoull
kthread_complete_and_exit
kthread_create_on_node
kthread_flush_work
__kthread_init_worker
kthread_park
kthread_parkme
kthread_queue_work
kthread_should_park
kthread_should_stop
kthread_stop
kthread_unpark
kthread_worker_fn
ktime_get
ktime_get_mono_fast_ns
ktime_get_real_ts64
ktime_get_ts64
ktime_get_with_offset
kvfree
kvmalloc_node
__list_add_valid
@ -379,11 +390,7 @@
log_read_mmio
log_write_mmio
loops_per_jiffy
mdiobus_alloc_size
mdiobus_free
mdiobus_get_phy
mdiobus_read
mdiobus_unregister
mdiobus_write
memcpy
memdup_user
@ -396,9 +403,7 @@
mipi_dsi_device_unregister
mipi_dsi_driver_register_full
mipi_dsi_driver_unregister
misc_deregister
misc_register
mod_delayed_work_on
mod_timer
module_layout
__msecs_to_jiffies
@ -412,8 +417,6 @@
netdev_err
netdev_info
netdev_warn
netif_carrier_off
nonseekable_open
noop_llseek
nr_cpu_ids
ns_to_timespec64
@ -421,7 +424,6 @@
of_address_to_resource
of_alias_get_id
of_clk_add_provider
of_clk_del_provider
of_clk_get
of_clk_src_onecell_get
of_count_phandle_with_args
@ -433,7 +435,6 @@
of_find_matching_node_and_match
of_find_mipi_dsi_host_by_node
of_find_node_by_name
of_find_node_opts_by_path
of_find_property
of_get_child_by_name
of_get_display_timings
@ -446,7 +447,6 @@
of_iomap
of_match_device
of_match_node
__of_mdiobus_register
of_n_addr_cells
of_n_size_cells
__of_parse_phandle_with_args
@ -487,7 +487,7 @@
__per_cpu_offset
perf_trace_buf_alloc
perf_trace_run_bpf_submit
phy_attached_info
phy_configure
phy_init_eee
phylink_create
phylink_generic_validate
@ -495,12 +495,9 @@
phy_power_on
pinctrl_lookup_state
pinctrl_select_state
platform_device_register_full
platform_device_unregister
__platform_driver_probe
__platform_driver_register
platform_driver_unregister
platform_find_device_by_driver
platform_get_irq
platform_get_irq_byname
platform_get_irq_byname_optional
@ -524,7 +521,6 @@
prepare_to_wait_event
print_hex_dump
_printk
proc_create_data
put_device
__put_task_struct
put_unused_fd
@ -551,7 +547,6 @@
__register_chrdev
register_console
register_pm_notifier
register_restart_handler
register_syscore_ops
regmap_read
regmap_update_bits_base
@ -564,14 +559,13 @@
regulator_map_voltage_linear
release_firmware
__release_region
remove_proc_entry
remove_wait_queue
request_firmware
__request_region
request_threaded_irq
reset_control_assert
reset_control_deassert
sched_clock
sched_set_fifo
schedule
schedule_timeout
scnprintf
@ -580,6 +574,7 @@
seq_putc
seq_puts
seq_read
set_cpus_allowed_ptr
sg_alloc_table
sg_free_table
sg_init_table
@ -590,7 +585,6 @@
simple_attr_write
simple_open
simple_read_from_buffer
simple_strtoul
simple_write_to_buffer
single_open
single_release
@ -634,6 +628,7 @@
__sw_hweight32
sync_file_create
syscon_regmap_lookup_by_phandle
sysfs_create_file_ns
sysfs_create_group
sysfs_create_groups
sysfs_emit
@ -657,15 +652,28 @@
__usecs_to_jiffies
usleep_range_state
v4l2_device_register
v4l2_device_register_subdev
v4l2_device_unregister
v4l2_device_unregister_subdev
v4l2_fh_add
v4l2_fh_del
v4l2_fh_exit
v4l2_fh_init
v4l2_subdev_call_wrappers
v4l2_subdev_init
vb2_buffer_done
vb2_dma_sg_memops
vb2_dqbuf
vb2_plane_cookie
vb2_plane_vaddr
vb2_poll
vb2_qbuf
vb2_querybuf
vb2_queue_init
vb2_queue_release
vb2_reqbufs
vb2_streamoff
vb2_streamon
vfree
video_devdata
video_device_alloc
@ -678,7 +686,7 @@
vmap
vsnprintf
vunmap
wait_for_completion
vzalloc
wait_for_completion_interruptible_timeout
wait_for_completion_timeout
__wake_up
@ -733,9 +741,7 @@
drm_atomic_get_new_private_obj_state
drm_atomic_get_old_private_obj_state
drm_atomic_get_private_obj_state
__drm_atomic_helper_private_obj_duplicate_state
drm_atomic_private_obj_fini
drm_atomic_private_obj_init
drm_crtc_commit_wait
drm_crtc_wait_one_vblank
__drm_debug
@ -744,7 +750,6 @@
drm_edid_duplicate
drm_edid_get_monitor_name
drm_modeset_lock_single_interruptible
drm_object_property_set_value
__drm_printfn_debug
memchr_inv
__sw_hweight8
@ -772,6 +777,7 @@
nr_irqs
proc_create
register_die_notifier
register_restart_handler
return_address
rtc_time64_to_tm
sys_tz
@ -821,7 +827,6 @@
tty_std_termios
tty_unregister_driver
unregister_console
vzalloc
# required by dwc3-exynosauto-usb.ko
device_create_managed_software_node
@ -867,10 +872,13 @@
freq_qos_add_request
freq_qos_update_request
get_cpu_device
sysfs_create_file_ns
system_state
# required by exynos-adv-tracer-s2d.ko
simple_strtoul
# required by exynos-chipid_v2.ko
of_find_node_opts_by_path
soc_device_register
subsys_system_register
@ -900,6 +908,7 @@
pm_genpd_init
# required by exynos9drm.ko
add_wait_queue
bus_find_device
component_add
component_bind_all
@ -936,6 +945,7 @@
drm_atomic_helper_update_plane
drm_atomic_helper_wait_for_vblanks
drm_bridge_attach
drm_connector_set_path_property
drm_crtc_arm_vblank_event
drm_crtc_handle_vblank
drm_crtc_init_with_planes
@ -953,6 +963,7 @@
drm_gem_vm_open
drm_get_connector_status_name
drm_get_format_info
drm_helper_probe_detect
drm_mode_config_helper_resume
drm_mode_config_helper_suspend
drm_mode_config_reset
@ -971,6 +982,7 @@
drm_plane_create_zpos_immutable_property
drm_plane_create_zpos_property
__drm_printfn_info
drm_property_blob_put
drm_property_create
drm_property_create_blob
drm_rotation_simplify
@ -980,13 +992,13 @@
drm_writeback_queue_job
drm_writeback_signal_completion
gpiod_to_irq
kstrtou8
mipi_dsi_host_register
mipi_dsi_host_unregister
of_drm_find_bridge
of_drm_find_panel
of_find_i2c_device_by_node
of_graph_get_endpoint_by_regs
of_graph_get_endpoint_count
of_graph_get_next_endpoint
of_graph_get_port_by_id
of_graph_get_remote_port
@ -995,14 +1007,13 @@
of_phandle_iterator_next
param_ops_long
platform_bus_type
platform_find_device_by_driver
seq_hex_dump
seq_release
strnstr
synchronize_irq
vmalloc_to_page
# required by exynos_mfc.ko
clk_get
clk_put
hex_dump_to_buffer
iommu_map
@ -1010,15 +1021,6 @@
iommu_unmap
__sw_hweight64
_totalram_pages
vb2_dqbuf
vb2_plane_vaddr
vb2_poll
vb2_qbuf
vb2_querybuf
vb2_queue_release
vb2_reqbufs
vb2_streamoff
vb2_streamon
# required by exynos_pm_qos.ko
kstrtoint_from_user
@ -1048,10 +1050,16 @@
# required by exynosauto_v920_thermal.ko
devm_thermal_of_zone_unregister
kthread_delayed_work_timer_fn
kthread_mod_delayed_work
of_thermal_get_ntrips
strncasecmp
thermal_cdev_update
# required by gpu-sched.ko
mod_delayed_work_on
sched_set_fifo_low
wait_for_completion
__xa_alloc
xa_destroy
xa_erase
@ -1084,7 +1092,6 @@
i2c_adapter_type
i2c_bus_type
i2c_for_each_dev
i2c_get_adapter
i2c_put_adapter
i2c_smbus_xfer
i2c_transfer_buffer_flags
@ -1092,11 +1099,9 @@
register_chrdev_region
unregister_chrdev_region
# required by i2c-exynosauto.ko
cpu_bit_bitmap
# required by lontium-lt9611.ko
drm_hdmi_avi_infoframe_from_display_mode
platform_device_register_full
regmap_multi_reg_write
regulator_set_load
@ -1115,7 +1120,6 @@
# required by phy-exynosauto-usbdrd-super.ko
__clk_is_enabled
gpio_request
# required by pinctrl-samsung-core.ko
device_get_next_child_node
@ -1185,6 +1189,7 @@
phy_write_paged
# required by rtc-s2vps02.ko
devm_rtc_device_register
pm_wakeup_ws_event
rtc_update_irq
rtc_valid_tm
@ -1193,6 +1198,26 @@
__devm_irq_alloc_descs
handle_nested_irq
# required by sam-is.ko
down
down_trylock
get_task_pid
kernel_neon_begin
kernel_neon_end
kobject_del
kthread_flush_worker
pm_relax
pm_stay_awake
register_reboot_notifier
regulator_get_optional
regulator_get_voltage
regulator_is_enabled
regulator_put
regulator_set_voltage
unregister_reboot_notifier
vb2_mmap
vscnprintf
# required by samsung-bridge-dummy.ko
drm_atomic_helper_bridge_propagate_bus_fmt
@ -1219,9 +1244,9 @@
dma_heap_get_dev
dma_heap_get_drvdata
dma_heap_get_name
dma_sync_sg_for_cpu
is_dma_buf_file
iterate_fd
misc_deregister
remap_pfn_range
__sg_page_iter_next
__sg_page_iter_start
@ -1370,11 +1395,9 @@
__fdget
_find_first_zero_bit
__folio_put
get_random_u32
__get_task_comm
handle_simple_irq
i2c_bit_add_bus
i2c_new_client_device
ida_destroy
idr_alloc
idr_destroy
@ -1386,7 +1409,7 @@
iommu_iova_to_phys
jiffies64_to_msecs
jiffies_to_usecs
ktime_get_ts64
kobject_put
__memcpy_fromio
__memcpy_toio
memremap
@ -1539,7 +1562,6 @@
snd_soc_dapm_put_volsw
# required by spi-exynosauto.ko
gpio_free
__spi_alloc_controller
spi_controller_resume
spi_controller_suspend
@ -1595,6 +1617,10 @@
fwnode_get_phy_node
get_device_system_crosststamp
__local_bh_enable_ip
mdiobus_alloc_size
mdiobus_free
mdiobus_get_phy
mdiobus_unregister
mdio_device_create
mdio_device_free
__napi_alloc_skb
@ -1608,6 +1634,7 @@
netdev_pick_tx
netdev_rss_key_fill
netdev_update_features
netif_carrier_off
netif_carrier_on
netif_device_attach
netif_device_detach
@ -1618,11 +1645,13 @@
netif_set_real_num_tx_queues
netif_tx_wake_queue
net_ratelimit
__of_mdiobus_register
page_pool_alloc_pages
page_pool_create
page_pool_destroy
page_pool_put_defragged_page
page_pool_release_page
phy_attached_info
phylink_connect_phy
phylink_destroy
phylink_disconnect_phy
@ -1719,58 +1748,28 @@
ufshcd_system_resume
ufshcd_system_suspend
# required by vbufq-fe-module.ko
kstrndup
# required by vdriver-lib-module.ko
kobject_get
kset_create_and_add
# required by vi2c-fe-module.ko
down
# required by vlx-clk-ctrl-common-module.ko
vscnprintf
# required by vlx-hyp-module.ko
irq_create_of_mapping
irq_dispose_mapping
irq_find_matching_fwspec
of_irq_find_parent
pfn_is_map_memory
pm_power_off
proc_mkdir
# required by vlx-last-kmsg-module.ko
proc_set_size
# required by vlx-prop-module.ko
kobject_uevent
kset_unregister
sysfs_create_bin_file
sysfs_remove_bin_file
# required by vlx-vipc-module.ko
sigprocmask
# required by vthermal-fe-module.ko
thermal_of_zone_register
thermal_of_zone_unregister
# required by vvideo2-fe-module.ko
dma_buf_fd
__task_pid_nr_ns
vprintk
# required by vwatchdog-module.ko
watchdog_register_device
watchdog_unregister_device
# preserved by --additions-only
clk_unregister
console_suspend_enabled
current_work
default_wake_function
dma_buf_fd
dw_pcie_ep_init
dw_pcie_read_dbi
dw_pcie_write_dbi
irq_create_of_mapping
irq_dispose_mapping
irq_find_matching_fwspec
kobject_get
kobject_init_and_add
kobject_uevent
kset_create_and_add
kset_unregister
kstrndup
kthread_complete_and_exit
nonseekable_open
of_clk_del_provider
of_irq_find_parent
pci_disable_device
pci_disable_msi
pci_enable_device
@ -1784,3 +1783,21 @@
__pci_register_driver
pci_request_regions
pci_unregister_driver
pfn_is_map_memory
__platform_driver_probe
pm_power_off
proc_create_data
proc_mkdir
proc_set_size
remove_proc_entry
remove_wait_queue
sigprocmask
strnstr
sysfs_create_bin_file
sysfs_remove_bin_file
__task_pid_nr_ns
thermal_of_zone_register
thermal_of_zone_unregister
vprintk
watchdog_register_device
watchdog_unregister_device


@ -92,6 +92,8 @@
snd_soc_find_dai
spi_bus_type
stack_trace_save_tsk
static_key_enable
static_key_disable
stpcpy
task_rq_lock
tcf_action_exec
@ -162,6 +164,7 @@
__traceiter_android_vh_rwsem_wake_finish
__traceiter_android_vh_adjust_alloc_flags
__traceiter_android_vh_adjust_kvmalloc_flags
__traceiter_android_vh_alloc_pages_slowpath
__traceiter_android_vh_sched_stat_runtime_rt
__traceiter_android_vh_shrink_node_memcgs
__traceiter_android_vh_sync_txn_recvd
@ -260,6 +263,7 @@
__tracepoint_android_vh_rwsem_wake_finish
__tracepoint_android_vh_adjust_alloc_flags
__tracepoint_android_vh_adjust_kvmalloc_flags
__tracepoint_android_vh_alloc_pages_slowpath
__tracepoint_android_vh_sched_stat_runtime_rt
__tracepoint_android_vh_shrink_node_memcgs
__tracepoint_android_vh_sync_txn_recvd


@ -539,6 +539,7 @@
dma_buf_vunmap
dmaengine_unmap_put
dma_fence_add_callback
dma_fence_array_create
dma_fence_context_alloc
dma_fence_default_wait
dma_fence_get_status
@ -2187,6 +2188,7 @@
__traceiter_android_rvh_uclamp_eff_get
__traceiter_android_rvh_update_blocked_fair
__traceiter_android_rvh_update_load_avg
__traceiter_android_rvh_update_load_sum
__traceiter_android_rvh_update_misfit_status
__traceiter_android_rvh_update_rt_rq_load_avg
__traceiter_android_rvh_util_fits_cpu
@ -2292,6 +2294,7 @@
__tracepoint_android_rvh_uclamp_eff_get
__tracepoint_android_rvh_update_blocked_fair
__tracepoint_android_rvh_update_load_avg
__tracepoint_android_rvh_update_load_sum
__tracepoint_android_rvh_update_misfit_status
__tracepoint_android_rvh_update_rt_rq_load_avg
__tracepoint_android_rvh_util_fits_cpu


@ -16,6 +16,107 @@ enum pkvm_psci_notification {
};
#ifdef CONFIG_MODULES
/**
* struct pkvm_module_ops - pKVM modules callbacks
* @create_private_mapping: Map a memory region into the hypervisor private
* range. @haddr returns the virtual address where
* the mapping starts. It can't be unmapped. Host
* access permissions are unaffected.
* @alloc_module_va: Reserve a range of VA space in the hypervisor
* private range. This is handy for modules that
* need to map plugin code in a similar fashion to
* how pKVM maps module code. That space could also
* be used to map memory temporarily, when the
* fixmap granularity (PAGE_SIZE) is too small.
* @map_module_page: Used in conjunction with @alloc_module_va. When
* @is_protected is not set, the page is also
* unmapped from the host stage-2.
* @register_serial_driver: Register a driver for a serial interface. The
* framework only needs a single callback
* @hyp_putc_cb which is expected to print a single
* character.
* @puts: If a serial interface is registered, prints a
* string, else does nothing.
* @putx64: If a serial interface is registered, prints a
* 64-bit number, else does nothing.
* @fixmap_map: Map a page in the per-CPU hypervisor fixmap.
* This is intended to be used for temporary
* mappings in the hypervisor VA space.
* @fixmap_unmap must be called between each
* mapping to do cache maintenance and ensure the
* new mapping is visible.
* @fixmap_unmap: Unmap a page from the hypervisor fixmap. This
* call is required between each @fixmap_map().
* @linear_map_early: Map a large portion of memory into the
* hypervisor linear VA space. This is intended to
* be used only for module bootstrap and must be
* unmapped before the host is deprivileged.
* @linear_unmap_early: See @linear_map_early.
* @flush_dcache_to_poc: Clean the data cache to the point of coherency.
* This is not a requirement for any of the other
* pkvm_module_ops callbacks.
* @update_hcr_el2: Modify the running value of HCR_EL2. pKVM will
* save/restore the new value across power
* management transitions.
* @update_hfgwtr_el2: Modify the running value of HFGWTR_EL2. pKVM
* will save/restore the new value across power
* management transitions.
* @register_host_perm_fault_handler:
* @cb is called whenever the host generates an
* abort with the fault status code Permission
* Fault. Returning -EPERM lets pKVM handle the
* abort. This is useful when a module changes the
* host stage-2 permissions for certain pages.
* @host_stage2_mod_prot: Apply @prot to the page @pfn. This requires a
* permission fault handler to be registered (see
* @register_host_perm_fault_handler), otherwise
* pKVM will be unable to handle this fault and the
* CPU will be stuck in an infinite loop.
* @host_stage2_get_leaf: Query the host's stage2 page-table entry for
* the page @phys.
* @register_host_smc_handler: @cb is called whenever the host issues an SMC
* that pKVM couldn't handle. If @cb returns false, the
* SMC will be forwarded to EL3.
* @register_default_trap_handler:
* @cb is called whenever EL2 traps EL1 and pKVM
* has not handled it. If @cb returns false, the
* hypervisor will panic. This trap handler must be
* registered whenever changes are made to HCR
* (@update_hcr_el2) or HFGWTR
* (@update_hfgwtr_el2).
* @register_illegal_abt_notifier:
* To notify the module of a pending illegal abort
* from the host. On @cb return, the abort will be
* injected back into the host.
* @register_psci_notifier: To notify the module of a pending PSCI event.
* @register_hyp_panic_notifier:
* To notify the module of a pending hypervisor
* panic. On return from @cb, the panic will occur.
* @host_donate_hyp: The page @pfn is unmapped from the host and
* full control is given to the hypervisor.
* @hyp_donate_host: The page @pfn, control of which was previously
* given to the hypervisor (@host_donate_hyp), is
* given back to the host.
* @host_share_hyp: The page @pfn will be shared between the host
* and the hypervisor. Must be followed by
* @pin_shared_mem.
* @host_unshare_hyp: The page @pfn will be unshared and unmapped from
* the hypervisor. Must be called after
* @unpin_shared_mem.
* @pin_shared_mem: After @host_share_hyp, the newly shared page is
* still owned by the host. @pin_shared_mem will
* prevent the host from reclaiming that page until
* the hypervisor releases it (@unpin_shared_mem).
* @unpin_shared_mem: Enable the host to reclaim the shared memory
* (@host_unshare_hyp).
* @memcpy: Same as kernel memcpy, but uses hypervisor VAs.
* @memset: Same as kernel memset, but uses a hypervisor VA.
* @hyp_pa: Return the physical address for a hypervisor
* virtual address in the linear range.
* @hyp_va: Convert a physical address into a virtual one.
* @kern_hyp_va: Convert a kernel virtual address into a
* hypervisor virtual one.
*/
struct pkvm_module_ops {
int (*create_private_mapping)(phys_addr_t phys, size_t size,
enum kvm_pgtable_prot prot,

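To make the ops table concrete, here is a minimal sketch of a hypervisor module plugin written against this interface. The entry-point signature, the header path, and the loader (pkvm_load_el2_module()) are assumptions based on the documentation above; only the pkvm_module_ops callbacks themselves come from this header.

#include <asm/kvm_pkvm_module.h>	/* assumed location of pkvm_module_ops */

/* Single-character output callback for @register_serial_driver; a real
 * module would write @c to a platform UART data register here. */
static void example_hyp_putc(char c)
{
	(void)c;	/* placeholder */
}

/* Hypothetical EL2 init function, passed the ops table by the loader. */
int example_module_init(const struct pkvm_module_ops *ops)
{
	int ret;

	/* After this succeeds, @puts and @putx64 become functional. */
	ret = ops->register_serial_driver(example_hyp_putc);
	if (ret)
		return ret;

	ops->puts("example pKVM module loaded\n");
	ops->putx64(0x1234abcd);
	return 0;
}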

@ -299,6 +299,14 @@ const struct drm_format_info *__drm_format_info(u32 format)
.num_planes = 2, .char_per_block = { 5, 5, 0 },
.block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2,
.vsub = 2, .is_yuv = true },
{ .format = DRM_FORMAT_NV20, .depth = 0,
.num_planes = 2, .char_per_block = { 5, 5, 0 },
.block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 2,
.vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_NV30, .depth = 0,
.num_planes = 2, .char_per_block = { 5, 5, 0 },
.block_w = { 4, 2, 0 }, .block_h = { 1, 1, 0 }, .hsub = 1,
.vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_Q410, .depth = 0,
.num_planes = 3, .char_per_block = { 2, 2, 2 },
.block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 }, .hsub = 1,

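For reference, the block fields above drive pitch computation: NV20 plane 0 packs four 10-bit luma samples into five bytes (char_per_block = 5, block_w = 4), so a 1920-pixel row needs 1920 / 4 * 5 = 2400 bytes. A small sketch using the existing drm_fourcc helpers; the wrapper function itself is illustrative:

#include <drm/drm_fourcc.h>

/* Minimum luma-plane pitch for an NV20 framebuffer of @width pixels. */
static u64 nv20_min_luma_pitch(unsigned int width)
{
	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV20);

	/* e.g. width = 1920 -> (1920 / 4) * 5 = 2400 bytes */
	return drm_format_info_min_pitch(info, 0, width);
}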

@ -33,6 +33,7 @@
#define UINPUT_NAME "uinput"
#define UINPUT_BUFFER_SIZE 16
#define UINPUT_NUM_REQUESTS 16
#define UINPUT_TIMESTAMP_ALLOWED_OFFSET_SECS 10
enum uinput_state { UIST_NEW_DEVICE, UIST_SETUP_COMPLETE, UIST_CREATED };
@ -569,11 +570,40 @@ static int uinput_setup_device_legacy(struct uinput_device *udev,
return retval;
}
/*
* Returns true if the given timestamp is valid (i.e., if all the following
* conditions are satisfied), false otherwise.
* 1) given timestamp is positive
* 2) it's within the allowed offset before the current time
* 3) it's not in the future
*/
static bool is_valid_timestamp(const ktime_t timestamp)
{
ktime_t zero_time;
ktime_t current_time;
ktime_t min_time;
ktime_t offset;
zero_time = ktime_set(0, 0);
if (ktime_compare(zero_time, timestamp) >= 0)
return false;
current_time = ktime_get();
offset = ktime_set(UINPUT_TIMESTAMP_ALLOWED_OFFSET_SECS, 0);
min_time = ktime_sub(current_time, offset);
if (ktime_after(min_time, timestamp) || ktime_after(timestamp, current_time))
return false;
return true;
}
static ssize_t uinput_inject_events(struct uinput_device *udev,
const char __user *buffer, size_t count)
{
struct input_event ev;
size_t bytes = 0;
ktime_t timestamp;
if (count != 0 && count < input_event_size())
return -EINVAL;
@ -588,6 +618,10 @@ static ssize_t uinput_inject_events(struct uinput_device *udev,
if (input_event_from_user(buffer + bytes, &ev))
return -EFAULT;
timestamp = ktime_set(ev.input_event_sec, ev.input_event_usec * NSEC_PER_USEC);
if (is_valid_timestamp(timestamp))
input_set_timestamp(udev->dev, timestamp);
input_event(udev->dev, ev.type, ev.code, ev.value);
bytes += input_event_size();
cond_resched();

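From userspace, the new path is exercised by filling input_event_sec/input_event_usec before writing to the uinput fd. A hedged sketch, assuming a device already created via the usual UI_DEV_SETUP/UI_DEV_CREATE sequence; timestamps that are non-positive, older than UINPUT_TIMESTAMP_ALLOWED_OFFSET_SECS, or in the future are ignored and the event is stamped at delivery time as before:

#include <linux/uinput.h>
#include <sys/time.h>
#include <unistd.h>

/* Inject a key press whose timestamp lies 5 s in the past, i.e. within
 * the 10 s window accepted by is_valid_timestamp(). */
static int inject_backdated_key(int uinput_fd)
{
	struct input_event ev = {0};
	struct timeval now;

	gettimeofday(&now, NULL);

	ev.input_event_sec = now.tv_sec - 5;
	ev.input_event_usec = now.tv_usec;
	ev.type = EV_KEY;
	ev.code = KEY_A;
	ev.value = 1;	/* key down */

	return write(uinput_fd, &ev, sizeof(ev)) == sizeof(ev) ? 0 : -1;
}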

@ -263,10 +263,13 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
return 0;
}
void uvc_function_setup_continue(struct uvc_device *uvc)
void uvc_function_setup_continue(struct uvc_device *uvc, int disable_ep)
{
struct usb_composite_dev *cdev = uvc->func.config->cdev;
if (disable_ep && uvc->video.ep)
usb_ep_disable(uvc->video.ep);
usb_composite_setup_continue(cdev);
}
@ -334,15 +337,11 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
if (uvc->state != UVC_STATE_STREAMING)
return 0;
if (uvc->video.ep)
usb_ep_disable(uvc->video.ep);
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_STREAMOFF;
v4l2_event_queue(&uvc->vdev, &v4l2_event);
uvc->state = UVC_STATE_CONNECTED;
return 0;
return USB_GADGET_DELAYED_STATUS;
case 1:
if (uvc->state != UVC_STATE_CONNECTED)


@ -11,7 +11,7 @@
struct uvc_device;
void uvc_function_setup_continue(struct uvc_device *uvc);
void uvc_function_setup_continue(struct uvc_device *uvc, int disable_ep);
void uvc_function_connect(struct uvc_device *uvc);


@ -81,6 +81,7 @@ struct uvc_request {
struct sg_table sgt;
u8 header[UVCG_REQUEST_HEADER_LEN];
struct uvc_buffer *last_buf;
struct list_head list;
};
struct uvc_video {
@ -101,9 +102,18 @@ struct uvc_video {
unsigned int uvc_num_requests;
/* Requests */
bool is_enabled; /* tracks whether video stream is enabled */
unsigned int req_size;
struct uvc_request *ureq;
struct list_head ureqs; /* all uvc_requests allocated by uvc_video */
/* USB requests that the video pump thread can encode into */
struct list_head req_free;
/*
* USB requests the video pump thread has already encoded into. These are
* ready to be queued to the endpoint.
*/
struct list_head req_ready;
spinlock_t req_lock;
unsigned int req_int_count;
@ -175,7 +185,7 @@ struct uvc_file_handle {
* Functions
*/
extern void uvc_function_setup_continue(struct uvc_device *uvc);
extern void uvc_function_setup_continue(struct uvc_device *uvc, int disable_ep);
extern void uvc_function_connect(struct uvc_device *uvc);
extern void uvc_function_disconnect(struct uvc_device *uvc);


@ -449,7 +449,7 @@ uvc_v4l2_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
return -EINVAL;
/* Enable UVC video. */
ret = uvcg_video_enable(video, 1);
ret = uvcg_video_enable(video);
if (ret < 0)
return ret;
@ -457,7 +457,7 @@ uvc_v4l2_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
* Complete the alternate setting selection setup phase now that
* userspace is ready to provide video frames.
*/
uvc_function_setup_continue(uvc);
uvc_function_setup_continue(uvc, 0);
uvc->state = UVC_STATE_STREAMING;
return 0;
@ -469,11 +469,18 @@ uvc_v4l2_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
int ret = 0;
if (type != video->queue.queue.type)
return -EINVAL;
return uvcg_video_enable(video, 0);
ret = uvcg_video_disable(video);
if (ret < 0)
return ret;
uvc->state = UVC_STATE_CONNECTED;
uvc_function_setup_continue(uvc, 1);
return 0;
}
static int
@ -506,7 +513,7 @@ uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
static void uvc_v4l2_disable(struct uvc_device *uvc)
{
uvc_function_disconnect(uvc);
uvcg_video_enable(&uvc->video, 0);
uvcg_video_disable(&uvc->video);
uvcg_free_buffers(&uvc->video.queue);
uvc->func_connected = false;
wake_up_interruptible(&uvc->func_connected_queue);
@ -653,4 +660,3 @@ const struct v4l2_file_operations uvc_v4l2_fops = {
.get_unmapped_area = uvcg_v4l2_get_unmapped_area,
#endif
};


@ -227,6 +227,28 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
* Request handling
*/
/*
* Callers must take care to hold req_lock when this function may be called
* from multiple threads. For example, when frames are streaming to the host.
*/
static void
uvc_video_free_request(struct uvc_request *ureq, struct usb_ep *ep)
{
sg_free_table(&ureq->sgt);
if (ureq->req && ep) {
usb_ep_free_request(ep, ureq->req);
ureq->req = NULL;
}
kfree(ureq->req_buffer);
ureq->req_buffer = NULL;
if (!list_empty(&ureq->list))
list_del_init(&ureq->list);
kfree(ureq);
}
static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
{
int ret;
@ -247,14 +269,128 @@ static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
return ret;
}
/* This function must be called with video->req_lock held. */
static int uvcg_video_usb_req_queue(struct uvc_video *video,
struct usb_request *req, bool queue_to_ep)
{
bool is_bulk = video->max_payload_size;
struct list_head *list = NULL;
if (!video->is_enabled) {
uvc_video_free_request(req->context, video->ep);
return -ENODEV;
}
if (queue_to_ep) {
struct uvc_request *ureq = req->context;
/*
* With USB3 handling more requests at a higher speed, we can't
* afford to generate an interrupt for every request. Decide to
* interrupt:
*
* - When no more requests are available in the free queue, as
* this may be our last chance to refill the endpoint's
* request queue.
*
* - When this request is the last request for the video
* buffer, as we want to start sending the next video buffer
* ASAP in case it doesn't get started already in the next
* iteration of this loop.
*
* - Four times over the length of the requests queue (as
* indicated by video->uvc_num_requests), as a trade-off
* between latency and interrupt load.
*/
if (list_empty(&video->req_free) || ureq->last_buf ||
!(video->req_int_count %
DIV_ROUND_UP(video->uvc_num_requests, 4))) {
video->req_int_count = 0;
req->no_interrupt = 0;
} else {
req->no_interrupt = 1;
}
video->req_int_count++;
return uvcg_video_ep_queue(video, req);
}
/*
* If we're not queuing to the ep, for isoc we're queuing
* to the req_ready list, otherwise req_free.
*/
list = is_bulk ? &video->req_free : &video->req_ready;
list_add_tail(&req->list, list);
return 0;
}
/*
* Must only be called from uvcg_video_enable - since after that we only want to
* queue requests to the endpoint from the uvc_video_complete() completion
* handler. This function is needed in order to 'kick start' the flow of
* requests from the gadget driver to the USB controller.
*/
static void uvc_video_ep_queue_initial_requests(struct uvc_video *video)
{
struct usb_request *req = NULL;
unsigned long flags = 0;
unsigned int count = 0;
int ret = 0;
/*
* We only queue half of the free list since we still want to have
* some free usb_requests in the free list for the video_pump async_wq
* thread to encode uvc buffers into. Otherwise we could get into a
* situation where the free list does not have any usb requests to
* encode into - we always end up queueing 0 length requests to the
* endpoint.
*/
unsigned int half_list_size = video->uvc_num_requests / 2;
spin_lock_irqsave(&video->req_lock, flags);
/*
* Take these requests off the free list and queue them all to the
* endpoint. Since we queue 0 length requests with the req_lock held,
* there isn't any 'data' race involved here with the complete handler.
*/
while (count < half_list_size) {
req = list_first_entry(&video->req_free, struct usb_request,
list);
list_del(&req->list);
req->length = 0;
ret = uvcg_video_ep_queue(video, req);
if (ret < 0) {
uvcg_queue_cancel(&video->queue, 0);
break;
}
count++;
}
spin_unlock_irqrestore(&video->req_lock, flags);
}
static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
struct uvc_request *ureq = req->context;
struct uvc_video *video = ureq->video;
struct uvc_video_queue *queue = &video->queue;
struct uvc_device *uvc = video->uvc;
struct uvc_buffer *last_buf;
unsigned long flags;
bool is_bulk = video->max_payload_size;
int ret = 0;
spin_lock_irqsave(&video->req_lock, flags);
if (!video->is_enabled) {
/*
* When is_enabled is false, uvcg_video_disable() ensures
* that in-flight uvc_buffers are returned, so we can
* safely call free_request without worrying about
* last_buf.
*/
uvc_video_free_request(ureq, ep);
spin_unlock_irqrestore(&video->req_lock, flags);
return;
}
last_buf = ureq->last_buf;
ureq->last_buf = NULL;
spin_unlock_irqrestore(&video->req_lock, flags);
switch (req->status) {
case 0:
@ -277,44 +413,76 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
uvcg_queue_cancel(queue, 0);
}
if (ureq->last_buf) {
uvcg_complete_buffer(&video->queue, ureq->last_buf);
ureq->last_buf = NULL;
if (last_buf) {
spin_lock_irqsave(&queue->irqlock, flags);
uvcg_complete_buffer(queue, last_buf);
spin_unlock_irqrestore(&queue->irqlock, flags);
}
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
/*
* The video stream might have been disabled while we were
* processing the current usb_request. So make sure we're
* still streaming before queueing the usb_request back to
* req_free.
*/
if (video->is_enabled) {
/*
* Here we check whether any request is available in the ready
* list. If it is, queue it to the ep and add the current
* usb_request to the req_free list - for video_pump to fill in.
* Otherwise, just use the current usb_request to queue a 0
* length request to the ep. Since we always add to the req_free
* list if we dequeue from the ready list, there will never
* be a situation where the req_free list is completely out of
* requests and cannot recover.
*/
struct usb_request *to_queue = req;
if (uvc->state == UVC_STATE_STREAMING)
queue_work(video->async_wq, &video->pump);
to_queue->length = 0;
if (!list_empty(&video->req_ready)) {
to_queue = list_first_entry(&video->req_ready,
struct usb_request, list);
list_del(&to_queue->list);
list_add_tail(&req->list, &video->req_free);
/*
* Queue work to the wq as well since it is possible that a
* buffer may not have been completely encoded with the set of
* in-flight usb requests for which the complete callbacks are
* firing.
* In that case, if we do not queue work to the worker thread,
* the buffer will never be marked as complete - and therefore
* not be returned to userspace. As a result, the
* dequeue -> queue -> dequeue flow of uvc buffers will not
* happen.
*/
queue_work(video->async_wq, &video->pump);
}
/*
* Queue to the endpoint. The actual queueing to ep will
* only happen on one thread - the async_wq for bulk endpoints
* and this thread for isoc endpoints.
*/
ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
if (ret < 0)
uvcg_queue_cancel(queue, 0);
} else {
uvc_video_free_request(ureq, ep);
}
spin_unlock_irqrestore(&video->req_lock, flags);
}
static int
uvc_video_free_requests(struct uvc_video *video)
{
unsigned int i;
struct uvc_request *ureq, *temp;
if (video->ureq) {
for (i = 0; i < video->uvc_num_requests; ++i) {
sg_free_table(&video->ureq[i].sgt);
if (video->ureq[i].req) {
usb_ep_free_request(video->ep, video->ureq[i].req);
video->ureq[i].req = NULL;
}
if (video->ureq[i].req_buffer) {
kfree(video->ureq[i].req_buffer);
video->ureq[i].req_buffer = NULL;
}
}
kfree(video->ureq);
video->ureq = NULL;
}
list_for_each_entry_safe(ureq, temp, &video->ureqs, list)
uvc_video_free_request(ureq, video->ep);
INIT_LIST_HEAD(&video->ureqs);
INIT_LIST_HEAD(&video->req_free);
INIT_LIST_HEAD(&video->req_ready);
video->req_size = 0;
return 0;
}
@ -322,6 +490,7 @@ uvc_video_free_requests(struct uvc_video *video)
static int
uvc_video_alloc_requests(struct uvc_video *video)
{
struct uvc_request *ureq;
unsigned int req_size;
unsigned int i;
int ret = -ENOMEM;
@ -332,29 +501,33 @@ uvc_video_alloc_requests(struct uvc_video *video)
* max_t(unsigned int, video->ep->maxburst, 1)
* (video->ep->mult);
video->ureq = kcalloc(video->uvc_num_requests, sizeof(struct uvc_request), GFP_KERNEL);
if (video->ureq == NULL)
return -ENOMEM;
for (i = 0; i < video->uvc_num_requests; ++i) {
video->ureq[i].req_buffer = kmalloc(req_size, GFP_KERNEL);
if (video->ureq[i].req_buffer == NULL)
for (i = 0; i < video->uvc_num_requests; i++) {
ureq = kzalloc(sizeof(struct uvc_request), GFP_KERNEL);
if (ureq == NULL)
goto error;
video->ureq[i].req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
if (video->ureq[i].req == NULL)
INIT_LIST_HEAD(&ureq->list);
list_add_tail(&ureq->list, &video->ureqs);
ureq->req_buffer = kmalloc(req_size, GFP_KERNEL);
if (ureq->req_buffer == NULL)
goto error;
video->ureq[i].req->buf = video->ureq[i].req_buffer;
video->ureq[i].req->length = 0;
video->ureq[i].req->complete = uvc_video_complete;
video->ureq[i].req->context = &video->ureq[i];
video->ureq[i].video = video;
video->ureq[i].last_buf = NULL;
ureq->req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
if (ureq->req == NULL)
goto error;
list_add_tail(&video->ureq[i].req->list, &video->req_free);
ureq->req->buf = ureq->req_buffer;
ureq->req->length = 0;
ureq->req->complete = uvc_video_complete;
ureq->req->context = ureq;
ureq->video = video;
ureq->last_buf = NULL;
list_add_tail(&ureq->req->list, &video->req_free);
/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
sg_alloc_table(&video->ureq[i].sgt,
sg_alloc_table(&ureq->sgt,
DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN,
PAGE_SIZE) + 2, GFP_KERNEL);
}
@ -387,16 +560,18 @@ static void uvcg_video_pump(struct work_struct *work)
struct usb_request *req = NULL;
struct uvc_buffer *buf;
unsigned long flags;
bool buf_done;
int ret;
int ret = 0;
while (true) {
if (!video->ep->enabled)
return;
while (video->ep->enabled) {
/*
* Retrieve the first available USB request, protected by the
* request lock.
* Check is_enabled and retrieve the first available USB
* request, protected by the request lock.
*/
spin_lock_irqsave(&video->req_lock, flags);
if (list_empty(&video->req_free)) {
if (!video->is_enabled || list_empty(&video->req_free)) {
spin_unlock_irqrestore(&video->req_lock, flags);
return;
}
@ -414,15 +589,6 @@ static void uvcg_video_pump(struct work_struct *work)
if (buf != NULL) {
video->encode(req, video, buf);
buf_done = buf->state == UVC_BUF_STATE_DONE;
} else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) {
/*
* No video buffer available; the queue is still connected and
* we're transferring over ISOC. Queue a 0 length request to
* prevent missed ISOC transfers.
*/
req->length = 0;
buf_done = false;
} else {
/*
* Either the queue has been disconnected or no video buffer
@ -433,62 +599,123 @@ static void uvcg_video_pump(struct work_struct *work)
break;
}
/*
* With USB3 handling more requests at a higher speed, we can't
* afford to generate an interrupt for every request. Decide to
* interrupt:
*
* - When no more requests are available in the free queue, as
* this may be our last chance to refill the endpoint's
* request queue.
*
* - When this request is the last request for the video
* buffer, as we want to start sending the next video buffer
* ASAP in case it doesn't get started already in the next
* iteration of this loop.
*
* - Four times over the length of the requests queue (as
* indicated by video->uvc_num_requests), as a trade-off
* between latency and interrupt load.
*/
if (list_empty(&video->req_free) || buf_done ||
!(video->req_int_count %
DIV_ROUND_UP(video->uvc_num_requests, 4))) {
video->req_int_count = 0;
req->no_interrupt = 0;
} else {
req->no_interrupt = 1;
}
/* Queue the USB request */
ret = uvcg_video_ep_queue(video, req);
spin_unlock_irqrestore(&queue->irqlock, flags);
spin_lock_irqsave(&video->req_lock, flags);
/* For bulk endpoints we queue from the worker thread,
* since we would prefer not to wait for requests to become
* ready in the uvcg_video_complete() handler.
* For isoc endpoints we add the request to the ready list
* and only queue it to the endpoint from the complete handler.
*/
ret = uvcg_video_usb_req_queue(video, req, is_bulk);
spin_unlock_irqrestore(&video->req_lock, flags);
if (ret < 0) {
uvcg_queue_cancel(queue, 0);
break;
}
/* Endpoint now owns the request */
/* The request is owned by the endpoint / ready list. */
req = NULL;
video->req_int_count++;
}
if (!req)
return;
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
if (video->is_enabled)
list_add_tail(&req->list, &video->req_free);
else
uvc_video_free_request(req->context, video->ep);
spin_unlock_irqrestore(&video->req_lock, flags);
return;
}
/*
* Enable or disable the video stream.
* Disable the video stream
*/
int uvcg_video_enable(struct uvc_video *video, int enable)
int
uvcg_video_disable(struct uvc_video *video)
{
unsigned long flags;
struct list_head inflight_bufs;
struct usb_request *req, *temp;
struct uvc_buffer *buf, *btemp;
struct uvc_request *ureq, *utemp;
if (video->ep == NULL) {
uvcg_info(&video->uvc->func,
"Video disable failed, device is uninitialized.\n");
return -ENODEV;
}
INIT_LIST_HEAD(&inflight_bufs);
spin_lock_irqsave(&video->req_lock, flags);
video->is_enabled = false;
/*
* Remove any in-flight buffers from the uvc_requests
* because we want to return them before cancelling the
* queue. This ensures that we aren't stuck waiting for
* all complete callbacks to come through before disabling
* vb2 queue.
*/
list_for_each_entry(ureq, &video->ureqs, list) {
if (ureq->last_buf) {
list_add_tail(&ureq->last_buf->queue, &inflight_bufs);
ureq->last_buf = NULL;
}
}
spin_unlock_irqrestore(&video->req_lock, flags);
cancel_work_sync(&video->pump);
uvcg_queue_cancel(&video->queue, 0);
spin_lock_irqsave(&video->req_lock, flags);
/*
* Remove all uvc_requests from ureqs with list_del_init().
* This lets uvc_video_free_request correctly identify
* if the uvc_request is attached to a list or not when freeing
* memory.
*/
list_for_each_entry_safe(ureq, utemp, &video->ureqs, list)
list_del_init(&ureq->list);
list_for_each_entry_safe(req, temp, &video->req_free, list) {
list_del(&req->list);
uvc_video_free_request(req->context, video->ep);
}
list_for_each_entry_safe(req, temp, &video->req_ready, list) {
list_del(&req->list);
uvc_video_free_request(req->context, video->ep);
}
INIT_LIST_HEAD(&video->ureqs);
INIT_LIST_HEAD(&video->req_free);
INIT_LIST_HEAD(&video->req_ready);
video->req_size = 0;
spin_unlock_irqrestore(&video->req_lock, flags);
/*
* Return all the video buffers before disabling the queue.
*/
spin_lock_irqsave(&video->queue.irqlock, flags);
list_for_each_entry_safe(buf, btemp, &inflight_bufs, queue) {
list_del(&buf->queue);
uvcg_complete_buffer(&video->queue, buf);
}
spin_unlock_irqrestore(&video->queue.irqlock, flags);
uvcg_queue_enable(&video->queue, 0);
return 0;
}
/*
* Enable the video stream.
*/
int uvcg_video_enable(struct uvc_video *video)
{
unsigned int i;
int ret;
if (video->ep == NULL) {
@ -497,18 +724,13 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
return -ENODEV;
}
if (!enable) {
cancel_work_sync(&video->pump);
uvcg_queue_cancel(&video->queue, 0);
for (i = 0; i < video->uvc_num_requests; ++i)
if (video->ureq && video->ureq[i].req)
usb_ep_dequeue(video->ep, video->ureq[i].req);
uvc_video_free_requests(video);
uvcg_queue_enable(&video->queue, 0);
return 0;
}
/*
* Safe to access request related fields without req_lock because
* this is the only thread currently active, and no other
* request handling thread will become active until this function
* returns.
*/
video->is_enabled = true;
if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
return ret;
@ -525,7 +747,7 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
video->req_int_count = 0;
queue_work(video->async_wq, &video->pump);
uvc_video_ep_queue_initial_requests(video);
return ret;
}
@ -535,7 +757,10 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
*/
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
video->is_enabled = false;
INIT_LIST_HEAD(&video->ureqs);
INIT_LIST_HEAD(&video->req_free);
INIT_LIST_HEAD(&video->req_ready);
spin_lock_init(&video->req_lock);
INIT_WORK(&video->pump, uvcg_video_pump);


@ -14,7 +14,8 @@
struct uvc_video;
int uvcg_video_enable(struct uvc_video *video, int enable);
int uvcg_video_enable(struct uvc_video *video);
int uvcg_video_disable(struct uvc_video *video);
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc);


@ -37,3 +37,26 @@ bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu)
else
return false;
}
/**
* gzvm_handle_guest_hvc() - Handle guest hvc
* @vcpu: Pointer to the struct gzvm_vcpu
* Return:
* * true - This hvc has been handled; no need to go back to the VMM.
* * false - This hvc has not been handled and requires handling in userspace.
*/
bool gzvm_handle_guest_hvc(struct gzvm_vcpu *vcpu)
{
unsigned long ipa;
int ret;
switch (vcpu->run->hypercall.args[0]) {
case GZVM_HVC_MEM_RELINQUISH:
ipa = vcpu->run->hypercall.args[1];
ret = gzvm_handle_relinquish(vcpu, ipa);
return (ret == 0) ? true : false;
default:
break;
}
return false;
}


@ -123,6 +123,16 @@ static int cmp_ppages(struct rb_node *node, const struct rb_node *parent)
return 0;
}
static int rb_ppage_cmp(const void *key, const struct rb_node *node)
{
struct gzvm_pinned_page *p = container_of(node,
struct gzvm_pinned_page,
node);
phys_addr_t ipa = (phys_addr_t)key;
return (ipa < p->ipa) ? -1 : (ipa > p->ipa);
}
static int gzvm_insert_ppage(struct gzvm *vm, struct gzvm_pinned_page *ppage)
{
if (rb_find_add(&ppage->node, &vm->pinned_pages, cmp_ppages))
@ -157,6 +167,33 @@ static int pin_one_page(struct gzvm *vm, unsigned long hva, u64 gpa)
return 0;
}
/**
* gzvm_handle_relinquish() - Handle memory relinquish request from hypervisor
*
* @vcpu: Pointer to the struct gzvm_vcpu
* @ipa: Start address (GPA) of the reclaimed page
*
* Return: Always returns 0; there are no failure cases
*/
int gzvm_handle_relinquish(struct gzvm_vcpu *vcpu, phys_addr_t ipa)
{
struct gzvm_pinned_page *ppage;
struct rb_node *node;
struct gzvm *vm = vcpu->gzvm;
node = rb_find((void *)ipa, &vm->pinned_pages, rb_ppage_cmp);
if (node)
rb_erase(node, &vm->pinned_pages);
else
return 0;
ppage = container_of(node, struct gzvm_pinned_page, node);
unpin_user_pages_dirty_lock(&ppage->page, 1, true);
kfree(ppage);
return 0;
}
static int handle_block_demand_page(struct gzvm *vm, int memslot_id, u64 gfn)
{
unsigned long hva;


@ -113,12 +113,14 @@ static long gzvm_vcpu_run(struct gzvm_vcpu *vcpu, void __user *argp)
* it's geniezone's responsibility to fill corresponding data
* structure
*/
case GZVM_EXIT_HYPERCALL:
if (!gzvm_handle_guest_hvc(vcpu))
need_userspace = true;
break;
case GZVM_EXIT_EXCEPTION:
if (!gzvm_handle_guest_exception(vcpu))
need_userspace = true;
break;
case GZVM_EXIT_HYPERCALL:
fallthrough;
case GZVM_EXIT_DEBUG:
fallthrough;
case GZVM_EXIT_FAIL_ENTRY:


@ -172,6 +172,8 @@ int gzvm_gfn_to_pfn_memslot(struct gzvm_memslot *memslot, u64 gfn, u64 *pfn);
int gzvm_find_memslot(struct gzvm *vm, u64 gpa);
int gzvm_handle_page_fault(struct gzvm_vcpu *vcpu);
bool gzvm_handle_guest_exception(struct gzvm_vcpu *vcpu);
int gzvm_handle_relinquish(struct gzvm_vcpu *vcpu, phys_addr_t ipa);
bool gzvm_handle_guest_hvc(struct gzvm_vcpu *vcpu);
int gzvm_arch_create_device(u16 vm_id, struct gzvm_create_device *gzvm_dev);
int gzvm_arch_inject_irq(struct gzvm *gzvm, unsigned int vcpu_idx,


@ -419,6 +419,11 @@ DECLARE_RESTRICTED_HOOK(android_rvh_update_load_avg,
TP_PROTO(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se),
TP_ARGS(now, cfs_rq, se), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_update_load_sum,
TP_PROTO(struct sched_avg *sa, u64 *delta, unsigned int *sched_pelt_lshift),
TP_ARGS(sa, delta, sched_pelt_lshift), 1);
DECLARE_RESTRICTED_HOOK(android_rvh_remove_entity_load_avg,
TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
TP_ARGS(cfs_rq, se), 1);

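For context, a vendor module attaches to a restricted hook like this one through the register_trace_android_rvh_* helper generated by DECLARE_RESTRICTED_HOOK (restricted hooks can be registered only once and never unregistered). A minimal sketch; the handler body is purely illustrative:

#include <linux/module.h>
#include <trace/hooks/sched.h>

/* Handler signature follows TP_PROTO above, preceded by the usual
 * void *data argument. */
static void vendor_update_load_sum(void *data, struct sched_avg *sa,
				   u64 *delta, unsigned int *sched_pelt_lshift)
{
	/* Illustrative only: apply a vendor-chosen PELT time shift. */
	if (*sched_pelt_lshift)
		*delta <<= *sched_pelt_lshift;
}

static int __init vendor_hook_init(void)
{
	return register_trace_android_rvh_update_load_sum(vendor_update_load_sum,
							  NULL);
}
module_init(vendor_hook_init);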

@ -311,6 +311,8 @@ extern "C" {
* index 1 = Cr:Cb plane, [39:0] Cr1:Cb1:Cr0:Cb0 little endian
*/
#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') /* 2x2 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV30 fourcc_code('N', 'V', '3', '0') /* non-subsampled Cr:Cb plane */
/*
* 2 plane YCbCr MSB aligned


@ -193,6 +193,11 @@ enum {
GZVM_EXCEPTION_PAGE_FAULT = 0x1,
};
/* hypercall definitions of GZVM_EXIT_HYPERCALL */
enum {
GZVM_HVC_MEM_RELINQUISH = 0xc6000009,
};
/**
* struct gzvm_vcpu_run: Same purpose as kvm_run, this struct is
* shared between userspace, kernel and


@ -191,6 +191,7 @@ static int __restore_freezer_state(struct task_struct *p, void *arg)
if (state != TASK_RUNNING) {
WRITE_ONCE(p->__state, state);
p->saved_state = TASK_RUNNING;
return 1;
}
@ -205,7 +206,7 @@ void __thaw_task(struct task_struct *p)
if (WARN_ON_ONCE(freezing(p)))
goto unlock;
if (task_call_func(p, __restore_freezer_state, NULL))
if (!frozen(p) || task_call_func(p, __restore_freezer_state, NULL))
goto unlock;
wake_up_state(p, TASK_FROZEN);


@ -404,6 +404,7 @@ struct bm_position {
struct mem_zone_bm_rtree *zone;
struct rtree_node *node;
unsigned long node_pfn;
unsigned long cur_pfn;
int node_bit;
};
@ -589,6 +590,7 @@ static void memory_bm_position_reset(struct memory_bitmap *bm)
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
struct rtree_node, list);
bm->cur.node_pfn = 0;
bm->cur.cur_pfn = BM_END_OF_MAP;
bm->cur.node_bit = 0;
}
@ -799,6 +801,7 @@ node_found:
bm->cur.zone = zone;
bm->cur.node = node;
bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
bm->cur.cur_pfn = pfn;
/* Set return values */
*addr = node->data;
@ -850,6 +853,11 @@ static void memory_bm_clear_current(struct memory_bitmap *bm)
clear_bit(bit, bm->cur.node->data);
}
static unsigned long memory_bm_get_current(struct memory_bitmap *bm)
{
return bm->cur.cur_pfn;
}
static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
@ -929,10 +937,12 @@ static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
if (bit < bits) {
pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
bm->cur.node_bit = bit + 1;
bm->cur.cur_pfn = pfn;
return pfn;
}
} while (rtree_next_node(bm));
bm->cur.cur_pfn = BM_END_OF_MAP;
return BM_END_OF_MAP;
}
@ -1371,14 +1381,19 @@ static unsigned int count_data_pages(void)
/*
* This is needed, because copy_page and memcpy are not usable for copying
* task structs.
* task structs. Returns true if the page was filled with only zeros,
* otherwise false.
*/
static inline void do_copy_page(long *dst, long *src)
static inline bool do_copy_page(long *dst, long *src)
{
long z = 0;
int n;
for (n = PAGE_SIZE / sizeof(long); n; n--)
for (n = PAGE_SIZE / sizeof(long); n; n--) {
z |= *src;
*dst++ = *src++;
}
return !z;
}
/**
@ -1387,17 +1402,21 @@ static inline void do_copy_page(long *dst, long *src)
* Check if the page we are going to copy is marked as present in the kernel
* page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
* CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
* always returns 'true'.
* always returns 'true'. Returns true if the page was entirely composed of
* zeros, otherwise it will return false.
*/
static void safe_copy_page(void *dst, struct page *s_page)
static bool safe_copy_page(void *dst, struct page *s_page)
{
bool zeros_only;
if (kernel_page_present(s_page)) {
do_copy_page(dst, page_address(s_page));
zeros_only = do_copy_page(dst, page_address(s_page));
} else {
hibernate_map_page(s_page);
do_copy_page(dst, page_address(s_page));
zeros_only = do_copy_page(dst, page_address(s_page));
hibernate_unmap_page(s_page);
}
return zeros_only;
}
#ifdef CONFIG_HIGHMEM
@ -1407,17 +1426,18 @@ static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn
saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}
static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
struct page *s_page, *d_page;
void *src, *dst;
bool zeros_only;
s_page = pfn_to_page(src_pfn);
d_page = pfn_to_page(dst_pfn);
if (PageHighMem(s_page)) {
src = kmap_atomic(s_page);
dst = kmap_atomic(d_page);
do_copy_page(dst, src);
zeros_only = do_copy_page(dst, src);
kunmap_atomic(dst);
kunmap_atomic(src);
} else {
@ -1426,30 +1446,39 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
* The page pointed to by src may contain some kernel
* data modified by kmap_atomic()
*/
safe_copy_page(buffer, s_page);
zeros_only = safe_copy_page(buffer, s_page);
dst = kmap_atomic(d_page);
copy_page(dst, buffer);
kunmap_atomic(dst);
} else {
safe_copy_page(page_address(d_page), s_page);
zeros_only = safe_copy_page(page_address(d_page), s_page);
}
}
return zeros_only;
}
#else
#define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
static inline int copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
safe_copy_page(page_address(pfn_to_page(dst_pfn)),
return safe_copy_page(page_address(pfn_to_page(dst_pfn)),
pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */
static void copy_data_pages(struct memory_bitmap *copy_bm,
struct memory_bitmap *orig_bm)
/*
* copy_data_pages() will copy all pages into pages pulled from the copy_bm.
* If a page was entirely filled with zeros it will be marked in the zero_bm.
*
* Returns the number of pages copied.
*/
static unsigned long copy_data_pages(struct memory_bitmap *copy_bm,
struct memory_bitmap *orig_bm,
struct memory_bitmap *zero_bm)
{
unsigned long copied_pages = 0;
struct zone *zone;
unsigned long pfn;
unsigned long pfn, copy_pfn;
for_each_populated_zone(zone) {
unsigned long max_zone_pfn;
@ -1462,18 +1491,29 @@ static void copy_data_pages(struct memory_bitmap *copy_bm,
}
memory_bm_position_reset(orig_bm);
memory_bm_position_reset(copy_bm);
copy_pfn = memory_bm_next_pfn(copy_bm);
for(;;) {
pfn = memory_bm_next_pfn(orig_bm);
if (unlikely(pfn == BM_END_OF_MAP))
break;
copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
if (copy_data_page(copy_pfn, pfn)) {
memory_bm_set_bit(zero_bm, pfn);
/* Use this copy_pfn for a page that is not full of zeros */
continue;
}
copied_pages++;
copy_pfn = memory_bm_next_pfn(copy_bm);
}
return copied_pages;
}
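
Note the allocation discipline in the loop above: copy_pfn is advanced only after a page has actually been stored, so a page that turns out to be all zeros is merely flagged in zero_bm and its destination frame is reused for the next candidate. A toy model of that bookkeeping over plain arrays (all names hypothetical, not kernel API):

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy predicate standing in for copy_data_page()'s return value. */
	static bool page_is_all_zeros(int pfn)
	{
		return pfn % 3 == 0;
	}

	int main(void)
	{
		int orig[] = { 1, 2, 3, 4, 5, 6 };	/* "saveable" pfns */
		bool zero_marked[7] = { false };	/* toy zero_bm */
		int copied = 0, copy_slot = 0;		/* next free destination frame */

		for (int i = 0; i < 6; i++) {
			if (page_is_all_zeros(orig[i])) {
				zero_marked[orig[i]] = true;	/* flag it, keep the slot */
				continue;
			}
			copied++;
			copy_slot++;	/* a frame is consumed only on a real copy */
		}
		printf("copied=%d, frames used=%d\n", copied, copy_slot);	/* 4, 4 */
		return 0;
	}
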
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/* Number of zero pages */
static unsigned int nr_zero_pages;
/*
* Numbers of normal and highmem page frames allocated for hibernation image
* before suspending devices.
@ -1494,6 +1534,9 @@ static struct memory_bitmap orig_bm;
*/
static struct memory_bitmap copy_bm;
/* Memory bitmap which tracks which saveable pages were zero filled. */
static struct memory_bitmap zero_bm;
/**
* swsusp_free - Free pages allocated for hibernation image.
*
@ -1538,6 +1581,7 @@ loop:
out:
nr_copy_pages = 0;
nr_meta_pages = 0;
nr_zero_pages = 0;
restore_pblist = NULL;
buffer = NULL;
alloc_normal = 0;
@ -1756,8 +1800,15 @@ int hibernate_preallocate_memory(void)
goto err_out;
}
error = memory_bm_create(&zero_bm, GFP_IMAGE, PG_ANY);
if (error) {
pr_err("Cannot allocate zero bitmap\n");
goto err_out;
}
alloc_normal = 0;
alloc_highmem = 0;
nr_zero_pages = 0;
/* Count the number of saveable data pages. */
save_highmem = count_highmem_pages();
@ -2037,19 +2088,19 @@ asmlinkage __visible int swsusp_save(void)
* Kill them.
*/
drain_local_pages(NULL);
copy_data_pages(&copy_bm, &orig_bm);
nr_copy_pages = copy_data_pages(&copy_bm, &orig_bm, &zero_bm);
/*
* End of critical section. From now on, we can write to memory,
* but we should not touch disk. This specially means we must _not_
* touch swap space! Except we must write out our image of course.
*/
nr_pages += nr_highmem;
nr_copy_pages = nr_pages;
/* We don't actually copy the zero pages */
nr_zero_pages = nr_pages - nr_copy_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
pr_info("Image created (%d pages copied)\n", nr_pages);
pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);
return 0;
}
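
After this change nr_copy_pages counts only the pages physically copied and nr_zero_pages the all-zero remainder, while nr_meta_pages is still sized from the total, since every saveable page, zero or not, needs a packed PFN slot. For scale, with 4 KiB pages and 64-bit longs each metadata page holds 512 entries; a rough check (constants assumed here, not read from the build):

	#include <stdio.h>

	#define PAGE_SIZE 4096
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long nr_pages = 200000;	/* copied + zero pages */
		unsigned long nr_meta = DIV_ROUND_UP(nr_pages * sizeof(long),
						     PAGE_SIZE);

		printf("%lu metadata pages\n", nr_meta);	/* 391 */
		return 0;
	}
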
@ -2095,15 +2146,22 @@ static int init_header(struct swsusp_info *info)
return init_header_complete(info);
}
#define ENCODED_PFN_ZERO_FLAG ((unsigned long)1 << (BITS_PER_LONG - 1))
#define ENCODED_PFN_MASK (~ENCODED_PFN_ZERO_FLAG)
/**
* pack_pfns - Prepare PFNs for saving.
* @bm: Memory bitmap.
* @buf: Memory buffer to store the PFNs in.
* @zero_bm: Memory bitmap containing PFNs of zero pages.
*
* PFNs corresponding to set bits in @bm are stored in the area of memory
* pointed to by @buf (1 page at a time).
* pointed to by @buf (1 page at a time). Pages which were filled with only
* zeros will have the highest bit set in the packed format to distinguish
* them from PFNs which will be contained in the image file.
*/
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm,
struct memory_bitmap *zero_bm)
{
int j;
@ -2111,6 +2169,8 @@ static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
buf[j] = memory_bm_next_pfn(bm);
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
if (memory_bm_test_bit(zero_bm, buf[j]))
buf[j] |= ENCODED_PFN_ZERO_FLAG;
}
}
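
Stealing the top bit of the stored word for the zero flag works because, in practice, real PFNs never approach the top of the unsigned long range, so pack_pfns() and unpack_orig_pfns() can round-trip the flag losslessly. A standalone round-trip of the scheme:

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define ENCODED_PFN_ZERO_FLAG	(1UL << (sizeof(unsigned long) * CHAR_BIT - 1))
	#define ENCODED_PFN_MASK	(~ENCODED_PFN_ZERO_FLAG)

	int main(void)
	{
		unsigned long pfn = 0x1234;

		/* pack: flag the PFN as an all-zero page */
		unsigned long packed = pfn | ENCODED_PFN_ZERO_FLAG;

		/* unpack: split flag and PFN back apart */
		bool zero = packed & ENCODED_PFN_ZERO_FLAG;
		unsigned long decoded = packed & ENCODED_PFN_MASK;

		printf("pfn=%#lx zero=%d\n", decoded, zero);	/* pfn=0x1234 zero=1 */
		return 0;
	}
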
@ -2152,7 +2212,7 @@ int snapshot_read_next(struct snapshot_handle *handle)
memory_bm_position_reset(&copy_bm);
} else if (handle->cur <= nr_meta_pages) {
clear_page(buffer);
pack_pfns(buffer, &orig_bm);
pack_pfns(buffer, &orig_bm, &zero_bm);
} else {
struct page *page;
@ -2248,22 +2308,37 @@ static int load_header(struct swsusp_info *info)
* unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
* @bm: Memory bitmap.
* @buf: Area of memory containing the PFNs.
* @zero_bm: Memory bitmap with the zero PFNs marked.
*
* For each element of the array pointed to by @buf (1 page at a time), set the
* corresponding bit in @bm.
* corresponding bit in @bm. If the page was originally populated with only
* zeros then a corresponding bit will also be set in @zero_bm.
*/
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm,
struct memory_bitmap *zero_bm)
{
unsigned long decoded_pfn;
bool zero;
int j;
for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
memory_bm_set_bit(bm, buf[j]);
else
zero = !!(buf[j] & ENCODED_PFN_ZERO_FLAG);
decoded_pfn = buf[j] & ENCODED_PFN_MASK;
if (pfn_valid(decoded_pfn) && memory_bm_pfn_present(bm, decoded_pfn)) {
memory_bm_set_bit(bm, decoded_pfn);
if (zero) {
memory_bm_set_bit(zero_bm, decoded_pfn);
nr_zero_pages++;
}
} else {
if (!pfn_valid(decoded_pfn))
pr_err(FW_BUG "Memory map mismatch at 0x%llx after hibernation\n",
(unsigned long long)PFN_PHYS(decoded_pfn));
return -EFAULT;
}
}
return 0;
@ -2483,6 +2558,7 @@ static inline void free_highmem_data(void) {}
* prepare_image - Make room for loading hibernation image.
* @new_bm: Uninitialized memory bitmap structure.
* @bm: Memory bitmap with unsafe pages marked.
* @zero_bm: Memory bitmap containing the zero pages.
*
* Use @bm to mark the pages that will be overwritten in the process of
* restoring the system memory state from the suspend image ("unsafe" pages)
@ -2493,10 +2569,15 @@ static inline void free_highmem_data(void) {}
* pages will be used for just yet. Instead, we mark them all as allocated and
* create a lists of "safe" pages to be used later. On systems with high
* memory a list of "safe" highmem pages is created too.
*
* Because it was not known which pages were unsafe when @zero_bm was created,
* make a copy of it and recreate it within safe pages.
*/
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm,
struct memory_bitmap *zero_bm)
{
unsigned int nr_pages, nr_highmem;
struct memory_bitmap tmp;
struct linked_page *lp;
int error;
@ -2513,6 +2594,24 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
duplicate_memory_bitmap(new_bm, bm);
memory_bm_free(bm, PG_UNSAFE_KEEP);
/* Make a copy of zero_bm so it can be created in safe pages */
error = memory_bm_create(&tmp, GFP_ATOMIC, PG_SAFE);
if (error)
goto Free;
duplicate_memory_bitmap(&tmp, zero_bm);
memory_bm_free(zero_bm, PG_UNSAFE_KEEP);
/* Recreate zero_bm in safe pages */
error = memory_bm_create(zero_bm, GFP_ATOMIC, PG_SAFE);
if (error)
goto Free;
duplicate_memory_bitmap(zero_bm, &tmp);
memory_bm_free(&tmp, PG_UNSAFE_CLEAR);
/* At this point zero_bm is in safe pages and it can be used for restoring. */
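
This duplicate/free/recreate/copy-back sequence is the standard way to evacuate a structure from pages the incoming image data may overwrite. The same pattern in miniature for a plain heap buffer (user-space analogy only; malloc() stands in for allocation from safe pages, and on failure the data is simply dropped, just as the kernel path aborts resume via the Free label):

	#include <stdlib.h>
	#include <string.h>

	/* Move buf into freshly allocated storage via a temporary, mirroring
	 * the duplicate -> free -> recreate -> copy-back steps above. */
	static unsigned char *relocate(unsigned char *buf, size_t len)
	{
		unsigned char *tmp, *safe;

		tmp = malloc(len);		/* memory_bm_create(&tmp, ...) */
		if (!tmp)
			return NULL;
		memcpy(tmp, buf, len);		/* duplicate_memory_bitmap(&tmp, zero_bm) */
		free(buf);			/* memory_bm_free(zero_bm, ...) */
		safe = malloc(len);		/* memory_bm_create(zero_bm, ..., PG_SAFE) */
		if (!safe) {
			free(tmp);
			return NULL;
		}
		memcpy(safe, tmp, len);		/* duplicate_memory_bitmap(zero_bm, &tmp) */
		free(tmp);			/* memory_bm_free(&tmp, ...) */
		return safe;
	}
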
if (nr_highmem > 0) {
error = prepare_highmem_image(bm, &nr_highmem);
if (error)
@ -2527,7 +2626,7 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
*
* nr_copy_pages cannot be less than allocated_unsafe_pages too.
*/
nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
while (nr_pages > 0) {
lp = get_image_page(GFP_ATOMIC, PG_SAFE);
@ -2540,7 +2639,7 @@ static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
nr_pages--;
}
/* Preallocate memory for the image */
nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
while (nr_pages > 0) {
lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
if (!lp) {
@ -2628,8 +2727,9 @@ int snapshot_write_next(struct snapshot_handle *handle)
static struct chain_allocator ca;
int error = 0;
next:
/* Check if we have already loaded the entire image */
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
return 0;
handle->sync_read = 1;
@ -2654,19 +2754,26 @@ int snapshot_write_next(struct snapshot_handle *handle)
if (error)
return error;
error = memory_bm_create(&zero_bm, GFP_ATOMIC, PG_ANY);
if (error)
return error;
nr_zero_pages = 0;
hibernate_restore_protection_begin();
} else if (handle->cur <= nr_meta_pages + 1) {
error = unpack_orig_pfns(buffer, &copy_bm);
error = unpack_orig_pfns(buffer, &copy_bm, &zero_bm);
if (error)
return error;
if (handle->cur == nr_meta_pages + 1) {
error = prepare_image(&orig_bm, &copy_bm);
error = prepare_image(&orig_bm, &copy_bm, &zero_bm);
if (error)
return error;
chain_init(&ca, GFP_ATOMIC, PG_SAFE);
memory_bm_position_reset(&orig_bm);
memory_bm_position_reset(&zero_bm);
restore_pblist = NULL;
handle->buffer = get_buffer(&orig_bm, &ca);
handle->sync_read = 0;
@ -2683,6 +2790,14 @@ int snapshot_write_next(struct snapshot_handle *handle)
handle->sync_read = 0;
}
handle->cur++;
/* Zero pages were not included in the image; memset the buffer and move on. */
if (handle->cur > nr_meta_pages + 1 &&
memory_bm_test_bit(&zero_bm, memory_bm_get_current(&orig_bm))) {
memset(handle->buffer, 0, PAGE_SIZE);
goto next;
}
return PAGE_SIZE;
}
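
The memset shortcut above is what makes zero pages free at restore time: a destination PFN flagged in zero_bm is cleared in place and the loop continues without consuming a data page from the image stream. Schematically (the helpers below are toy stand-ins, not kernel API):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 4096

	/* Toy stand-ins for illustration: odd PFNs play the zero pages. */
	static bool pfn_marked_zero(unsigned long pfn) { return pfn & 1; }
	static int read_page_from_image(void *buf)
	{
		memset(buf, 0xab, PAGE_SIZE);	/* pretend data arrived from disk */
		return 0;
	}

	/* Fill one destination page: zero pages are synthesized, not read. */
	static int load_one_page(void *dst, unsigned long pfn)
	{
		if (pfn_marked_zero(pfn)) {
			memset(dst, 0, PAGE_SIZE);	/* costs no image I/O */
			return 0;
		}
		return read_page_from_image(dst);	/* one page of real data */
	}

	int main(void)
	{
		static unsigned char page[PAGE_SIZE];

		load_one_page(page, 2);
		printf("pfn 2 first byte: %#x\n", page[0]);	/* 0xab */
		load_one_page(page, 3);
		printf("pfn 3 first byte: %#x\n", page[0]);	/* 0 */
		return 0;
	}
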
@ -2699,7 +2814,7 @@ void snapshot_write_finalize(struct snapshot_handle *handle)
copy_last_highmem_page();
hibernate_restore_protect_page(handle->buffer);
/* Do that only if we have loaded the image entirely */
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
memory_bm_recycle(&orig_bm);
free_highmem_data();
}
@ -2708,7 +2823,7 @@ void snapshot_write_finalize(struct snapshot_handle *handle)
int snapshot_image_loaded(struct snapshot_handle *handle)
{
return !(!nr_copy_pages || !last_highmem_page_copied() ||
handle->cur <= nr_meta_pages + nr_copy_pages);
handle->cur <= nr_meta_pages + nr_copy_pages + nr_zero_pages);
}
#ifdef CONFIG_HIGHMEM


@ -24,6 +24,8 @@
* Author: Vincent Guittot <vincent.guittot@linaro.org>
*/
#include <trace/hooks/sched.h>
/*
* Approximate:
* val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
@ -202,6 +204,8 @@ ___update_load_sum(u64 now, struct sched_avg *sa,
sa->last_update_time += delta << 10;
trace_android_rvh_update_load_sum(sa, &delta, &sched_pelt_lshift);
/*
* running is a subset of runnable (weight) so running can't be set if
* runnable is clear. But there are some corner cases where the current


@ -105,6 +105,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_sugov_sched_attr);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_iowait);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_attach_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_detach_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_sum);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_remove_entity_load_avg);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_blocked_fair);
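
For completeness, a restricted vendor hook exported this way is consumed from a vendor module through the generated register_trace_android_rvh_* helper. A hedged sketch of such a consumer, with the handler signature inferred from the call site in ___update_load_sum() and a purely illustrative body (real modules pick their own policy; restricted hooks cannot be unregistered):

	#include <linux/module.h>
	#include <linux/sched.h>
	#include <trace/hooks/sched.h>

	/* Illustrative policy: apply the vendor-selected left shift to the
	 * PELT delta so load tracking ramps faster. */
	static void vendor_update_load_sum(void *unused, struct sched_avg *sa,
					   u64 *delta, unsigned int *lshift)
	{
		if (*lshift)
			*delta <<= *lshift;
	}

	static int __init vendor_pelt_init(void)
	{
		return register_trace_android_rvh_update_load_sum(vendor_update_load_sum,
								  NULL);
	}
	module_init(vendor_pelt_init);
	MODULE_LICENSE("GPL");
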