Linux 5.13-rc2
-----BEGIN PGP SIGNATURE----- iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAmChnGAeHHRvcnZhbGRz QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGBS0IAJKCWwkob9JbkWOI rWSOSi5K9RKGhNHvMh9q7iPL69k/dpa8dxYMBqybD1Gm4WTCdD4sImAXgCjJmL13 DDddNSFRxafhgD06qx2otYJrCzqIB6QJUij9/GdD4KpRgpsKf/7aYrmJB8WlPxjC nw0gfkpHThbJ8LhfUaoUSZqzX8GWvmoemdy+8Ihff5vGWIs+MREcGTtEds2hwiVG qnaGrD8q3FEwpqlygX5aSgDhc2IlqHf7240CKpYRMVEOJG67vjioOzEUG5ZGU0Ng FnExscjc6Jn+uuoVZp++ATx8GACYEx87MWaHNJ5e3abpRG0Za3HUbYEtg55AMTPK 3v/j9KE= =4Jt+ -----END PGP SIGNATURE----- Merge tag 'v5.13-rc2' into android-mainline Linux 5.13-rc2 Signed-off-by: Greg Kroah-Hartman <gregkh@google.com> Change-Id: Id1271b3946958331e58dfce50d4210e9bc426a61
This commit is contained in:
commit
ccd25efcb4
184 changed files with 1234 additions and 723 deletions
|
|
@ -1,7 +1,7 @@
|
|||
What: /sys/class/dax/
|
||||
Date: May, 2016
|
||||
KernelVersion: v4.7
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description: Device DAX is the device-centric analogue of Filesystem
|
||||
DAX (CONFIG_FS_DAX). It allows memory ranges to be
|
||||
allocated and mapped without need of an intervening file
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
What: /sys/bus/nd/devices/regionX/nfit/ecc_unit_size
|
||||
Date: Aug, 2017
|
||||
KernelVersion: v4.14 (Removed v4.18)
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) Size of a write request to a DIMM that will not incur a
|
||||
read-modify-write cycle at the memory controller.
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ Interface Table (NFIT)' section in the ACPI specification
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/serial
|
||||
Date: Jun, 2015
|
||||
KernelVersion: v4.2
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) Serial number of the NVDIMM (non-volatile dual in-line
|
||||
memory module), assigned by the module vendor.
|
||||
|
|
@ -14,7 +14,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/handle
|
||||
Date: Apr, 2015
|
||||
KernelVersion: v4.2
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) The address (given by the _ADR object) of the device on its
|
||||
parent bus of the NVDIMM device containing the NVDIMM region.
|
||||
|
|
@ -23,7 +23,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/device
|
||||
Date: Apr, 2015
|
||||
KernelVersion: v4.1
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) Device id for the NVDIMM, assigned by the module vendor.
|
||||
|
||||
|
|
@ -31,7 +31,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/rev_id
|
||||
Date: Jun, 2015
|
||||
KernelVersion: v4.2
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) Revision of the NVDIMM, assigned by the module vendor.
|
||||
|
||||
|
|
@ -39,7 +39,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/phys_id
|
||||
Date: Apr, 2015
|
||||
KernelVersion: v4.2
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) Handle (i.e., instance number) for the SMBIOS (system
|
||||
management BIOS) Memory Device structure describing the NVDIMM
|
||||
|
|
@ -49,7 +49,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/flags
|
||||
Date: Jun, 2015
|
||||
KernelVersion: v4.2
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) The flags in the NFIT memory device sub-structure indicate
|
||||
the state of the data on the nvdimm relative to its energy
|
||||
|
|
@ -68,7 +68,7 @@ What: /sys/bus/nd/devices/nmemX/nfit/format1
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/formats
|
||||
Date: Apr, 2016
|
||||
KernelVersion: v4.7
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) The interface codes indicate support for persistent memory
|
||||
mapped directly into system physical address space and / or a
|
||||
|
|
@ -84,7 +84,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/vendor
|
||||
Date: Apr, 2016
|
||||
KernelVersion: v4.7
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) Vendor id of the NVDIMM.
|
||||
|
||||
|
|
@ -92,7 +92,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/dsm_mask
|
||||
Date: May, 2016
|
||||
KernelVersion: v4.7
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) The bitmask indicates the supported device specific control
|
||||
functions relative to the NVDIMM command family supported by the
|
||||
|
|
@ -102,7 +102,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/family
|
||||
Date: Apr, 2016
|
||||
KernelVersion: v4.7
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) Displays the NVDIMM family command sets. Values
|
||||
0, 1, 2 and 3 correspond to NVDIMM_FAMILY_INTEL,
|
||||
|
|
@ -118,7 +118,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/id
|
||||
Date: Apr, 2016
|
||||
KernelVersion: v4.7
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) ACPI specification 6.2 section 5.2.25.9, defines an
|
||||
identifier for an NVDIMM, which refelects the id attribute.
|
||||
|
|
@ -127,7 +127,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/subsystem_vendor
|
||||
Date: Apr, 2016
|
||||
KernelVersion: v4.7
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) Sub-system vendor id of the NVDIMM non-volatile memory
|
||||
subsystem controller.
|
||||
|
|
@ -136,7 +136,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/subsystem_rev_id
|
||||
Date: Apr, 2016
|
||||
KernelVersion: v4.7
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) Sub-system revision id of the NVDIMM non-volatile memory subsystem
|
||||
controller, assigned by the non-volatile memory subsystem
|
||||
|
|
@ -146,7 +146,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/nfit/subsystem_device
|
||||
Date: Apr, 2016
|
||||
KernelVersion: v4.7
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) Sub-system device id for the NVDIMM non-volatile memory
|
||||
subsystem controller, assigned by the non-volatile memory
|
||||
|
|
@ -156,7 +156,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/ndbusX/nfit/revision
|
||||
Date: Jun, 2015
|
||||
KernelVersion: v4.2
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) ACPI NFIT table revision number.
|
||||
|
||||
|
|
@ -164,7 +164,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/ndbusX/nfit/scrub
|
||||
Date: Sep, 2016
|
||||
KernelVersion: v4.9
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RW) This shows the number of full Address Range Scrubs (ARS)
|
||||
that have been completed since driver load time. Userspace can
|
||||
|
|
@ -177,7 +177,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/ndbusX/nfit/hw_error_scrub
|
||||
Date: Sep, 2016
|
||||
KernelVersion: v4.9
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RW) Provides a way to toggle the behavior between just adding
|
||||
the address (cache line) where the MCE happened to the poison
|
||||
|
|
@ -196,7 +196,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/ndbusX/nfit/dsm_mask
|
||||
Date: Jun, 2017
|
||||
KernelVersion: v4.13
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) The bitmask indicates the supported bus specific control
|
||||
functions. See the section named 'NVDIMM Root Device _DSMs' in
|
||||
|
|
@ -205,7 +205,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/ndbusX/nfit/firmware_activate_noidle
|
||||
Date: Apr, 2020
|
||||
KernelVersion: v5.8
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RW) The Intel platform implementation of firmware activate
|
||||
support exposes an option let the platform force idle devices in
|
||||
|
|
@ -225,7 +225,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/regionX/nfit/range_index
|
||||
Date: Jun, 2015
|
||||
KernelVersion: v4.2
|
||||
Contact: linux-nvdimm@lists.01.org
|
||||
Contact: nvdimm@lists.linux.dev
|
||||
Description:
|
||||
(RO) A unique number provided by the BIOS to identify an address
|
||||
range. Used by NVDIMM Region Mapping Structure to uniquely refer
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
What: /sys/bus/nd/devices/nmemX/papr/flags
|
||||
Date: Apr, 2020
|
||||
KernelVersion: v5.8
|
||||
Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, linux-nvdimm@lists.01.org,
|
||||
Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, nvdimm@lists.linux.dev,
|
||||
Description:
|
||||
(RO) Report flags indicating various states of a
|
||||
papr-pmem NVDIMM device. Each flag maps to a one or
|
||||
|
|
@ -36,7 +36,7 @@ Description:
|
|||
What: /sys/bus/nd/devices/nmemX/papr/perf_stats
|
||||
Date: May, 2020
|
||||
KernelVersion: v5.9
|
||||
Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, linux-nvdimm@lists.01.org,
|
||||
Contact: linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, nvdimm@lists.linux.dev,
|
||||
Description:
|
||||
(RO) Report various performance stats related to papr-scm NVDIMM
|
||||
device. Each stat is reported on a new line with each line
|
||||
|
|
|
|||
|
|
@ -483,10 +483,11 @@ modprobe
|
|||
========
|
||||
|
||||
The full path to the usermode helper for autoloading kernel modules,
|
||||
by default "/sbin/modprobe". This binary is executed when the kernel
|
||||
requests a module. For example, if userspace passes an unknown
|
||||
filesystem type to mount(), then the kernel will automatically request
|
||||
the corresponding filesystem module by executing this usermode helper.
|
||||
by default ``CONFIG_MODPROBE_PATH``, which in turn defaults to
|
||||
"/sbin/modprobe". This binary is executed when the kernel requests a
|
||||
module. For example, if userspace passes an unknown filesystem type
|
||||
to mount(), then the kernel will automatically request the
|
||||
corresponding filesystem module by executing this usermode helper.
|
||||
This usermode helper should insert the needed module into the kernel.
|
||||
|
||||
This sysctl only affects module autoloading. It has no effect on the
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ LIBNVDIMM: Non-Volatile Devices
|
|||
|
||||
libnvdimm - kernel / libndctl - userspace helper library
|
||||
|
||||
linux-nvdimm@lists.01.org
|
||||
nvdimm@lists.linux.dev
|
||||
|
||||
Version 13
|
||||
|
||||
|
|
|
|||
|
|
@ -109,16 +109,19 @@ well as to make sure they aren't relying on some HCD-specific behavior.
|
|||
USB-Standard Types
|
||||
==================
|
||||
|
||||
In ``drivers/usb/common/common.c`` and ``drivers/usb/common/debug.c`` you
|
||||
will find the USB data types defined in chapter 9 of the USB specification.
|
||||
These data types are used throughout USB, and in APIs including this host
|
||||
side API, gadget APIs, usb character devices and debugfs interfaces.
|
||||
In ``include/uapi/linux/usb/ch9.h`` you will find the USB data types defined
|
||||
in chapter 9 of the USB specification. These data types are used throughout
|
||||
USB, and in APIs including this host side API, gadget APIs, usb character
|
||||
devices and debugfs interfaces. That file is itself included by
|
||||
``include/linux/usb/ch9.h``, which also contains declarations of a few
|
||||
utility routines for manipulating these data types; the implementations
|
||||
are in ``drivers/usb/common/common.c``.
|
||||
|
||||
.. kernel-doc:: drivers/usb/common/common.c
|
||||
:export:
|
||||
|
||||
.. kernel-doc:: drivers/usb/common/debug.c
|
||||
:export:
|
||||
In addition, some functions useful for creating debugging output are
|
||||
defined in ``drivers/usb/common/debug.c``.
|
||||
|
||||
Host-Side Data Types and Macros
|
||||
===============================
|
||||
|
|
|
|||
|
|
@ -50,8 +50,8 @@ Here is the main features of EROFS:
|
|||
|
||||
- Support POSIX.1e ACLs by using xattrs;
|
||||
|
||||
- Support transparent file compression as an option:
|
||||
LZ4 algorithm with 4 KB fixed-sized output compression for high performance.
|
||||
- Support transparent data compression as an option:
|
||||
LZ4 algorithm with the fixed-sized output compression for high performance.
|
||||
|
||||
The following git tree provides the file system user-space tools under
|
||||
development (ex, formatting tool mkfs.erofs):
|
||||
|
|
@ -113,31 +113,31 @@ may not. All metadatas can be now observed in two different spaces (views):
|
|||
|
||||
::
|
||||
|
||||
|-> aligned with 8B
|
||||
|-> followed closely
|
||||
+ meta_blkaddr blocks |-> another slot
|
||||
_____________________________________________________________________
|
||||
| ... | inode | xattrs | extents | data inline | ... | inode ...
|
||||
|________|_______|(optional)|(optional)|__(optional)_|_____|__________
|
||||
|-> aligned with the inode slot size
|
||||
. .
|
||||
. .
|
||||
. .
|
||||
. .
|
||||
. .
|
||||
. .
|
||||
.____________________________________________________|-> aligned with 4B
|
||||
| xattr_ibody_header | shared xattrs | inline xattrs |
|
||||
|____________________|_______________|_______________|
|
||||
|-> 12 bytes <-|->x * 4 bytes<-| .
|
||||
. . .
|
||||
. . .
|
||||
. . .
|
||||
._______________________________.______________________.
|
||||
| id | id | id | id | ... | id | ent | ... | ent| ... |
|
||||
|____|____|____|____|______|____|_____|_____|____|_____|
|
||||
|-> aligned with 4B
|
||||
|-> aligned with 4B
|
||||
|-> aligned with 8B
|
||||
|-> followed closely
|
||||
+ meta_blkaddr blocks |-> another slot
|
||||
_____________________________________________________________________
|
||||
| ... | inode | xattrs | extents | data inline | ... | inode ...
|
||||
|________|_______|(optional)|(optional)|__(optional)_|_____|__________
|
||||
|-> aligned with the inode slot size
|
||||
. .
|
||||
. .
|
||||
. .
|
||||
. .
|
||||
. .
|
||||
. .
|
||||
.____________________________________________________|-> aligned with 4B
|
||||
| xattr_ibody_header | shared xattrs | inline xattrs |
|
||||
|____________________|_______________|_______________|
|
||||
|-> 12 bytes <-|->x * 4 bytes<-| .
|
||||
. . .
|
||||
. . .
|
||||
. . .
|
||||
._______________________________.______________________.
|
||||
| id | id | id | id | ... | id | ent | ... | ent| ... |
|
||||
|____|____|____|____|______|____|_____|_____|____|_____|
|
||||
|-> aligned with 4B
|
||||
|-> aligned with 4B
|
||||
|
||||
Inode could be 32 or 64 bytes, which can be distinguished from a common
|
||||
field which all inode versions have -- i_format::
|
||||
|
|
@ -175,13 +175,13 @@ may not. All metadatas can be now observed in two different spaces (views):
|
|||
Each share xattr can also be directly found by the following formula:
|
||||
xattr offset = xattr_blkaddr * block_size + 4 * xattr_id
|
||||
|
||||
::
|
||||
::
|
||||
|
||||
|-> aligned by 4 bytes
|
||||
+ xattr_blkaddr blocks |-> aligned with 4 bytes
|
||||
_________________________________________________________________________
|
||||
| ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ...
|
||||
|________|_____________|_____________|_____|______________|_______________
|
||||
|-> aligned by 4 bytes
|
||||
+ xattr_blkaddr blocks |-> aligned with 4 bytes
|
||||
_________________________________________________________________________
|
||||
| ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ...
|
||||
|________|_____________|_____________|_____|______________|_______________
|
||||
|
||||
Directories
|
||||
-----------
|
||||
|
|
@ -193,48 +193,77 @@ algorithm (could refer to the related source code).
|
|||
|
||||
::
|
||||
|
||||
___________________________
|
||||
/ |
|
||||
/ ______________|________________
|
||||
/ / | nameoff1 | nameoffN-1
|
||||
____________.______________._______________v________________v__________
|
||||
| dirent | dirent | ... | dirent | filename | filename | ... | filename |
|
||||
|___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____|
|
||||
\ ^
|
||||
\ | * could have
|
||||
\ | trailing '\0'
|
||||
\________________________| nameoff0
|
||||
|
||||
Directory block
|
||||
___________________________
|
||||
/ |
|
||||
/ ______________|________________
|
||||
/ / | nameoff1 | nameoffN-1
|
||||
____________.______________._______________v________________v__________
|
||||
| dirent | dirent | ... | dirent | filename | filename | ... | filename |
|
||||
|___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____|
|
||||
\ ^
|
||||
\ | * could have
|
||||
\ | trailing '\0'
|
||||
\________________________| nameoff0
|
||||
Directory block
|
||||
|
||||
Note that apart from the offset of the first filename, nameoff0 also indicates
|
||||
the total number of directory entries in this block since it is no need to
|
||||
introduce another on-disk field at all.
|
||||
|
||||
Compression
|
||||
-----------
|
||||
Currently, EROFS supports 4KB fixed-sized output transparent file compression,
|
||||
as illustrated below::
|
||||
Data compression
|
||||
----------------
|
||||
EROFS implements LZ4 fixed-sized output compression which generates fixed-sized
|
||||
compressed data blocks from variable-sized input in contrast to other existing
|
||||
fixed-sized input solutions. Relatively higher compression ratios can be gotten
|
||||
by using fixed-sized output compression since nowadays popular data compression
|
||||
algorithms are mostly LZ77-based and such fixed-sized output approach can be
|
||||
benefited from the historical dictionary (aka. sliding window).
|
||||
|
||||
|---- Variant-Length Extent ----|-------- VLE --------|----- VLE -----
|
||||
clusterofs clusterofs clusterofs
|
||||
| | | logical data
|
||||
_________v_______________________________v_____________________v_______________
|
||||
... | . | | . | | . | ...
|
||||
____|____.________|_____________|________.____|_____________|__.__________|____
|
||||
|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|
|
||||
size size size size size
|
||||
. . . .
|
||||
. . . .
|
||||
. . . .
|
||||
_______._____________._____________._____________._____________________
|
||||
... | | | | ... physical data
|
||||
_______|_____________|_____________|_____________|_____________________
|
||||
|-> cluster <-|-> cluster <-|-> cluster <-|
|
||||
size size size
|
||||
In details, original (uncompressed) data is turned into several variable-sized
|
||||
extents and in the meanwhile, compressed into physical clusters (pclusters).
|
||||
In order to record each variable-sized extent, logical clusters (lclusters) are
|
||||
introduced as the basic unit of compress indexes to indicate whether a new
|
||||
extent is generated within the range (HEAD) or not (NONHEAD). Lclusters are now
|
||||
fixed in block size, as illustrated below::
|
||||
|
||||
Currently each on-disk physical cluster can contain 4KB (un)compressed data
|
||||
at most. For each logical cluster, there is a corresponding on-disk index to
|
||||
describe its cluster type, physical cluster address, etc.
|
||||
|<- variable-sized extent ->|<- VLE ->|
|
||||
clusterofs clusterofs clusterofs
|
||||
| | |
|
||||
_________v_________________________________v_______________________v________
|
||||
... | . | | . | | . ...
|
||||
____|____._________|______________|________.___ _|______________|__.________
|
||||
|-> lcluster <-|-> lcluster <-|-> lcluster <-|-> lcluster <-|
|
||||
(HEAD) (NONHEAD) (HEAD) (NONHEAD) .
|
||||
. CBLKCNT . .
|
||||
. . .
|
||||
. . .
|
||||
_______._____________________________.______________._________________
|
||||
... | | | | ...
|
||||
_______|______________|______________|______________|_________________
|
||||
|-> big pcluster <-|-> pcluster <-|
|
||||
|
||||
See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details.
|
||||
A physical cluster can be seen as a container of physical compressed blocks
|
||||
which contains compressed data. Previously, only lcluster-sized (4KB) pclusters
|
||||
were supported. After big pcluster feature is introduced (available since
|
||||
Linux v5.13), pcluster can be a multiple of lcluster size.
|
||||
|
||||
For each HEAD lcluster, clusterofs is recorded to indicate where a new extent
|
||||
starts and blkaddr is used to seek the compressed data. For each NONHEAD
|
||||
lcluster, delta0 and delta1 are available instead of blkaddr to indicate the
|
||||
distance to its HEAD lcluster and the next HEAD lcluster. A PLAIN lcluster is
|
||||
also a HEAD lcluster except that its data is uncompressed. See the comments
|
||||
around "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details.
|
||||
|
||||
If big pcluster is enabled, pcluster size in lclusters needs to be recorded as
|
||||
well. Let the delta0 of the first NONHEAD lcluster store the compressed block
|
||||
count with a special flag as a new called CBLKCNT NONHEAD lcluster. It's easy
|
||||
to understand its delta0 is constantly 1, as illustrated below::
|
||||
|
||||
__________________________________________________________
|
||||
| HEAD | NONHEAD | NONHEAD | ... | NONHEAD | HEAD | HEAD |
|
||||
|__:___|_(CBLKCNT)_|_________|_____|_________|__:___|____:_|
|
||||
|<----- a big pcluster (with CBLKCNT) ------>|<-- -->|
|
||||
a lcluster-sized pcluster (without CBLKCNT) ^
|
||||
|
||||
If another HEAD follows a HEAD lcluster, there is no room to record CBLKCNT,
|
||||
but it's easy to know the size of such pcluster is 1 lcluster as well.
|
||||
|
|
|
|||
|
|
@ -140,7 +140,7 @@ is an arbitrary string allowed in a filesystem, e.g.::
|
|||
Each function provides its specific set of attributes, with either read-only
|
||||
or read-write access. Where applicable they need to be written to as
|
||||
appropriate.
|
||||
Please refer to Documentation/ABI/*/configfs-usb-gadget* for more information.
|
||||
Please refer to Documentation/ABI/testing/configfs-usb-gadget for more information.
|
||||
|
||||
4. Associating the functions with their configurations
|
||||
------------------------------------------------------
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ to SEV::
|
|||
[ecx]:
|
||||
Bits[31:0] Number of encrypted guests supported simultaneously
|
||||
|
||||
If support for SEV is present, MSR 0xc001_0010 (MSR_K8_SYSCFG) and MSR 0xc001_0015
|
||||
If support for SEV is present, MSR 0xc001_0010 (MSR_AMD64_SYSCFG) and MSR 0xc001_0015
|
||||
(MSR_K7_HWCR) can be used to determine if it can be enabled::
|
||||
|
||||
0xc001_0010:
|
||||
|
|
|
|||
|
|
@ -53,7 +53,7 @@ CPUID function 0x8000001f reports information related to SME::
|
|||
system physical addresses, not guest physical
|
||||
addresses)
|
||||
|
||||
If support for SME is present, MSR 0xc00100010 (MSR_K8_SYSCFG) can be used to
|
||||
If support for SME is present, MSR 0xc00100010 (MSR_AMD64_SYSCFG) can be used to
|
||||
determine if SME is enabled and/or to enable memory encryption::
|
||||
|
||||
0xc0010010:
|
||||
|
|
@ -79,7 +79,7 @@ The state of SME in the Linux kernel can be documented as follows:
|
|||
The CPU supports SME (determined through CPUID instruction).
|
||||
|
||||
- Enabled:
|
||||
Supported and bit 23 of MSR_K8_SYSCFG is set.
|
||||
Supported and bit 23 of MSR_AMD64_SYSCFG is set.
|
||||
|
||||
- Active:
|
||||
Supported, Enabled and the Linux kernel is actively applying
|
||||
|
|
@ -89,7 +89,7 @@ The state of SME in the Linux kernel can be documented as follows:
|
|||
SME can also be enabled and activated in the BIOS. If SME is enabled and
|
||||
activated in the BIOS, then all memory accesses will be encrypted and it will
|
||||
not be necessary to activate the Linux memory encryption support. If the BIOS
|
||||
merely enables SME (sets bit 23 of the MSR_K8_SYSCFG), then Linux can activate
|
||||
merely enables SME (sets bit 23 of the MSR_AMD64_SYSCFG), then Linux can activate
|
||||
memory encryption by default (CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT=y) or
|
||||
by supplying mem_encrypt=on on the kernel command line. However, if BIOS does
|
||||
not enable SME, then Linux will not be able to activate memory encryption, even
|
||||
|
|
|
|||
14
MAINTAINERS
14
MAINTAINERS
|
|
@ -5237,7 +5237,7 @@ DEVICE DIRECT ACCESS (DAX)
|
|||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
M: Vishal Verma <vishal.l.verma@intel.com>
|
||||
M: Dave Jiang <dave.jiang@intel.com>
|
||||
L: linux-nvdimm@lists.01.org
|
||||
L: nvdimm@lists.linux.dev
|
||||
S: Supported
|
||||
F: drivers/dax/
|
||||
|
||||
|
|
@ -7006,7 +7006,7 @@ M: Dan Williams <dan.j.williams@intel.com>
|
|||
R: Matthew Wilcox <willy@infradead.org>
|
||||
R: Jan Kara <jack@suse.cz>
|
||||
L: linux-fsdevel@vger.kernel.org
|
||||
L: linux-nvdimm@lists.01.org
|
||||
L: nvdimm@lists.linux.dev
|
||||
S: Supported
|
||||
F: fs/dax.c
|
||||
F: include/linux/dax.h
|
||||
|
|
@ -10385,7 +10385,7 @@ LIBNVDIMM BLK: MMIO-APERTURE DRIVER
|
|||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
M: Vishal Verma <vishal.l.verma@intel.com>
|
||||
M: Dave Jiang <dave.jiang@intel.com>
|
||||
L: linux-nvdimm@lists.01.org
|
||||
L: nvdimm@lists.linux.dev
|
||||
S: Supported
|
||||
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
|
||||
P: Documentation/nvdimm/maintainer-entry-profile.rst
|
||||
|
|
@ -10396,7 +10396,7 @@ LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
|
|||
M: Vishal Verma <vishal.l.verma@intel.com>
|
||||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
M: Dave Jiang <dave.jiang@intel.com>
|
||||
L: linux-nvdimm@lists.01.org
|
||||
L: nvdimm@lists.linux.dev
|
||||
S: Supported
|
||||
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
|
||||
P: Documentation/nvdimm/maintainer-entry-profile.rst
|
||||
|
|
@ -10406,7 +10406,7 @@ LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
|
|||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
M: Vishal Verma <vishal.l.verma@intel.com>
|
||||
M: Dave Jiang <dave.jiang@intel.com>
|
||||
L: linux-nvdimm@lists.01.org
|
||||
L: nvdimm@lists.linux.dev
|
||||
S: Supported
|
||||
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
|
||||
P: Documentation/nvdimm/maintainer-entry-profile.rst
|
||||
|
|
@ -10414,7 +10414,7 @@ F: drivers/nvdimm/pmem*
|
|||
|
||||
LIBNVDIMM: DEVICETREE BINDINGS
|
||||
M: Oliver O'Halloran <oohall@gmail.com>
|
||||
L: linux-nvdimm@lists.01.org
|
||||
L: nvdimm@lists.linux.dev
|
||||
S: Supported
|
||||
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
|
||||
F: Documentation/devicetree/bindings/pmem/pmem-region.txt
|
||||
|
|
@ -10425,7 +10425,7 @@ M: Dan Williams <dan.j.williams@intel.com>
|
|||
M: Vishal Verma <vishal.l.verma@intel.com>
|
||||
M: Dave Jiang <dave.jiang@intel.com>
|
||||
M: Ira Weiny <ira.weiny@intel.com>
|
||||
L: linux-nvdimm@lists.01.org
|
||||
L: nvdimm@lists.linux.dev
|
||||
S: Supported
|
||||
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
|
||||
P: Documentation/nvdimm/maintainer-entry-profile.rst
|
||||
|
|
|
|||
2
Makefile
2
Makefile
|
|
@ -2,7 +2,7 @@
|
|||
VERSION = 5
|
||||
PATCHLEVEL = 13
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc1
|
||||
EXTRAVERSION = -rc2
|
||||
NAME = Frozen Wasteland
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ endif
|
|||
|
||||
|
||||
ifdef CONFIG_ARC_CURR_IN_REG
|
||||
# For a global register defintion, make sure it gets passed to every file
|
||||
# For a global register definition, make sure it gets passed to every file
|
||||
# We had a customer reported bug where some code built in kernel was NOT using
|
||||
# any kernel headers, and missing the r25 global register
|
||||
# Can't do unconditionally because of recursive include issues
|
||||
|
|
|
|||
|
|
@ -116,7 +116,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
|
|||
*
|
||||
* Technically the lock is also needed for UP (boils down to irq save/restore)
|
||||
* but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
|
||||
* be disabled thus can't possibly be interrpted/preempted/clobbered by xchg()
|
||||
* be disabled thus can't possibly be interrupted/preempted/clobbered by xchg()
|
||||
* Other way around, xchg is one instruction anyways, so can't be interrupted
|
||||
* as such
|
||||
*/
|
||||
|
|
@ -143,7 +143,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
|
|||
/*
|
||||
* "atomic" variant of xchg()
|
||||
* REQ: It needs to follow the same serialization rules as other atomic_xxx()
|
||||
* Since xchg() doesn't always do that, it would seem that following defintion
|
||||
* Since xchg() doesn't always do that, it would seem that following definition
|
||||
* is incorrect. But here's the rationale:
|
||||
* SMP : Even xchg() takes the atomic_ops_lock, so OK.
|
||||
* LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
|
||||
|
|
|
|||
|
|
@ -7,6 +7,18 @@
|
|||
|
||||
#include <uapi/asm/page.h>
|
||||
|
||||
#ifdef CONFIG_ARC_HAS_PAE40
|
||||
|
||||
#define MAX_POSSIBLE_PHYSMEM_BITS 40
|
||||
#define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK)
|
||||
|
||||
#else /* CONFIG_ARC_HAS_PAE40 */
|
||||
|
||||
#define MAX_POSSIBLE_PHYSMEM_BITS 32
|
||||
#define PAGE_MASK_PHYS PAGE_MASK
|
||||
|
||||
#endif /* CONFIG_ARC_HAS_PAE40 */
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
|
||||
|
|
|
|||
|
|
@ -107,8 +107,8 @@
|
|||
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
|
||||
|
||||
/* Set of bits not changed in pte_modify */
|
||||
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
|
||||
|
||||
#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
|
||||
_PAGE_SPECIAL)
|
||||
/* More Abbrevaited helpers */
|
||||
#define PAGE_U_NONE __pgprot(___DEF)
|
||||
#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
|
||||
|
|
@ -132,13 +132,7 @@
|
|||
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
|
||||
#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
|
||||
|
||||
#ifdef CONFIG_ARC_HAS_PAE40
|
||||
#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
|
||||
#define MAX_POSSIBLE_PHYSMEM_BITS 40
|
||||
#else
|
||||
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
|
||||
#define MAX_POSSIBLE_PHYSMEM_BITS 32
|
||||
#endif
|
||||
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
|
||||
|
||||
/**************************************************************************
|
||||
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
|
||||
|
|
|
|||
|
|
@ -33,5 +33,4 @@
|
|||
|
||||
#define PAGE_MASK (~(PAGE_SIZE-1))
|
||||
|
||||
|
||||
#endif /* _UAPI__ASM_ARC_PAGE_H */
|
||||
|
|
|
|||
|
|
@ -177,7 +177,7 @@ tracesys:
|
|||
|
||||
; Do the Sys Call as we normally would.
|
||||
; Validate the Sys Call number
|
||||
cmp r8, NR_syscalls
|
||||
cmp r8, NR_syscalls - 1
|
||||
mov.hi r0, -ENOSYS
|
||||
bhi tracesys_exit
|
||||
|
||||
|
|
@ -255,7 +255,7 @@ ENTRY(EV_Trap)
|
|||
;============ Normal syscall case
|
||||
|
||||
; syscall num shd not exceed the total system calls avail
|
||||
cmp r8, NR_syscalls
|
||||
cmp r8, NR_syscalls - 1
|
||||
mov.hi r0, -ENOSYS
|
||||
bhi .Lret_from_system_call
|
||||
|
||||
|
|
|
|||
|
|
@ -140,6 +140,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
|
|||
ptr = &remcomInBuffer[1];
|
||||
if (kgdb_hex2long(&ptr, &addr))
|
||||
regs->ret = addr;
|
||||
fallthrough;
|
||||
|
||||
case 'D':
|
||||
case 'k':
|
||||
|
|
|
|||
|
|
@ -50,14 +50,14 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
|
|||
int ret;
|
||||
|
||||
/*
|
||||
* This is only for old cores lacking LLOCK/SCOND, which by defintion
|
||||
* This is only for old cores lacking LLOCK/SCOND, which by definition
|
||||
* can't possibly be SMP. Thus doesn't need to be SMP safe.
|
||||
* And this also helps reduce the overhead for serializing in
|
||||
* the UP case
|
||||
*/
|
||||
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
|
||||
|
||||
/* Z indicates to userspace if operation succeded */
|
||||
/* Z indicates to userspace if operation succeeded */
|
||||
regs->status32 &= ~STATUS_Z_MASK;
|
||||
|
||||
ret = access_ok(uaddr, sizeof(*uaddr));
|
||||
|
|
@ -107,7 +107,7 @@ fail:
|
|||
|
||||
void arch_cpu_idle(void)
|
||||
{
|
||||
/* Re-enable interrupts <= default irq priority before commiting SLEEP */
|
||||
/* Re-enable interrupts <= default irq priority before committing SLEEP */
|
||||
const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;
|
||||
|
||||
__asm__ __volatile__(
|
||||
|
|
@ -120,7 +120,7 @@ void arch_cpu_idle(void)
|
|||
|
||||
void arch_cpu_idle(void)
|
||||
{
|
||||
/* sleep, but enable both set E1/E2 (levels of interrutps) before committing */
|
||||
/* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
|
||||
__asm__ __volatile__("sleep 0x3 \n");
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -259,7 +259,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
|
|||
regs->r2 = (unsigned long)&sf->uc;
|
||||
|
||||
/*
|
||||
* small optim to avoid unconditonally calling do_sigaltstack
|
||||
* small optim to avoid unconditionally calling do_sigaltstack
|
||||
* in sigreturn path, now that we only have rt_sigreturn
|
||||
*/
|
||||
magic = MAGIC_SIGALTSTK;
|
||||
|
|
@ -391,7 +391,7 @@ void do_signal(struct pt_regs *regs)
|
|||
void do_notify_resume(struct pt_regs *regs)
|
||||
{
|
||||
/*
|
||||
* ASM glue gaurantees that this is only called when returning to
|
||||
* ASM glue guarantees that this is only called when returning to
|
||||
* user mode
|
||||
*/
|
||||
if (test_thread_flag(TIF_NOTIFY_RESUME))
|
||||
|
|
|
|||
|
|
@ -157,7 +157,16 @@ void __init setup_arch_memory(void)
|
|||
min_high_pfn = PFN_DOWN(high_mem_start);
|
||||
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
|
||||
|
||||
max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
|
||||
/*
|
||||
* max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
|
||||
* For HIGHMEM without PAE max_high_pfn should be less than
|
||||
* min_low_pfn to guarantee that these two regions don't overlap.
|
||||
* For PAE case highmem is greater than lowmem, so it is natural
|
||||
* to use max_high_pfn.
|
||||
*
|
||||
* In both cases, holes should be handled by pfn_valid().
|
||||
*/
|
||||
max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
|
||||
|
||||
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
|
||||
|
||||
|
|
|
|||
|
|
@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
|
|||
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
|
||||
unsigned long flags)
|
||||
{
|
||||
unsigned int off;
|
||||
unsigned long vaddr;
|
||||
struct vm_struct *area;
|
||||
phys_addr_t off, end;
|
||||
phys_addr_t end;
|
||||
pgprot_t prot = __pgprot(flags);
|
||||
|
||||
/* Don't allow wraparound, zero size */
|
||||
|
|
@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
|
|||
|
||||
/* Mappings have to be page-aligned */
|
||||
off = paddr & ~PAGE_MASK;
|
||||
paddr &= PAGE_MASK;
|
||||
paddr &= PAGE_MASK_PHYS;
|
||||
size = PAGE_ALIGN(end + 1) - paddr;
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
|
|||
pte_t *ptep)
|
||||
{
|
||||
unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
|
||||
phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
|
||||
phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
|
||||
struct page *page = pfn_to_page(pte_pfn(*ptep));
|
||||
|
||||
create_tlb(vma, vaddr, ptep);
|
||||
|
|
|
|||
|
|
@ -135,24 +135,18 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
|
|||
return;
|
||||
}
|
||||
|
||||
int xen_swiotlb_detect(void)
|
||||
{
|
||||
if (!xen_domain())
|
||||
return 0;
|
||||
if (xen_feature(XENFEAT_direct_mapped))
|
||||
return 1;
|
||||
/* legacy case */
|
||||
if (!xen_feature(XENFEAT_not_direct_mapped) && xen_initial_domain())
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init xen_mm_init(void)
|
||||
{
|
||||
struct gnttab_cache_flush cflush;
|
||||
int rc;
|
||||
|
||||
if (!xen_swiotlb_detect())
|
||||
return 0;
|
||||
xen_swiotlb_init();
|
||||
|
||||
rc = xen_swiotlb_init();
|
||||
/* we can work with the default swiotlb */
|
||||
if (rc < 0 && rc != -EEXIST)
|
||||
return rc;
|
||||
|
||||
cflush.op = 0;
|
||||
cflush.a.dev_bus_addr = 0;
|
||||
|
|
|
|||
|
|
@ -43,6 +43,7 @@
|
|||
#include <linux/sizes.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/xen/swiotlb-xen.h>
|
||||
|
||||
/*
|
||||
* We need to be able to catch inadvertent references to memstart_addr
|
||||
|
|
@ -482,7 +483,7 @@ void __init mem_init(void)
|
|||
if (swiotlb_force == SWIOTLB_FORCE ||
|
||||
max_pfn > PFN_DOWN(arm64_dma_phys_limit))
|
||||
swiotlb_init(1);
|
||||
else
|
||||
else if (!xen_swiotlb_detect())
|
||||
swiotlb_force = SWIOTLB_NO_FORCE;
|
||||
|
||||
set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
|
||||
|
|
|
|||
|
|
@ -448,6 +448,9 @@
|
|||
*/
|
||||
long plpar_hcall_norets(unsigned long opcode, ...);
|
||||
|
||||
/* Variant which does not do hcall tracing */
|
||||
long plpar_hcall_norets_notrace(unsigned long opcode, ...);
|
||||
|
||||
/**
|
||||
* plpar_hcall: - Make a pseries hypervisor call
|
||||
* @opcode: The hypervisor call to make.
|
||||
|
|
|
|||
|
|
@ -153,8 +153,6 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
|
|||
*/
|
||||
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
|
||||
{
|
||||
if (user_mode(regs))
|
||||
kuep_unlock();
|
||||
}
|
||||
|
||||
static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
|
||||
|
|
@ -222,6 +220,13 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
|
|||
local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
|
||||
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
|
||||
|
||||
if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
|
||||
regs->nip < (unsigned long)__end_interrupts) {
|
||||
// Kernel code running below __end_interrupts is
|
||||
// implicitly soft-masked.
|
||||
regs->softe = IRQS_ALL_DISABLED;
|
||||
}
|
||||
|
||||
/* Don't do any per-CPU operations until interrupt state is fixed */
|
||||
|
||||
if (nmi_disables_ftrace(regs)) {
|
||||
|
|
|
|||
|
|
@ -28,19 +28,35 @@ static inline u32 yield_count_of(int cpu)
|
|||
return be32_to_cpu(yield_count);
|
||||
}
|
||||
|
||||
/*
|
||||
* Spinlock code confers and prods, so don't trace the hcalls because the
|
||||
* tracing code takes spinlocks which can cause recursion deadlocks.
|
||||
*
|
||||
* These calls are made while the lock is not held: the lock slowpath yields if
|
||||
* it can not acquire the lock, and unlock slow path might prod if a waiter has
|
||||
* yielded). So this may not be a problem for simple spin locks because the
|
||||
* tracing does not technically recurse on the lock, but we avoid it anyway.
|
||||
*
|
||||
* However the queued spin lock contended path is more strictly ordered: the
|
||||
* H_CONFER hcall is made after the task has queued itself on the lock, so then
|
||||
* recursing on that lock will cause the task to then queue up again behind the
|
||||
* first instance (or worse: queued spinlocks use tricks that assume a context
|
||||
* never waits on more than one spinlock, so such recursion may cause random
|
||||
* corruption in the lock code).
|
||||
*/
|
||||
static inline void yield_to_preempted(int cpu, u32 yield_count)
|
||||
{
|
||||
plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
|
||||
plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
|
||||
}
|
||||
|
||||
static inline void prod_cpu(int cpu)
|
||||
{
|
||||
plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
|
||||
plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
|
||||
}
|
||||
|
||||
static inline void yield_to_any(void)
|
||||
{
|
||||
plpar_hcall_norets(H_CONFER, -1, 0);
|
||||
plpar_hcall_norets_notrace(H_CONFER, -1, 0);
|
||||
}
|
||||
#else
|
||||
static inline bool is_shared_processor(void)
|
||||
|
|
|
|||
|
|
@ -28,7 +28,11 @@ static inline void set_cede_latency_hint(u8 latency_hint)
|
|||
|
||||
static inline long cede_processor(void)
|
||||
{
|
||||
return plpar_hcall_norets(H_CEDE);
|
||||
/*
|
||||
* We cannot call tracepoints inside RCU idle regions which
|
||||
* means we must not trace H_CEDE.
|
||||
*/
|
||||
return plpar_hcall_norets_notrace(H_CEDE);
|
||||
}
|
||||
|
||||
static inline long extended_cede_processor(unsigned long latency_hint)
|
||||
|
|
|
|||
|
|
@ -157,7 +157,7 @@ do { \
|
|||
"2: lwz%X1 %L0, %L1\n" \
|
||||
EX_TABLE(1b, %l2) \
|
||||
EX_TABLE(2b, %l2) \
|
||||
: "=r" (x) \
|
||||
: "=&r" (x) \
|
||||
: "m" (*addr) \
|
||||
: \
|
||||
: label)
|
||||
|
|
|
|||
|
|
@ -340,6 +340,12 @@ ret_from_mc_except:
|
|||
andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \
|
||||
bne masked_interrupt_book3e_##n
|
||||
|
||||
/*
|
||||
* Additional regs must be re-loaded from paca before EXCEPTION_COMMON* is
|
||||
* called, because that does SAVE_NVGPRS which must see the original register
|
||||
* values, otherwise the scratch values might be restored when exiting the
|
||||
* interrupt.
|
||||
*/
|
||||
#define PROLOG_ADDITION_2REGS_GEN(n) \
|
||||
std r14,PACA_EXGEN+EX_R14(r13); \
|
||||
std r15,PACA_EXGEN+EX_R15(r13)
|
||||
|
|
@ -535,6 +541,10 @@ __end_interrupts:
|
|||
PROLOG_ADDITION_2REGS)
|
||||
mfspr r14,SPRN_DEAR
|
||||
mfspr r15,SPRN_ESR
|
||||
std r14,_DAR(r1)
|
||||
std r15,_DSISR(r1)
|
||||
ld r14,PACA_EXGEN+EX_R14(r13)
|
||||
ld r15,PACA_EXGEN+EX_R15(r13)
|
||||
EXCEPTION_COMMON(0x300)
|
||||
b storage_fault_common
|
||||
|
||||
|
|
@ -544,6 +554,10 @@ __end_interrupts:
|
|||
PROLOG_ADDITION_2REGS)
|
||||
li r15,0
|
||||
mr r14,r10
|
||||
std r14,_DAR(r1)
|
||||
std r15,_DSISR(r1)
|
||||
ld r14,PACA_EXGEN+EX_R14(r13)
|
||||
ld r15,PACA_EXGEN+EX_R15(r13)
|
||||
EXCEPTION_COMMON(0x400)
|
||||
b storage_fault_common
|
||||
|
||||
|
|
@ -557,6 +571,10 @@ __end_interrupts:
|
|||
PROLOG_ADDITION_2REGS)
|
||||
mfspr r14,SPRN_DEAR
|
||||
mfspr r15,SPRN_ESR
|
||||
std r14,_DAR(r1)
|
||||
std r15,_DSISR(r1)
|
||||
ld r14,PACA_EXGEN+EX_R14(r13)
|
||||
ld r15,PACA_EXGEN+EX_R15(r13)
|
||||
EXCEPTION_COMMON(0x600)
|
||||
b alignment_more /* no room, go out of line */
|
||||
|
||||
|
|
@ -565,10 +583,10 @@ __end_interrupts:
|
|||
NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
|
||||
PROLOG_ADDITION_1REG)
|
||||
mfspr r14,SPRN_ESR
|
||||
EXCEPTION_COMMON(0x700)
|
||||
std r14,_DSISR(r1)
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
ld r14,PACA_EXGEN+EX_R14(r13)
|
||||
EXCEPTION_COMMON(0x700)
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl program_check_exception
|
||||
REST_NVGPRS(r1)
|
||||
b interrupt_return
|
||||
|
|
@ -725,11 +743,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
|
|||
* normal exception
|
||||
*/
|
||||
mfspr r14,SPRN_DBSR
|
||||
EXCEPTION_COMMON_CRIT(0xd00)
|
||||
std r14,_DSISR(r1)
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
ld r14,PACA_EXCRIT+EX_R14(r13)
|
||||
ld r15,PACA_EXCRIT+EX_R15(r13)
|
||||
EXCEPTION_COMMON_CRIT(0xd00)
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl DebugException
|
||||
REST_NVGPRS(r1)
|
||||
b interrupt_return
|
||||
|
|
@ -796,11 +814,11 @@ kernel_dbg_exc:
|
|||
* normal exception
|
||||
*/
|
||||
mfspr r14,SPRN_DBSR
|
||||
EXCEPTION_COMMON_DBG(0xd08)
|
||||
std r14,_DSISR(r1)
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
ld r14,PACA_EXDBG+EX_R14(r13)
|
||||
ld r15,PACA_EXDBG+EX_R15(r13)
|
||||
EXCEPTION_COMMON_DBG(0xd08)
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
bl DebugException
|
||||
REST_NVGPRS(r1)
|
||||
b interrupt_return
|
||||
|
|
@ -931,11 +949,7 @@ masked_interrupt_book3e_0x2c0:
|
|||
* original values stashed away in the PACA
|
||||
*/
|
||||
storage_fault_common:
|
||||
std r14,_DAR(r1)
|
||||
std r15,_DSISR(r1)
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
ld r14,PACA_EXGEN+EX_R14(r13)
|
||||
ld r15,PACA_EXGEN+EX_R15(r13)
|
||||
bl do_page_fault
|
||||
b interrupt_return
|
||||
|
||||
|
|
@ -944,11 +958,7 @@ storage_fault_common:
|
|||
* continues here.
|
||||
*/
|
||||
alignment_more:
|
||||
std r14,_DAR(r1)
|
||||
std r15,_DSISR(r1)
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
ld r14,PACA_EXGEN+EX_R14(r13)
|
||||
ld r15,PACA_EXGEN+EX_R15(r13)
|
||||
bl alignment_exception
|
||||
REST_NVGPRS(r1)
|
||||
b interrupt_return
|
||||
|
|
|
|||
|
|
@ -34,9 +34,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
|
|||
syscall_fn f;
|
||||
|
||||
kuep_lock();
|
||||
#ifdef CONFIG_PPC32
|
||||
kuap_save_and_lock(regs);
|
||||
#endif
|
||||
|
||||
regs->orig_gpr3 = r3;
|
||||
|
||||
|
|
@ -427,6 +424,7 @@ again:
|
|||
|
||||
/* Restore user access locks last */
|
||||
kuap_user_restore(regs);
|
||||
kuep_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -356,13 +356,16 @@ static void __init setup_legacy_serial_console(int console)
|
|||
|
||||
static int __init ioremap_legacy_serial_console(void)
|
||||
{
|
||||
struct legacy_serial_info *info = &legacy_serial_infos[legacy_serial_console];
|
||||
struct plat_serial8250_port *port = &legacy_serial_ports[legacy_serial_console];
|
||||
struct plat_serial8250_port *port;
|
||||
struct legacy_serial_info *info;
|
||||
void __iomem *vaddr;
|
||||
|
||||
if (legacy_serial_console < 0)
|
||||
return 0;
|
||||
|
||||
info = &legacy_serial_infos[legacy_serial_console];
|
||||
port = &legacy_serial_ports[legacy_serial_console];
|
||||
|
||||
if (!info->early_addr)
|
||||
return 0;
|
||||
|
||||
|
|
|
|||
|
|
@ -166,9 +166,9 @@ copy_ckfpr_from_user(struct task_struct *task, void __user *from)
|
|||
}
|
||||
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
|
||||
#else
|
||||
#define unsafe_copy_fpr_to_user(to, task, label) do { } while (0)
|
||||
#define unsafe_copy_fpr_to_user(to, task, label) do { if (0) goto label;} while (0)
|
||||
|
||||
#define unsafe_copy_fpr_from_user(task, from, label) do { } while (0)
|
||||
#define unsafe_copy_fpr_from_user(task, from, label) do { if (0) goto label;} while (0)
|
||||
|
||||
static inline unsigned long
|
||||
copy_fpr_to_user(void __user *to, struct task_struct *task)
|
||||
|
|
|
|||
|
|
@ -840,7 +840,7 @@ bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
|
|||
kvm_unmap_radix(kvm, range->slot, gfn);
|
||||
} else {
|
||||
for (gfn = range->start; gfn < range->end; gfn++)
|
||||
kvm_unmap_rmapp(kvm, range->slot, range->start);
|
||||
kvm_unmap_rmapp(kvm, range->slot, gfn);
|
||||
}
|
||||
|
||||
return false;
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@
|
|||
#include <linux/string.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include <linux/stop_machine.h>
|
||||
#include <asm/cputable.h>
|
||||
#include <asm/code-patching.h>
|
||||
#include <asm/page.h>
|
||||
|
|
@ -149,17 +150,17 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
|
|||
|
||||
pr_devel("patching dest %lx\n", (unsigned long)dest);
|
||||
|
||||
patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
|
||||
|
||||
if (types & STF_BARRIER_FALLBACK)
|
||||
// See comment in do_entry_flush_fixups() RE order of patching
|
||||
if (types & STF_BARRIER_FALLBACK) {
|
||||
patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
|
||||
patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
|
||||
patch_branch((struct ppc_inst *)(dest + 1),
|
||||
(unsigned long)&stf_barrier_fallback,
|
||||
BRANCH_SET_LINK);
|
||||
else
|
||||
patch_instruction((struct ppc_inst *)(dest + 1),
|
||||
ppc_inst(instrs[1]));
|
||||
|
||||
patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
|
||||
(unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK);
|
||||
} else {
|
||||
patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
|
||||
patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
|
||||
patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
|
||||
}
|
||||
}
|
||||
|
||||
printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
|
||||
|
|
@ -227,11 +228,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
|
|||
: "unknown");
|
||||
}
|
||||
|
||||
static int __do_stf_barrier_fixups(void *data)
|
||||
{
|
||||
enum stf_barrier_type *types = data;
|
||||
|
||||
do_stf_entry_barrier_fixups(*types);
|
||||
do_stf_exit_barrier_fixups(*types);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void do_stf_barrier_fixups(enum stf_barrier_type types)
|
||||
{
|
||||
do_stf_entry_barrier_fixups(types);
|
||||
do_stf_exit_barrier_fixups(types);
|
||||
/*
|
||||
* The call to the fallback entry flush, and the fallback/sync-ori exit
|
||||
* flush can not be safely patched in/out while other CPUs are executing
|
||||
* them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
|
||||
* spin in the stop machine core with interrupts hard disabled.
|
||||
*/
|
||||
stop_machine(__do_stf_barrier_fixups, &types, NULL);
|
||||
}
|
||||
|
||||
void do_uaccess_flush_fixups(enum l1d_flush_type types)
|
||||
|
|
@ -284,8 +299,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
|
|||
: "unknown");
|
||||
}
|
||||
|
||||
void do_entry_flush_fixups(enum l1d_flush_type types)
|
||||
static int __do_entry_flush_fixups(void *data)
|
||||
{
|
||||
enum l1d_flush_type types = *(enum l1d_flush_type *)data;
|
||||
unsigned int instrs[3], *dest;
|
||||
long *start, *end;
|
||||
int i;
|
||||
|
|
@ -309,6 +325,31 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
|
|||
if (types & L1D_FLUSH_MTTRIG)
|
||||
instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
|
||||
|
||||
/*
|
||||
* If we're patching in or out the fallback flush we need to be careful about the
|
||||
* order in which we patch instructions. That's because it's possible we could
|
||||
* take a page fault after patching one instruction, so the sequence of
|
||||
* instructions must be safe even in a half patched state.
|
||||
*
|
||||
* To make that work, when patching in the fallback flush we patch in this order:
|
||||
* - the mflr (dest)
|
||||
* - the mtlr (dest + 2)
|
||||
* - the branch (dest + 1)
|
||||
*
|
||||
* That ensures the sequence is safe to execute at any point. In contrast if we
|
||||
* patch the mtlr last, it's possible we could return from the branch and not
|
||||
* restore LR, leading to a crash later.
|
||||
*
|
||||
* When patching out the fallback flush (either with nops or another flush type),
|
||||
* we patch in this order:
|
||||
* - the branch (dest + 1)
|
||||
* - the mtlr (dest + 2)
|
||||
* - the mflr (dest)
|
||||
*
|
||||
* Note we are protected by stop_machine() from other CPUs executing the code in a
|
||||
* semi-patched state.
|
||||
*/
|
||||
|
||||
start = PTRRELOC(&__start___entry_flush_fixup);
|
||||
end = PTRRELOC(&__stop___entry_flush_fixup);
|
||||
for (i = 0; start < end; start++, i++) {
|
||||
|
|
@ -316,15 +357,16 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
|
|||
|
||||
pr_devel("patching dest %lx\n", (unsigned long)dest);
|
||||
|
||||
patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
|
||||
|
||||
if (types == L1D_FLUSH_FALLBACK)
|
||||
patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&entry_flush_fallback,
|
||||
BRANCH_SET_LINK);
|
||||
else
|
||||
if (types == L1D_FLUSH_FALLBACK) {
|
||||
patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
|
||||
patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
|
||||
patch_branch((struct ppc_inst *)(dest + 1),
|
||||
(unsigned long)&entry_flush_fallback, BRANCH_SET_LINK);
|
||||
} else {
|
||||
patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
|
||||
|
||||
patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
|
||||
patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
|
||||
patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
|
||||
}
|
||||
}
|
||||
|
||||
start = PTRRELOC(&__start___scv_entry_flush_fixup);
|
||||
|
|
@ -334,15 +376,16 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
|
|||
|
||||
pr_devel("patching dest %lx\n", (unsigned long)dest);
|
||||
|
||||
patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
|
||||
|
||||
if (types == L1D_FLUSH_FALLBACK)
|
||||
patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
|
||||
BRANCH_SET_LINK);
|
||||
else
|
||||
if (types == L1D_FLUSH_FALLBACK) {
|
||||
patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
|
||||
patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
|
||||
patch_branch((struct ppc_inst *)(dest + 1),
|
||||
(unsigned long)&scv_entry_flush_fallback, BRANCH_SET_LINK);
|
||||
} else {
|
||||
patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
|
||||
|
||||
patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
|
||||
patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
|
||||
patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
@ -354,6 +397,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
|
|||
: "ori type" :
|
||||
(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
|
||||
: "unknown");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void do_entry_flush_fixups(enum l1d_flush_type types)
|
||||
{
|
||||
/*
|
||||
* The call to the fallback flush can not be safely patched in/out while
|
||||
* other CPUs are executing it. So call __do_entry_flush_fixups() on one
|
||||
* CPU while all other CPUs spin in the stop machine core with interrupts
|
||||
* hard disabled.
|
||||
*/
|
||||
stop_machine(__do_entry_flush_fixups, &types, NULL);
|
||||
}
|
||||
|
||||
void do_rfi_flush_fixups(enum l1d_flush_type types)
|
||||
|
|
|
|||
|
|
@ -102,6 +102,16 @@ END_FTR_SECTION(0, 1); \
|
|||
#define HCALL_BRANCH(LABEL)
|
||||
#endif
|
||||
|
||||
_GLOBAL_TOC(plpar_hcall_norets_notrace)
|
||||
HMT_MEDIUM
|
||||
|
||||
mfcr r0
|
||||
stw r0,8(r1)
|
||||
HVSC /* invoke the hypervisor */
|
||||
lwz r0,8(r1)
|
||||
mtcrf 0xff,r0
|
||||
blr /* return r3 = status */
|
||||
|
||||
_GLOBAL_TOC(plpar_hcall_norets)
|
||||
HMT_MEDIUM
|
||||
|
||||
|
|
|
|||
|
|
@ -1829,30 +1829,28 @@ void hcall_tracepoint_unregfunc(void)
|
|||
#endif
|
||||
|
||||
/*
|
||||
* Since the tracing code might execute hcalls we need to guard against
|
||||
* recursion. One example of this are spinlocks calling H_YIELD on
|
||||
* shared processor partitions.
|
||||
* Keep track of hcall tracing depth and prevent recursion. Warn if any is
|
||||
* detected because it may indicate a problem. This will not catch all
|
||||
* problems with tracing code making hcalls, because the tracing might have
|
||||
* been invoked from a non-hcall, so the first hcall could recurse into it
|
||||
* without warning here, but this better than nothing.
|
||||
*
|
||||
* Hcalls with specific problems being traced should use the _notrace
|
||||
* plpar_hcall variants.
|
||||
*/
|
||||
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
|
||||
|
||||
|
||||
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
|
||||
notrace void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned int *depth;
|
||||
|
||||
/*
|
||||
* We cannot call tracepoints inside RCU idle regions which
|
||||
* means we must not trace H_CEDE.
|
||||
*/
|
||||
if (opcode == H_CEDE)
|
||||
return;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
depth = this_cpu_ptr(&hcall_trace_depth);
|
||||
|
||||
if (*depth)
|
||||
if (WARN_ON_ONCE(*depth))
|
||||
goto out;
|
||||
|
||||
(*depth)++;
|
||||
|
|
@ -1864,19 +1862,16 @@ out:
|
|||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
|
||||
notrace void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
|
||||
{
|
||||
unsigned long flags;
|
||||
unsigned int *depth;
|
||||
|
||||
if (opcode == H_CEDE)
|
||||
return;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
depth = this_cpu_ptr(&hcall_trace_depth);
|
||||
|
||||
if (*depth)
|
||||
if (*depth) /* Don't warn again on the way out */
|
||||
goto out;
|
||||
|
||||
(*depth)++;
|
||||
|
|
|
|||
|
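The hcall_trace_depth counter above is a per-CPU re-entrancy guard: tracing code may itself issue hcalls, so the entry hook bumps the depth and bails out (now with a one-time warning) if it is already non-zero. A minimal userspace sketch of the same guard pattern, using a thread-local counter in place of the per-CPU variable (names invented, not kernel code):

/* Sketch: re-entrancy guard; a thread-local counter stands in for the per-CPU one. */
#include <stdio.h>

static _Thread_local unsigned int trace_depth;

static void trace_event(const char *what)
{
	if (trace_depth)		/* already inside the tracer: do not recurse */
		return;

	trace_depth++;
	printf("trace: %s\n", what);	/* this body could itself call trace_event() safely */
	trace_depth--;
}

int main(void)
{
	trace_event("example hcall");
	return 0;
}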
|
@ -180,7 +180,6 @@ static inline void arch_ftrace_nmi_exit(void) { }
|
|||
|
||||
BUILD_TRAP_HANDLER(nmi)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
TRAP_HANDLER_DECL;
|
||||
|
||||
arch_ftrace_nmi_enter();
|
||||
|
|
|
|||
|
|
@ -30,6 +30,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
|
|||
|
||||
KBUILD_CFLAGS := -m$(BITS) -O2
|
||||
KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
|
||||
KBUILD_CFLAGS += -Wundef
|
||||
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
|
||||
cflags-$(CONFIG_X86_32) := -march=i386
|
||||
cflags-$(CONFIG_X86_64) := -mcmodel=small -mno-red-zone
|
||||
|
|
@ -48,10 +49,10 @@ KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
|
|||
KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
|
||||
KBUILD_CFLAGS += $(CLANG_FLAGS)
|
||||
|
||||
# sev-es.c indirectly inludes inat-table.h which is generated during
|
||||
# sev.c indirectly inludes inat-table.h which is generated during
|
||||
# compilation and stored in $(objtree). Add the directory to the includes so
|
||||
# that the compiler finds it even with out-of-tree builds (make O=/some/path).
|
||||
CFLAGS_sev-es.o += -I$(objtree)/arch/x86/lib/
|
||||
CFLAGS_sev.o += -I$(objtree)/arch/x86/lib/
|
||||
|
||||
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
|
||||
GCOV_PROFILE := n
|
||||
|
|
@ -93,7 +94,7 @@ ifdef CONFIG_X86_64
|
|||
vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
|
||||
vmlinux-objs-y += $(obj)/mem_encrypt.o
|
||||
vmlinux-objs-y += $(obj)/pgtable_64.o
|
||||
vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev-es.o
|
||||
vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
|
||||
endif
|
||||
|
||||
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
|
||||
|
|
|
|||
|
|
@ -172,7 +172,7 @@ void __puthex(unsigned long value)
|
|||
}
|
||||
}
|
||||
|
||||
#if CONFIG_X86_NEED_RELOCS
|
||||
#ifdef CONFIG_X86_NEED_RELOCS
|
||||
static void handle_relocations(void *output, unsigned long output_len,
|
||||
unsigned long virt_addr)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -79,7 +79,7 @@ struct mem_vector {
|
|||
u64 size;
|
||||
};
|
||||
|
||||
#if CONFIG_RANDOMIZE_BASE
|
||||
#ifdef CONFIG_RANDOMIZE_BASE
|
||||
/* kaslr.c */
|
||||
void choose_random_location(unsigned long input,
|
||||
unsigned long input_size,
|
||||
|
|
|
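The two "#if CONFIG_..." to "#ifdef CONFIG_..." fixes above go together with the -Wundef flag added to the decompressor Makefile earlier in this diff: with -Wundef, #if on a macro that is not defined produces a warning, while #ifdef simply takes the false branch. A small standalone illustration (FOO is an invented macro name):

/* Build with: cc -Wundef demo.c */
#include <stdio.h>

int main(void)
{
#if FOO			/* -Wundef warns here: "FOO" is not defined, evaluates to 0 */
	puts("#if branch");
#endif
#ifdef FOO		/* no warning: #ifdef only asks whether FOO is defined at all */
	puts("#ifdef branch");
#endif
	return 0;
}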
|||
|
|
@ -13,7 +13,7 @@
|
|||
#include "misc.h"
|
||||
|
||||
#include <asm/pgtable_types.h>
|
||||
#include <asm/sev-es.h>
|
||||
#include <asm/sev.h>
|
||||
#include <asm/trapnr.h>
|
||||
#include <asm/trap_pf.h>
|
||||
#include <asm/msr-index.h>
|
||||
|
|
@ -117,7 +117,7 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
|
|||
#include "../../lib/insn.c"
|
||||
|
||||
/* Include code for early handlers */
|
||||
#include "../../kernel/sev-es-shared.c"
|
||||
#include "../../kernel/sev-shared.c"
|
||||
|
||||
static bool early_setup_sev_es(void)
|
||||
{
|
||||
|
|
@ -537,9 +537,9 @@
|
|||
/* K8 MSRs */
|
||||
#define MSR_K8_TOP_MEM1 0xc001001a
|
||||
#define MSR_K8_TOP_MEM2 0xc001001d
|
||||
#define MSR_K8_SYSCFG 0xc0010010
|
||||
#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT 23
|
||||
#define MSR_K8_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
|
||||
#define MSR_AMD64_SYSCFG 0xc0010010
|
||||
#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT 23
|
||||
#define MSR_AMD64_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT)
|
||||
#define MSR_K8_INT_PENDING_MSG 0xc0010055
|
||||
/* C1E active bits in int pending message */
|
||||
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
|
||||
|
|
|
|||
|
|
@ -787,8 +787,10 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
|
|||
|
||||
#ifdef CONFIG_CPU_SUP_AMD
|
||||
extern u32 amd_get_nodes_per_socket(void);
|
||||
extern u32 amd_get_highest_perf(void);
|
||||
#else
|
||||
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
|
||||
static inline u32 amd_get_highest_perf(void) { return 0; }
|
||||
#endif
|
||||
|
||||
static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
|
||||
|
|
|
|||
arch/x86/include/asm/sev-common.h (new file, 62 lines)
@ -0,0 +1,62 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* AMD SEV header common between the guest and the hypervisor.
|
||||
*
|
||||
* Author: Brijesh Singh <brijesh.singh@amd.com>
|
||||
*/
|
||||
|
||||
#ifndef __ASM_X86_SEV_COMMON_H
|
||||
#define __ASM_X86_SEV_COMMON_H
|
||||
|
||||
#define GHCB_MSR_INFO_POS 0
|
||||
#define GHCB_MSR_INFO_MASK (BIT_ULL(12) - 1)
|
||||
|
||||
#define GHCB_MSR_SEV_INFO_RESP 0x001
|
||||
#define GHCB_MSR_SEV_INFO_REQ 0x002
|
||||
#define GHCB_MSR_VER_MAX_POS 48
|
||||
#define GHCB_MSR_VER_MAX_MASK 0xffff
|
||||
#define GHCB_MSR_VER_MIN_POS 32
|
||||
#define GHCB_MSR_VER_MIN_MASK 0xffff
|
||||
#define GHCB_MSR_CBIT_POS 24
|
||||
#define GHCB_MSR_CBIT_MASK 0xff
|
||||
#define GHCB_MSR_SEV_INFO(_max, _min, _cbit) \
|
||||
((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) | \
|
||||
(((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) | \
|
||||
(((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) | \
|
||||
GHCB_MSR_SEV_INFO_RESP)
|
||||
#define GHCB_MSR_INFO(v) ((v) & 0xfffUL)
|
||||
#define GHCB_MSR_PROTO_MAX(v) (((v) >> GHCB_MSR_VER_MAX_POS) & GHCB_MSR_VER_MAX_MASK)
|
||||
#define GHCB_MSR_PROTO_MIN(v) (((v) >> GHCB_MSR_VER_MIN_POS) & GHCB_MSR_VER_MIN_MASK)
|
||||
|
||||
#define GHCB_MSR_CPUID_REQ 0x004
|
||||
#define GHCB_MSR_CPUID_RESP 0x005
|
||||
#define GHCB_MSR_CPUID_FUNC_POS 32
|
||||
#define GHCB_MSR_CPUID_FUNC_MASK 0xffffffff
|
||||
#define GHCB_MSR_CPUID_VALUE_POS 32
|
||||
#define GHCB_MSR_CPUID_VALUE_MASK 0xffffffff
|
||||
#define GHCB_MSR_CPUID_REG_POS 30
|
||||
#define GHCB_MSR_CPUID_REG_MASK 0x3
|
||||
#define GHCB_CPUID_REQ_EAX 0
|
||||
#define GHCB_CPUID_REQ_EBX 1
|
||||
#define GHCB_CPUID_REQ_ECX 2
|
||||
#define GHCB_CPUID_REQ_EDX 3
|
||||
#define GHCB_CPUID_REQ(fn, reg) \
|
||||
(GHCB_MSR_CPUID_REQ | \
|
||||
(((unsigned long)reg & GHCB_MSR_CPUID_REG_MASK) << GHCB_MSR_CPUID_REG_POS) | \
|
||||
(((unsigned long)fn) << GHCB_MSR_CPUID_FUNC_POS))
|
||||
|
||||
#define GHCB_MSR_TERM_REQ 0x100
|
||||
#define GHCB_MSR_TERM_REASON_SET_POS 12
|
||||
#define GHCB_MSR_TERM_REASON_SET_MASK 0xf
|
||||
#define GHCB_MSR_TERM_REASON_POS 16
|
||||
#define GHCB_MSR_TERM_REASON_MASK 0xff
|
||||
#define GHCB_SEV_TERM_REASON(reason_set, reason_val) \
|
||||
(((((u64)reason_set) & GHCB_MSR_TERM_REASON_SET_MASK) << GHCB_MSR_TERM_REASON_SET_POS) | \
|
||||
((((u64)reason_val) & GHCB_MSR_TERM_REASON_MASK) << GHCB_MSR_TERM_REASON_POS))
|
||||
|
||||
#define GHCB_SEV_ES_REASON_GENERAL_REQUEST 0
|
||||
#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1
|
||||
|
||||
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
|
||||
|
||||
#endif
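The macros above pack the SEV-ES GHCB MSR protocol response into a single 64-bit value: a 12-bit info/response code, the supported protocol version range in bits 32-47 and 48-63, and the C-bit position in bits 24-31. A standalone sketch that round-trips those fields with the same shifts and masks (macro names shortened, concrete numbers invented for illustration):

/* Standalone round-trip of the GHCB SEV_INFO MSR fields (illustrative values only). */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))
#define MSR_INFO_MASK	(BIT_ULL(12) - 1)
#define SEV_INFO_RESP	0x001
#define VER_MAX_POS	48
#define VER_MIN_POS	32
#define CBIT_POS	24

static uint64_t sev_info(uint64_t max, uint64_t min, uint64_t cbit)
{
	return ((max & 0xffff) << VER_MAX_POS) |
	       ((min & 0xffff) << VER_MIN_POS) |
	       ((cbit & 0xff) << CBIT_POS) |
	       SEV_INFO_RESP;
}

int main(void)
{
	uint64_t v = sev_info(2, 1, 51);	/* hypothetical: protocol 1..2, C-bit 51 */

	printf("resp=0x%llx proto_max=%llu proto_min=%llu cbit=%llu\n",
	       (unsigned long long)(v & MSR_INFO_MASK),
	       (unsigned long long)((v >> VER_MAX_POS) & 0xffff),
	       (unsigned long long)((v >> VER_MIN_POS) & 0xffff),
	       (unsigned long long)((v >> CBIT_POS) & 0xff));
	return 0;
}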
|
||||
|
|
@ -10,34 +10,12 @@
|
|||
|
||||
#include <linux/types.h>
|
||||
#include <asm/insn.h>
|
||||
#include <asm/sev-common.h>
|
||||
|
||||
#define GHCB_SEV_INFO 0x001UL
|
||||
#define GHCB_SEV_INFO_REQ 0x002UL
|
||||
#define GHCB_INFO(v) ((v) & 0xfffUL)
|
||||
#define GHCB_PROTO_MAX(v) (((v) >> 48) & 0xffffUL)
|
||||
#define GHCB_PROTO_MIN(v) (((v) >> 32) & 0xffffUL)
|
||||
#define GHCB_PROTO_OUR 0x0001UL
|
||||
#define GHCB_SEV_CPUID_REQ 0x004UL
|
||||
#define GHCB_CPUID_REQ_EAX 0
|
||||
#define GHCB_CPUID_REQ_EBX 1
|
||||
#define GHCB_CPUID_REQ_ECX 2
|
||||
#define GHCB_CPUID_REQ_EDX 3
|
||||
#define GHCB_CPUID_REQ(fn, reg) (GHCB_SEV_CPUID_REQ | \
|
||||
(((unsigned long)reg & 3) << 30) | \
|
||||
(((unsigned long)fn) << 32))
|
||||
#define GHCB_PROTO_OUR 0x0001UL
|
||||
#define GHCB_PROTOCOL_MAX 1ULL
|
||||
#define GHCB_DEFAULT_USAGE 0ULL
|
||||
|
||||
#define GHCB_PROTOCOL_MAX 0x0001UL
|
||||
#define GHCB_DEFAULT_USAGE 0x0000UL
|
||||
|
||||
#define GHCB_SEV_CPUID_RESP 0x005UL
|
||||
#define GHCB_SEV_TERMINATE 0x100UL
|
||||
#define GHCB_SEV_TERMINATE_REASON(reason_set, reason_val) \
|
||||
(((((u64)reason_set) & 0x7) << 12) | \
|
||||
((((u64)reason_val) & 0xff) << 16))
|
||||
#define GHCB_SEV_ES_REASON_GENERAL_REQUEST 0
|
||||
#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1
|
||||
|
||||
#define GHCB_SEV_GHCB_RESP_CODE(v) ((v) & 0xfff)
|
||||
#define VMGEXIT() { asm volatile("rep; vmmcall\n\r"); }
|
||||
|
||||
enum es_result {
|
||||
|
|
@ -7,4 +7,6 @@
|
|||
VDSO_CLOCKMODE_PVCLOCK, \
|
||||
VDSO_CLOCKMODE_HVCLOCK
|
||||
|
||||
#define HAVE_VDSO_CLOCKMODE_HVCLOCK
|
||||
|
||||
#endif /* __ASM_VDSO_CLOCKSOURCE_H */
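The new HAVE_VDSO_CLOCKMODE_HVCLOCK define appears to exist because VDSO_CLOCKMODE_HVCLOCK itself is an enumerator, and the Hyper-V clocksource (changed below) tests it with #ifdef, which only sees preprocessor symbols. A minimal illustration of why a companion macro is needed (not kernel code, names simplified):

/* Illustration: #ifdef only sees preprocessor symbols, never enumerators. */
#include <stdio.h>

enum clock_mode { CLOCKMODE_TSC, CLOCKMODE_HVCLOCK };
#define HAVE_CLOCKMODE_HVCLOCK		/* companion macro makes the mode testable */

int main(void)
{
#ifdef CLOCKMODE_HVCLOCK		/* never true: an enumerator is not a macro */
	puts("enum branch");
#endif
#ifdef HAVE_CLOCKMODE_HVCLOCK
	puts("macro branch taken");
#endif
	return 0;
}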
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg
|
|||
CFLAGS_REMOVE_ftrace.o = -pg
|
||||
CFLAGS_REMOVE_early_printk.o = -pg
|
||||
CFLAGS_REMOVE_head64.o = -pg
|
||||
CFLAGS_REMOVE_sev-es.o = -pg
|
||||
CFLAGS_REMOVE_sev.o = -pg
|
||||
endif
|
||||
|
||||
KASAN_SANITIZE_head$(BITS).o := n
|
||||
|
|
@ -28,7 +28,7 @@ KASAN_SANITIZE_dumpstack.o := n
|
|||
KASAN_SANITIZE_dumpstack_$(BITS).o := n
|
||||
KASAN_SANITIZE_stacktrace.o := n
|
||||
KASAN_SANITIZE_paravirt.o := n
|
||||
KASAN_SANITIZE_sev-es.o := n
|
||||
KASAN_SANITIZE_sev.o := n
|
||||
|
||||
# With some compiler versions the generated code results in boot hangs, caused
|
||||
# by several compilation units. To be safe, disable all instrumentation.
|
||||
|
|
@ -148,7 +148,7 @@ obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
|
|||
obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o
|
||||
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
|
||||
|
||||
obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev-es.o
|
||||
obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev.o
|
||||
###
|
||||
# 64 bit specific files
|
||||
ifeq ($(CONFIG_X86_64),y)
|
||||
|
|
|
|||
|
|
@ -593,8 +593,8 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
|
|||
*/
|
||||
if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
|
||||
/* Check if memory encryption is enabled */
|
||||
rdmsrl(MSR_K8_SYSCFG, msr);
|
||||
if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
|
||||
rdmsrl(MSR_AMD64_SYSCFG, msr);
|
||||
if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
|
||||
goto clear_all;
|
||||
|
||||
/*
|
||||
|
|
@ -1165,3 +1165,19 @@ void set_dr_addr_mask(unsigned long mask, int dr)
|
|||
break;
|
||||
}
|
||||
}
|
||||
|
||||
u32 amd_get_highest_perf(void)
|
||||
{
|
||||
struct cpuinfo_x86 *c = &boot_cpu_data;
|
||||
|
||||
if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
|
||||
(c->x86_model >= 0x70 && c->x86_model < 0x80)))
|
||||
return 166;
|
||||
|
||||
if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
|
||||
(c->x86_model >= 0x40 && c->x86_model < 0x70)))
|
||||
return 166;
|
||||
|
||||
return 255;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(amd_get_highest_perf);
|
||||
|
|
|
|||
|
|
@ -836,7 +836,7 @@ int __init amd_special_default_mtrr(void)
|
|||
if (boot_cpu_data.x86 < 0xf)
|
||||
return 0;
|
||||
/* In case some hypervisor doesn't pass SYSCFG through: */
|
||||
if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
|
||||
if (rdmsr_safe(MSR_AMD64_SYSCFG, &l, &h) < 0)
|
||||
return 0;
|
||||
/*
|
||||
* Memory between 4GB and top of mem is forced WB by this magic bit.
|
||||
|
|
|
|||
|
|
@ -53,13 +53,13 @@ static inline void k8_check_syscfg_dram_mod_en(void)
|
|||
(boot_cpu_data.x86 >= 0x0f)))
|
||||
return;
|
||||
|
||||
rdmsr(MSR_K8_SYSCFG, lo, hi);
|
||||
rdmsr(MSR_AMD64_SYSCFG, lo, hi);
|
||||
if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
|
||||
pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
|
||||
" not cleared by BIOS, clearing this bit\n",
|
||||
smp_processor_id());
|
||||
lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
|
||||
mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
|
||||
mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@
|
|||
#include <asm/realmode.h>
|
||||
#include <asm/extable.h>
|
||||
#include <asm/trapnr.h>
|
||||
#include <asm/sev-es.h>
|
||||
#include <asm/sev.h>
|
||||
|
||||
/*
|
||||
* Manage page tables very early on.
|
||||
|
|
|
|||
|
|
@ -95,7 +95,7 @@ static void get_fam10h_pci_mmconf_base(void)
|
|||
return;
|
||||
|
||||
/* SYS_CFG */
|
||||
address = MSR_K8_SYSCFG;
|
||||
address = MSR_AMD64_SYSCFG;
|
||||
rdmsrl(address, val);
|
||||
|
||||
/* TOP_MEM2 is not enabled? */
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@
|
|||
#include <asm/reboot.h>
|
||||
#include <asm/cache.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
#include <asm/sev-es.h>
|
||||
#include <asm/sev.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/nmi.h>
|
||||
|
|
|
|||
|
|
@ -26,13 +26,13 @@ static bool __init sev_es_check_cpu_features(void)
|
|||
|
||||
static void __noreturn sev_es_terminate(unsigned int reason)
|
||||
{
|
||||
u64 val = GHCB_SEV_TERMINATE;
|
||||
u64 val = GHCB_MSR_TERM_REQ;
|
||||
|
||||
/*
|
||||
* Tell the hypervisor what went wrong - only reason-set 0 is
|
||||
* currently supported.
|
||||
*/
|
||||
val |= GHCB_SEV_TERMINATE_REASON(0, reason);
|
||||
val |= GHCB_SEV_TERM_REASON(0, reason);
|
||||
|
||||
/* Request Guest Termination from Hypvervisor */
|
||||
sev_es_wr_ghcb_msr(val);
|
||||
|
|
@ -47,15 +47,15 @@ static bool sev_es_negotiate_protocol(void)
|
|||
u64 val;
|
||||
|
||||
/* Do the GHCB protocol version negotiation */
|
||||
sev_es_wr_ghcb_msr(GHCB_SEV_INFO_REQ);
|
||||
sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
|
||||
VMGEXIT();
|
||||
val = sev_es_rd_ghcb_msr();
|
||||
|
||||
if (GHCB_INFO(val) != GHCB_SEV_INFO)
|
||||
if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
|
||||
return false;
|
||||
|
||||
if (GHCB_PROTO_MAX(val) < GHCB_PROTO_OUR ||
|
||||
GHCB_PROTO_MIN(val) > GHCB_PROTO_OUR)
|
||||
if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTO_OUR ||
|
||||
GHCB_MSR_PROTO_MIN(val) > GHCB_PROTO_OUR)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
|
|
@ -153,28 +153,28 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
|
|||
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX));
|
||||
VMGEXIT();
|
||||
val = sev_es_rd_ghcb_msr();
|
||||
if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
|
||||
if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
|
||||
goto fail;
|
||||
regs->ax = val >> 32;
|
||||
|
||||
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX));
|
||||
VMGEXIT();
|
||||
val = sev_es_rd_ghcb_msr();
|
||||
if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
|
||||
if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
|
||||
goto fail;
|
||||
regs->bx = val >> 32;
|
||||
|
||||
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX));
|
||||
VMGEXIT();
|
||||
val = sev_es_rd_ghcb_msr();
|
||||
if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
|
||||
if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
|
||||
goto fail;
|
||||
regs->cx = val >> 32;
|
||||
|
||||
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX));
|
||||
VMGEXIT();
|
||||
val = sev_es_rd_ghcb_msr();
|
||||
if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
|
||||
if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
|
||||
goto fail;
|
||||
regs->dx = val >> 32;
|
||||
|
||||
|
|
@ -22,7 +22,7 @@
|
|||
|
||||
#include <asm/cpu_entry_area.h>
|
||||
#include <asm/stacktrace.h>
|
||||
#include <asm/sev-es.h>
|
||||
#include <asm/sev.h>
|
||||
#include <asm/insn-eval.h>
|
||||
#include <asm/fpu/internal.h>
|
||||
#include <asm/processor.h>
|
||||
|
|
@ -459,7 +459,7 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
|
|||
}
|
||||
|
||||
/* Include code shared with pre-decompression boot stage */
|
||||
#include "sev-es-shared.c"
|
||||
#include "sev-shared.c"
|
||||
|
||||
void noinstr __sev_es_nmi_complete(void)
|
||||
{
|
||||
|
|
@ -2043,7 +2043,7 @@ static bool amd_set_max_freq_ratio(void)
|
|||
return false;
|
||||
}
|
||||
|
||||
highest_perf = perf_caps.highest_perf;
|
||||
highest_perf = amd_get_highest_perf();
|
||||
nominal_perf = perf_caps.nominal_perf;
|
||||
|
||||
if (!highest_perf || !nominal_perf) {
|
||||
|
|
|
|||
|
|
@ -863,8 +863,8 @@ static __init void svm_adjust_mmio_mask(void)
|
|||
return;
|
||||
|
||||
/* If memory encryption is not enabled, use existing mask */
|
||||
rdmsrl(MSR_K8_SYSCFG, msr);
|
||||
if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
|
||||
rdmsrl(MSR_AMD64_SYSCFG, msr);
|
||||
if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
|
||||
return;
|
||||
|
||||
enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@
|
|||
#include <linux/bits.h>
|
||||
|
||||
#include <asm/svm.h>
|
||||
#include <asm/sev-common.h>
|
||||
|
||||
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
|
||||
|
||||
|
|
@ -525,40 +526,9 @@ void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
|
|||
|
||||
/* sev.c */
|
||||
|
||||
#define GHCB_VERSION_MAX 1ULL
|
||||
#define GHCB_VERSION_MIN 1ULL
|
||||
#define GHCB_VERSION_MAX 1ULL
|
||||
#define GHCB_VERSION_MIN 1ULL
|
||||
|
||||
#define GHCB_MSR_INFO_POS 0
|
||||
#define GHCB_MSR_INFO_MASK (BIT_ULL(12) - 1)
|
||||
|
||||
#define GHCB_MSR_SEV_INFO_RESP 0x001
|
||||
#define GHCB_MSR_SEV_INFO_REQ 0x002
|
||||
#define GHCB_MSR_VER_MAX_POS 48
|
||||
#define GHCB_MSR_VER_MAX_MASK 0xffff
|
||||
#define GHCB_MSR_VER_MIN_POS 32
|
||||
#define GHCB_MSR_VER_MIN_MASK 0xffff
|
||||
#define GHCB_MSR_CBIT_POS 24
|
||||
#define GHCB_MSR_CBIT_MASK 0xff
|
||||
#define GHCB_MSR_SEV_INFO(_max, _min, _cbit) \
|
||||
((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) | \
|
||||
(((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) | \
|
||||
(((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) | \
|
||||
GHCB_MSR_SEV_INFO_RESP)
|
||||
|
||||
#define GHCB_MSR_CPUID_REQ 0x004
|
||||
#define GHCB_MSR_CPUID_RESP 0x005
|
||||
#define GHCB_MSR_CPUID_FUNC_POS 32
|
||||
#define GHCB_MSR_CPUID_FUNC_MASK 0xffffffff
|
||||
#define GHCB_MSR_CPUID_VALUE_POS 32
|
||||
#define GHCB_MSR_CPUID_VALUE_MASK 0xffffffff
|
||||
#define GHCB_MSR_CPUID_REG_POS 30
|
||||
#define GHCB_MSR_CPUID_REG_MASK 0x3
|
||||
|
||||
#define GHCB_MSR_TERM_REQ 0x100
|
||||
#define GHCB_MSR_TERM_REASON_SET_POS 12
|
||||
#define GHCB_MSR_TERM_REASON_SET_MASK 0xf
|
||||
#define GHCB_MSR_TERM_REASON_POS 16
|
||||
#define GHCB_MSR_TERM_REASON_MASK 0xff
|
||||
|
||||
extern unsigned int max_sev_asid;
|
||||
|
||||
|
|
|
|||
|
|
@ -3468,7 +3468,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|||
case MSR_IA32_LASTBRANCHTOIP:
|
||||
case MSR_IA32_LASTINTFROMIP:
|
||||
case MSR_IA32_LASTINTTOIP:
|
||||
case MSR_K8_SYSCFG:
|
||||
case MSR_AMD64_SYSCFG:
|
||||
case MSR_K8_TSEG_ADDR:
|
||||
case MSR_K8_TSEG_MASK:
|
||||
case MSR_VM_HSAVE_PA:
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@
|
|||
#include <xen/xen.h>
|
||||
|
||||
#include <asm/fpu/internal.h>
|
||||
#include <asm/sev-es.h>
|
||||
#include <asm/sev.h>
|
||||
#include <asm/traps.h>
|
||||
#include <asm/kdebug.h>
|
||||
|
||||
|
|
|
|||
|
|
@ -529,7 +529,7 @@ void __init sme_enable(struct boot_params *bp)
|
|||
/*
|
||||
* No SME if Hypervisor bit is set. This check is here to
|
||||
* prevent a guest from trying to enable SME. For running as a
|
||||
* KVM guest the MSR_K8_SYSCFG will be sufficient, but there
|
||||
* KVM guest the MSR_AMD64_SYSCFG will be sufficient, but there
|
||||
* might be other hypervisors which emulate that MSR as non-zero
|
||||
* or even pass it through to the guest.
|
||||
* A malicious hypervisor can still trick a guest into this
|
||||
|
|
@ -542,8 +542,8 @@ void __init sme_enable(struct boot_params *bp)
|
|||
return;
|
||||
|
||||
/* For SME, check the SYSCFG MSR */
|
||||
msr = __rdmsr(MSR_K8_SYSCFG);
|
||||
if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
|
||||
msr = __rdmsr(MSR_AMD64_SYSCFG);
|
||||
if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
|
||||
return;
|
||||
} else {
|
||||
/* SEV state cannot be controlled by a command line option */
|
||||
|
|
|
|||
|
|
@ -284,7 +284,7 @@ static int __init early_root_info_init(void)
|
|||
|
||||
/* need to take out [4G, TOM2) for RAM*/
|
||||
/* SYS_CFG */
|
||||
address = MSR_K8_SYSCFG;
|
||||
address = MSR_AMD64_SYSCFG;
|
||||
rdmsrl(address, val);
|
||||
/* TOP_MEM2 is enabled? */
|
||||
if (val & (1<<21)) {
|
||||
|
|
|
|||
|
|
@ -47,7 +47,7 @@
|
|||
#include <asm/realmode.h>
|
||||
#include <asm/time.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/sev-es.h>
|
||||
#include <asm/sev.h>
|
||||
|
||||
/*
|
||||
* We allocate runtime services regions top-down, starting from -4G, i.e.
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@
|
|||
#include <asm/realmode.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/crash.h>
|
||||
#include <asm/sev-es.h>
|
||||
#include <asm/sev.h>
|
||||
|
||||
struct real_mode_header *real_mode_header;
|
||||
u32 *trampoline_cr4_features;
|
||||
|
|
|
|||
|
|
@ -123,9 +123,9 @@ SYM_CODE_START(startup_32)
|
|||
*/
|
||||
btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
|
||||
jnc .Ldone
|
||||
movl $MSR_K8_SYSCFG, %ecx
|
||||
movl $MSR_AMD64_SYSCFG, %ecx
|
||||
rdmsr
|
||||
bts $MSR_K8_SYSCFG_MEM_ENCRYPT_BIT, %eax
|
||||
bts $MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT, %eax
|
||||
jc .Ldone
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -372,9 +372,38 @@ struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync)
|
|||
return bic->bfqq[is_sync];
|
||||
}
|
||||
|
||||
static void bfq_put_stable_ref(struct bfq_queue *bfqq);
|
||||
|
||||
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync)
|
||||
{
|
||||
/*
|
||||
* If bfqq != NULL, then a non-stable queue merge between
|
||||
* bic->bfqq and bfqq is happening here. This causes troubles
|
||||
* in the following case: bic->bfqq has also been scheduled
|
||||
* for a possible stable merge with bic->stable_merge_bfqq,
|
||||
* and bic->stable_merge_bfqq == bfqq happens to
|
||||
* hold. Troubles occur because bfqq may then undergo a split,
|
||||
* thereby becoming eligible for a stable merge. Yet, if
|
||||
* bic->stable_merge_bfqq points exactly to bfqq, then bfqq
|
||||
* would be stably merged with itself. To avoid this anomaly,
|
||||
* we cancel the stable merge if
|
||||
* bic->stable_merge_bfqq == bfqq.
|
||||
*/
|
||||
bic->bfqq[is_sync] = bfqq;
|
||||
|
||||
if (bfqq && bic->stable_merge_bfqq == bfqq) {
|
||||
/*
|
||||
* Actually, these same instructions are executed also
|
||||
* in bfq_setup_cooperator, in case of abort or actual
|
||||
* execution of a stable merge. We could avoid
|
||||
* repeating these instructions there too, but if we
|
||||
* did so, we would nest even more complexity in this
|
||||
* function.
|
||||
*/
|
||||
bfq_put_stable_ref(bic->stable_merge_bfqq);
|
||||
|
||||
bic->stable_merge_bfqq = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
|
||||
|
|
@ -2263,10 +2292,9 @@ static void bfq_remove_request(struct request_queue *q,
|
|||
|
||||
}
|
||||
|
||||
static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
|
||||
static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
|
||||
unsigned int nr_segs)
|
||||
{
|
||||
struct request_queue *q = hctx->queue;
|
||||
struct bfq_data *bfqd = q->elevator->elevator_data;
|
||||
struct request *free = NULL;
|
||||
/*
|
||||
|
|
@ -2631,8 +2659,6 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq,
|
|||
static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd,
|
||||
struct bfq_queue *bfqq);
|
||||
|
||||
static void bfq_put_stable_ref(struct bfq_queue *bfqq);
|
||||
|
||||
/*
|
||||
* Attempt to schedule a merge of bfqq with the currently in-service
|
||||
* queue or with a close queue among the scheduled queues. Return
|
||||
|
|
|
|||
|
|
@ -1069,7 +1069,17 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
|
|||
|
||||
lockdep_assert_held(&ioc->lock);
|
||||
|
||||
inuse = clamp_t(u32, inuse, 1, active);
|
||||
/*
|
||||
* For an active leaf node, its inuse shouldn't be zero or exceed
|
||||
* @active. An active internal node's inuse is solely determined by the
|
||||
* inuse to active ratio of its children regardless of @inuse.
|
||||
*/
|
||||
if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
|
||||
inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
|
||||
iocg->child_active_sum);
|
||||
} else {
|
||||
inuse = clamp_t(u32, inuse, 1, active);
|
||||
}
|
||||
|
||||
iocg->last_inuse = iocg->inuse;
|
||||
if (save)
|
||||
|
|
@ -1086,7 +1096,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
|
|||
/* update the level sums */
|
||||
parent->child_active_sum += (s32)(active - child->active);
|
||||
parent->child_inuse_sum += (s32)(inuse - child->inuse);
|
||||
/* apply the udpates */
|
||||
/* apply the updates */
|
||||
child->active = active;
|
||||
child->inuse = inuse;
|
||||
|
||||
|
|
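The iocost change above makes an active inner node derive its inuse weight from its children's inuse/active ratio instead of the caller-supplied value. A worked example with invented numbers: if the children are active for 60 units but only 30 are in use, the node's own inuse becomes half of its active weight:

/* Worked example of the inner-node inuse derivation (numbers invented). */
#include <stdint.h>
#include <stdio.h>

#define DIV64_U64_ROUND_UP(x, y)	(((x) + (y) - 1) / (y))

int main(void)
{
	uint64_t active = 100, child_inuse_sum = 30, child_active_sum = 60;

	/* children use 30/60 = 50% of their active weight, so this node's
	 * inuse becomes 50% of its own active weight */
	uint64_t inuse = DIV64_U64_ROUND_UP(active * child_inuse_sum, child_active_sum);

	printf("inuse = %llu\n", (unsigned long long)inuse);	/* prints 50 */
	return 0;
}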
|
|||
|
|
@ -358,14 +358,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
|
|||
unsigned int nr_segs)
|
||||
{
|
||||
struct elevator_queue *e = q->elevator;
|
||||
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
|
||||
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
|
||||
struct blk_mq_ctx *ctx;
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
bool ret = false;
|
||||
enum hctx_type type;
|
||||
|
||||
if (e && e->type->ops.bio_merge)
|
||||
return e->type->ops.bio_merge(hctx, bio, nr_segs);
|
||||
return e->type->ops.bio_merge(q, bio, nr_segs);
|
||||
|
||||
ctx = blk_mq_get_ctx(q);
|
||||
hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
|
||||
type = hctx->type;
|
||||
if (!(hctx->flags & BLK_MQ_F_SHOULD_MERGE) ||
|
||||
list_empty_careful(&ctx->rq_lists[type]))
|
||||
|
|
|
|||
|
|
@ -2232,8 +2232,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
|
|||
/* Bypass scheduler for flush requests */
|
||||
blk_insert_flush(rq);
|
||||
blk_mq_run_hw_queue(data.hctx, true);
|
||||
} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
|
||||
!blk_queue_nonrot(q))) {
|
||||
} else if (plug && (q->nr_hw_queues == 1 ||
|
||||
blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
|
||||
q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
|
||||
/*
|
||||
* Use plugging if we have a ->commit_rqs() hook as well, as
|
||||
* we know the driver uses bd->last in a smart fashion.
|
||||
|
|
@ -3285,10 +3286,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
|
|||
/* tags can _not_ be used after returning from blk_mq_exit_queue */
|
||||
void blk_mq_exit_queue(struct request_queue *q)
|
||||
{
|
||||
struct blk_mq_tag_set *set = q->tag_set;
|
||||
struct blk_mq_tag_set *set = q->tag_set;
|
||||
|
||||
blk_mq_del_queue_tag_set(q);
|
||||
/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
|
||||
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
|
||||
/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
|
||||
blk_mq_del_queue_tag_set(q);
|
||||
}
|
||||
|
||||
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
|
||||
|
|
|
|||
|
|
@ -561,11 +561,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
|
|||
}
|
||||
}
|
||||
|
||||
static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
|
||||
static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
|
||||
unsigned int nr_segs)
|
||||
{
|
||||
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
|
||||
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
|
||||
struct kyber_hctx_data *khd = hctx->sched_data;
|
||||
struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
|
||||
struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
|
||||
unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
|
||||
struct list_head *rq_list = &kcq->rq_list[sched_domain];
|
||||
|
|
|
|||
|
|
@ -461,10 +461,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
|
|||
return ELEVATOR_NO_MERGE;
|
||||
}
|
||||
|
||||
static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
|
||||
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
|
||||
unsigned int nr_segs)
|
||||
{
|
||||
struct request_queue *q = hctx->queue;
|
||||
struct deadline_data *dd = q->elevator->elevator_data;
|
||||
struct request *free = NULL;
|
||||
bool ret;
|
||||
|
|
|
|||
|
|
@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out)
|
|||
}
|
||||
|
||||
/**
|
||||
* efi_partition(struct parsed_partitions *state)
|
||||
* efi_partition - scan for GPT partitions
|
||||
* @state: disk parsed partitions
|
||||
*
|
||||
* Description: called from check.c, if the disk contains GPT
|
||||
|
|
|
|||
|
|
@ -686,6 +686,13 @@ int nfit_spa_type(struct acpi_nfit_system_address *spa)
|
|||
return -1;
|
||||
}
|
||||
|
||||
static size_t sizeof_spa(struct acpi_nfit_system_address *spa)
|
||||
{
|
||||
if (spa->flags & ACPI_NFIT_LOCATION_COOKIE_VALID)
|
||||
return sizeof(*spa);
|
||||
return sizeof(*spa) - 8;
|
||||
}
|
||||
|
||||
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
|
||||
struct nfit_table_prev *prev,
|
||||
struct acpi_nfit_system_address *spa)
|
||||
|
|
@ -693,22 +700,22 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
|
|||
struct device *dev = acpi_desc->dev;
|
||||
struct nfit_spa *nfit_spa;
|
||||
|
||||
if (spa->header.length != sizeof(*spa))
|
||||
if (spa->header.length != sizeof_spa(spa))
|
||||
return false;
|
||||
|
||||
list_for_each_entry(nfit_spa, &prev->spas, list) {
|
||||
if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
|
||||
if (memcmp(nfit_spa->spa, spa, sizeof_spa(spa)) == 0) {
|
||||
list_move_tail(&nfit_spa->list, &acpi_desc->spas);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
|
||||
nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof_spa(spa),
|
||||
GFP_KERNEL);
|
||||
if (!nfit_spa)
|
||||
return false;
|
||||
INIT_LIST_HEAD(&nfit_spa->list);
|
||||
memcpy(nfit_spa->spa, spa, sizeof(*spa));
|
||||
memcpy(nfit_spa->spa, spa, sizeof_spa(spa));
|
||||
list_add_tail(&nfit_spa->list, &acpi_desc->spas);
|
||||
dev_dbg(dev, "spa index: %d type: %s\n",
|
||||
spa->range_index,
|
||||
|
|
|
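sizeof_spa() above accepts both the older SPA table layout and the newer one that appends an 8-byte location cookie, keying off ACPI_NFIT_LOCATION_COOKIE_VALID so comparisons and copies use the matching length. A standalone sketch of the same flag-dependent sizing (struct layout and flag bit are invented, not the ACPI definition):

/* Sketch of flag-dependent structure sizing (layout and flag bit invented). */
#include <stdint.h>
#include <stdio.h>

#define LOCATION_COOKIE_VALID	(1u << 0)

struct spa {
	uint64_t flags;
	/* ... other fixed fields would sit here ... */
	uint64_t location_cookie;	/* trailing field newer firmware may provide */
};

static size_t sizeof_spa(const struct spa *spa)
{
	if (spa->flags & LOCATION_COOKIE_VALID)
		return sizeof(*spa);
	return sizeof(*spa) - 8;	/* older tables omit the 8-byte cookie */
}

int main(void)
{
	struct spa a = { .flags = 0 };
	struct spa b = { .flags = LOCATION_COOKIE_VALID };

	printf("without cookie: %zu, with cookie: %zu\n", sizeof_spa(&a), sizeof_spa(&b));
	return 0;
}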
|||
|
|
@ -150,7 +150,7 @@ void fwnode_links_purge(struct fwnode_handle *fwnode)
|
|||
fwnode_links_purge_consumers(fwnode);
|
||||
}
|
||||
|
||||
static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
|
||||
void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
|
||||
{
|
||||
struct fwnode_handle *child;
|
||||
|
||||
|
|
@ -164,6 +164,7 @@ static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
|
|||
fwnode_for_each_available_child_node(fwnode, child)
|
||||
fw_devlink_purge_absent_suppliers(child);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
|
||||
|
||||
#ifdef CONFIG_SRCU
|
||||
static DEFINE_MUTEX(device_links_lock);
|
||||
|
|
|
|||
|
|
@ -1980,7 +1980,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
|
|||
* config ref and try to destroy the workqueue from inside the work
|
||||
* queue.
|
||||
*/
|
||||
flush_workqueue(nbd->recv_workq);
|
||||
if (nbd->recv_workq)
|
||||
flush_workqueue(nbd->recv_workq);
|
||||
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
|
||||
&nbd->config->runtime_flags))
|
||||
nbd_config_put(nbd);
|
||||
|
|
@ -2014,12 +2015,11 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
|
|||
return -EINVAL;
|
||||
}
|
||||
mutex_unlock(&nbd_index_mutex);
|
||||
if (!refcount_inc_not_zero(&nbd->config_refs)) {
|
||||
nbd_put(nbd);
|
||||
return 0;
|
||||
}
|
||||
if (!refcount_inc_not_zero(&nbd->config_refs))
|
||||
goto put_nbd;
|
||||
nbd_disconnect_and_put(nbd);
|
||||
nbd_config_put(nbd);
|
||||
put_nbd:
|
||||
nbd_put(nbd);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4623,6 +4623,9 @@ int of_clk_add_provider(struct device_node *np,
|
|||
struct of_clk_provider *cp;
|
||||
int ret;
|
||||
|
||||
if (!np)
|
||||
return 0;
|
||||
|
||||
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
|
||||
if (!cp)
|
||||
return -ENOMEM;
|
||||
|
|
@ -4662,6 +4665,9 @@ int of_clk_add_hw_provider(struct device_node *np,
|
|||
struct of_clk_provider *cp;
|
||||
int ret;
|
||||
|
||||
if (!np)
|
||||
return 0;
|
||||
|
||||
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
|
||||
if (!cp)
|
||||
return -ENOMEM;
|
||||
|
|
@ -4759,6 +4765,9 @@ void of_clk_del_provider(struct device_node *np)
|
|||
{
|
||||
struct of_clk_provider *cp;
|
||||
|
||||
if (!np)
|
||||
return;
|
||||
|
||||
mutex_lock(&of_clk_mutex);
|
||||
list_for_each_entry(cp, &of_clk_providers, link) {
|
||||
if (cp->node == np) {
|
||||
|
|
|
|||
|
|
@ -419,7 +419,7 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
|
|||
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
|
||||
}
|
||||
|
||||
#ifdef VDSO_CLOCKMODE_HVCLOCK
|
||||
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
|
||||
static int hv_cs_enable(struct clocksource *cs)
|
||||
{
|
||||
vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
|
||||
|
|
@ -435,7 +435,7 @@ static struct clocksource hyperv_cs_tsc = {
|
|||
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
||||
.suspend= suspend_hv_clock_tsc,
|
||||
.resume = resume_hv_clock_tsc,
|
||||
#ifdef VDSO_CLOCKMODE_HVCLOCK
|
||||
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
|
||||
.enable = hv_cs_enable,
|
||||
.vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK,
|
||||
#else
|
||||
|
|
|
|||
|
|
@ -646,7 +646,11 @@ static u64 get_max_boost_ratio(unsigned int cpu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
highest_perf = perf_caps.highest_perf;
|
||||
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
|
||||
highest_perf = amd_get_highest_perf();
|
||||
else
|
||||
highest_perf = perf_caps.highest_perf;
|
||||
|
||||
nominal_perf = perf_caps.nominal_perf;
|
||||
|
||||
if (!highest_perf || !nominal_perf) {
|
||||
|
|
|
|||
|
|
@ -3083,7 +3083,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
|
|||
edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
|
||||
|
||||
/* Check first whether TOP_MEM2 is enabled: */
|
||||
rdmsrl(MSR_K8_SYSCFG, msr_val);
|
||||
rdmsrl(MSR_AMD64_SYSCFG, msr_val);
|
||||
if (msr_val & BIT(21)) {
|
||||
rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
|
||||
edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
|
||||
|
|
|
|||
|
|
@ -1095,44 +1095,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Optimize link config in order: max bpp, min lanes, min clock */
|
||||
static int
|
||||
intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
const struct link_config_limits *limits)
|
||||
{
|
||||
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
|
||||
int bpp, clock, lane_count;
|
||||
int mode_rate, link_clock, link_avail;
|
||||
|
||||
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
|
||||
int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
|
||||
|
||||
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
|
||||
output_bpp);
|
||||
|
||||
for (lane_count = limits->min_lane_count;
|
||||
lane_count <= limits->max_lane_count;
|
||||
lane_count <<= 1) {
|
||||
for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
|
||||
link_clock = intel_dp->common_rates[clock];
|
||||
link_avail = intel_dp_max_data_rate(link_clock,
|
||||
lane_count);
|
||||
|
||||
if (mode_rate <= link_avail) {
|
||||
pipe_config->lane_count = lane_count;
|
||||
pipe_config->pipe_bpp = bpp;
|
||||
pipe_config->port_clock = link_clock;
|
||||
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
|
||||
{
|
||||
int i, num_bpc;
|
||||
|
|
@ -1382,22 +1344,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
|
|||
intel_dp_can_bigjoiner(intel_dp))
|
||||
pipe_config->bigjoiner = true;
|
||||
|
||||
if (intel_dp_is_edp(intel_dp))
|
||||
/*
|
||||
* Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
|
||||
* section A.1: "It is recommended that the minimum number of
|
||||
* lanes be used, using the minimum link rate allowed for that
|
||||
* lane configuration."
|
||||
*
|
||||
* Note that we fall back to the max clock and lane count for eDP
|
||||
* panels that fail with the fast optimal settings (see
|
||||
* intel_dp->use_max_params), in which case the fast vs. wide
|
||||
* choice doesn't matter.
|
||||
*/
|
||||
ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
|
||||
else
|
||||
/* Optimize for slow and wide. */
|
||||
ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
|
||||
/*
|
||||
* Optimize for slow and wide for everything, because there are some
|
||||
* eDP 1.3 and 1.4 panels don't work well with fast and narrow.
|
||||
*/
|
||||
ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
|
||||
|
||||
/* enable compression if the mode doesn't fit available BW */
|
||||
drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
|
||||
|
|
@ -2160,7 +2111,7 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp)
|
|||
* -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
|
||||
* -sink is HDMI2.1
|
||||
*/
|
||||
if (!(intel_dp->dpcd[2] & DP_PCON_SOURCE_CTL_MODE) ||
|
||||
if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
|
||||
!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
|
||||
intel_dp->frl.is_trained)
|
||||
return;
|
||||
|
|
|
|||
|
|
@ -383,7 +383,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
|
|||
i830_overlay_clock_gating(dev_priv, true);
|
||||
}
|
||||
|
||||
static void
|
||||
__i915_active_call static void
|
||||
intel_overlay_last_flip_retire(struct i915_active *active)
|
||||
{
|
||||
struct intel_overlay *overlay =
|
||||
|
|
|
|||
|
|
@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
|
|||
struct i915_ggtt_view view;
|
||||
|
||||
if (i915_gem_object_is_tiled(obj))
|
||||
chunk = roundup(chunk, tile_row_pages(obj));
|
||||
chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
|
||||
|
||||
view.type = I915_GGTT_VIEW_PARTIAL;
|
||||
view.partial.offset = rounddown(page_offset, chunk);
|
||||
|
|
|
|||
|
|
@ -641,7 +641,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
|
|||
|
||||
err = pin_pt_dma(vm, pde->pt.base);
|
||||
if (err) {
|
||||
i915_gem_object_put(pde->pt.base);
|
||||
free_pd(vm, pde);
|
||||
return err;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -653,8 +653,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
|
|||
* banks of memory are paired and unswizzled on the
|
||||
* uneven portion, so leave that as unknown.
|
||||
*/
|
||||
if (intel_uncore_read(uncore, C0DRB3) ==
|
||||
intel_uncore_read(uncore, C1DRB3)) {
|
||||
if (intel_uncore_read16(uncore, C0DRB3) ==
|
||||
intel_uncore_read16(uncore, C1DRB3)) {
|
||||
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
|
||||
swizzle_y = I915_BIT_6_SWIZZLE_9;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1156,7 +1156,8 @@ static int auto_active(struct i915_active *ref)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void auto_retire(struct i915_active *ref)
|
||||
__i915_active_call static void
|
||||
auto_retire(struct i915_active *ref)
|
||||
{
|
||||
i915_active_put(ref);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1153,10 +1153,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
|
|||
{
|
||||
struct device_node *phandle;
|
||||
|
||||
a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
|
||||
if (IS_ERR(a6xx_gpu->llc_mmio))
|
||||
return;
|
||||
|
||||
/*
|
||||
* There is a different programming path for targets with an mmu500
|
||||
* attached, so detect if that is the case
|
||||
|
|
@ -1166,6 +1162,11 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
|
|||
of_device_is_compatible(phandle, "arm,mmu-500"));
|
||||
of_node_put(phandle);
|
||||
|
||||
if (a6xx_gpu->have_mmu500)
|
||||
a6xx_gpu->llc_mmio = NULL;
|
||||
else
|
||||
a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
|
||||
|
||||
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
|
||||
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
|
||||
|
||||
|
|
|
|||
|
|
@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
|
|||
dp_audio_setup_acr(audio);
|
||||
dp_audio_safe_to_exit_level(audio);
|
||||
dp_audio_enable(audio, true);
|
||||
dp_display_signal_audio_start(dp_display);
|
||||
dp_display->audio_enabled = true;
|
||||
|
||||
end:
|
||||
|
|
|
|||
|
|
@ -178,6 +178,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void dp_display_signal_audio_start(struct msm_dp *dp_display)
|
||||
{
|
||||
struct dp_display_private *dp;
|
||||
|
||||
dp = container_of(dp_display, struct dp_display_private, dp_display);
|
||||
|
||||
reinit_completion(&dp->audio_comp);
|
||||
}
|
||||
|
||||
void dp_display_signal_audio_complete(struct msm_dp *dp_display)
|
||||
{
|
||||
struct dp_display_private *dp;
|
||||
|
|
@ -586,10 +595,8 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
|
|||
mutex_lock(&dp->event_mutex);
|
||||
|
||||
state = dp->hpd_state;
|
||||
if (state == ST_CONNECT_PENDING) {
|
||||
dp_display_enable(dp, 0);
|
||||
if (state == ST_CONNECT_PENDING)
|
||||
dp->hpd_state = ST_CONNECTED;
|
||||
}
|
||||
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
|
||||
|
|
@ -651,7 +658,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
|
|||
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
|
||||
|
||||
/* signal the disconnect event early to ensure proper teardown */
|
||||
reinit_completion(&dp->audio_comp);
|
||||
dp_display_handle_plugged_change(g_dp_display, false);
|
||||
|
||||
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
|
||||
|
|
@ -669,10 +675,8 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
|
|||
mutex_lock(&dp->event_mutex);
|
||||
|
||||
state = dp->hpd_state;
|
||||
if (state == ST_DISCONNECT_PENDING) {
|
||||
dp_display_disable(dp, 0);
|
||||
if (state == ST_DISCONNECT_PENDING)
|
||||
dp->hpd_state = ST_DISCONNECTED;
|
||||
}
|
||||
|
||||
mutex_unlock(&dp->event_mutex);
|
||||
|
||||
|
|
@ -898,7 +902,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
|
|||
/* wait only if audio was enabled */
|
||||
if (dp_display->audio_enabled) {
|
||||
/* signal the disconnect event */
|
||||
reinit_completion(&dp->audio_comp);
|
||||
dp_display_handle_plugged_change(dp_display, false);
|
||||
if (!wait_for_completion_timeout(&dp->audio_comp,
|
||||
HZ * 5))
|
||||
|
|
@ -1272,7 +1275,12 @@ static int dp_pm_resume(struct device *dev)
|
|||
|
||||
status = dp_catalog_link_is_connected(dp->catalog);
|
||||
|
||||
if (status)
|
||||
/*
|
||||
* can not declared display is connected unless
|
||||
* HDMI cable is plugged in and sink_count of
|
||||
* dongle become 1
|
||||
*/
|
||||
if (status && dp->link->sink_count)
|
||||
dp->dp_display.is_connected = true;
|
||||
else
|
||||
dp->dp_display.is_connected = false;
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
|
|||
int dp_display_request_irq(struct msm_dp *dp_display);
|
||||
bool dp_display_check_video_test(struct msm_dp *dp_display);
|
||||
int dp_display_get_test_bpp(struct msm_dp *dp_display);
|
||||
void dp_display_signal_audio_start(struct msm_dp *dp_display);
|
||||
void dp_display_signal_audio_complete(struct msm_dp *dp_display);
|
||||
|
||||
#endif /* _DP_DISPLAY_H_ */
|
||||
|
|
|
|||
|
|
@ -843,7 +843,7 @@ int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
|
|||
if (pixel_clk_provider)
|
||||
*pixel_clk_provider = phy->provided_clocks->hws[DSI_PIXEL_PLL_CLK]->clk;
|
||||
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
|
||||
|
|
|
|||
|
|
@ -405,6 +405,10 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
|
|||
if (!vco_name)
|
||||
return -ENOMEM;
|
||||
|
||||
parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
|
||||
if (!parent_name)
|
||||
return -ENOMEM;
|
||||
|
||||
clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
|
||||
if (!clk_name)
|
||||
return -ENOMEM;
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@
|
|||
* - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
|
||||
*/
|
||||
#define MSM_VERSION_MAJOR 1
|
||||
#define MSM_VERSION_MINOR 6
|
||||
#define MSM_VERSION_MINOR 7
|
||||
#define MSM_VERSION_PATCHLEVEL 0
|
||||
|
||||
static const struct drm_mode_config_funcs mode_config_funcs = {
|
||||
|
|
|
|||
|
|
@ -190,13 +190,25 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
|
|||
}
|
||||
|
||||
p = get_pages(obj);
|
||||
|
||||
if (!IS_ERR(p)) {
|
||||
msm_obj->pin_count++;
|
||||
update_inactive(msm_obj);
|
||||
}
|
||||
|
||||
msm_gem_unlock(obj);
|
||||
return p;
|
||||
}
|
||||
|
||||
void msm_gem_put_pages(struct drm_gem_object *obj)
|
||||
{
|
||||
/* when we start tracking the pin count, then do something here */
|
||||
struct msm_gem_object *msm_obj = to_msm_bo(obj);
|
||||
|
||||
msm_gem_lock(obj);
|
||||
msm_obj->pin_count--;
|
||||
GEM_WARN_ON(msm_obj->pin_count < 0);
|
||||
update_inactive(msm_obj);
|
||||
msm_gem_unlock(obj);
|
||||
}
|
||||
|
||||
int msm_gem_mmap_obj(struct drm_gem_object *obj,
|
||||
|
|
@ -646,6 +658,8 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
|
|||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
update_inactive(msm_obj);
|
||||
}
|
||||
|
||||
return msm_obj->vaddr;
|
||||
|
|
|
|||
|
|
@ -221,7 +221,7 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
|
|||
/* imported/exported objects are not purgeable: */
|
||||
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
|
||||
{
|
||||
return msm_obj->base.dma_buf && msm_obj->base.import_attach;
|
||||
return msm_obj->base.import_attach || msm_obj->pin_count;
|
||||
}
|
||||
|
||||
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
|
||||
|
|
@ -271,7 +271,7 @@ static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
|
|||
|
||||
static inline bool is_unevictable(struct msm_gem_object *msm_obj)
|
||||
{
|
||||
return is_unpurgeable(msm_obj) || msm_obj->pin_count || msm_obj->vaddr;
|
||||
return is_unpurgeable(msm_obj) || msm_obj->vaddr;
|
||||
}
|
||||
|
||||
static inline void mark_evictable(struct msm_gem_object *msm_obj)
|
||||
|
|
|
|||
|
|
@ -229,7 +229,6 @@ config DMARD10
|
|||
config HID_SENSOR_ACCEL_3D
|
||||
depends on HID_SENSOR_HUB
|
||||
select IIO_BUFFER
|
||||
select IIO_TRIGGERED_BUFFER
|
||||
select HID_SENSOR_IIO_COMMON
|
||||
select HID_SENSOR_IIO_TRIGGER
|
||||
tristate "HID Accelerometers 3D"
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER
|
|||
tristate "Common module (trigger) for all HID Sensor IIO drivers"
|
||||
depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
|
||||
select IIO_TRIGGER
|
||||
select IIO_TRIGGERED_BUFFER
|
||||
help
|
||||
Say yes here to build trigger support for HID sensors.
|
||||
Triggers will be send if all requested attributes were read.
|