Snap for 9496189 from 1462782685 to android14-6.1-keystone-qcom-release

Change-Id: Ieda15ead4c6f10d4b347489586cd686441c5244a
Android Build Coastguard Worker 2023-01-17 08:10:43 +00:00
commit 925907e3aa
11 changed files with 152 additions and 67 deletions

@@ -140,6 +140,23 @@ disabling KASAN altogether or controlling its features:
- ``kasan.vmalloc=off`` or ``=on`` disables or enables tagging of vmalloc
allocations (default: ``on``).
- ``kasan.page_alloc.sample=<sampling interval>`` makes KASAN tag only every
Nth page_alloc allocation with an order equal to or greater than
``kasan.page_alloc.sample.order``, where N is the value of the ``sample``
parameter (default: ``1``, that is, tag every such allocation).
This parameter is intended to mitigate the performance overhead introduced
by KASAN.
Note that enabling this parameter makes Hardware Tag-Based KASAN skip checks
of the allocations that sampling excludes and thus miss bad accesses to those
allocations. Use the default value for accurate bug detection.
- ``kasan.page_alloc.sample.order=<minimum page order>`` specifies the minimum
order of allocations that are affected by sampling (default: ``3``).
Only applies when ``kasan.page_alloc.sample`` is set to a value greater
than ``1``.
This parameter is intended to allow sampling only large page_alloc
allocations, which are the biggest source of the performance overhead.
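
For illustration only (the values below are examples, not defaults; the GKI
``CONFIG_CMDLINE`` change later in this commit enables
``kasan.page_alloc.sample=10`` with the default order), the two parameters
can be combined on the kernel command line as::

    kasan.page_alloc.sample=10 kasan.page_alloc.sample.order=3

With these values, allocations of order below 3 are always tagged, while for
allocations of order 3 or higher only one in every 10 is tagged.
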
Error reports
~~~~~~~~~~~~~

@@ -1,5 +1,4 @@
CONFIG_UAPI_HEADER_TEST=y
CONFIG_LOCALVERSION="-mainline"
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -57,7 +56,7 @@ CONFIG_CP15_BARRIER_EMULATION=y
CONFIG_SETEND_EMULATION=y
CONFIG_RANDOMIZE_BASE=y
# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
CONFIG_CMDLINE="console=ttynull stack_depot_disable=on cgroup_disable=pressure kasan.stacktrace=off kvm-arm.mode=protected bootconfig ioremap_guard"
CONFIG_CMDLINE="console=ttynull stack_depot_disable=on cgroup_disable=pressure kasan.page_alloc.sample=10 kasan.stacktrace=off kvm-arm.mode=protected bootconfig ioremap_guard"
CONFIG_CMDLINE_EXTEND=y
# CONFIG_DMI is not set
CONFIG_PM_WAKELOCKS=y

@@ -1,5 +1,4 @@
CONFIG_UAPI_HEADER_TEST=y
CONFIG_LOCALVERSION="-mainline"
CONFIG_KERNEL_LZ4=y
CONFIG_AUDIT=y
CONFIG_NO_HZ=y

@@ -468,9 +468,7 @@ EXPORT_SYMBOL_GPL(blk_crypto_register);
/**
* blk_crypto_derive_sw_secret() - Derive software secret from wrapped key
* @bdev: a block device whose hardware-wrapped keys implementation is
* compatible (blk_crypto_hw_wrapped_keys_compatible()) with all block
* devices on which the key will be used.
* @bdev: a block device that supports hardware-wrapped keys
* @eph_key: the hardware-wrapped key in ephemerally-wrapped form
* @eph_key_size: size of @eph_key in bytes
* @sw_secret: (output) the software secret
@@ -507,20 +505,6 @@ int blk_crypto_derive_sw_secret(struct block_device *bdev,
}
EXPORT_SYMBOL_GPL(blk_crypto_derive_sw_secret);
/**
* blk_crypto_hw_wrapped_keys_compatible() - Check HW-wrapped key compatibility
* @bdev1: the first block device
* @bdev2: the second block device
*
* Return: true if HW-wrapped keys used on @bdev1 can also be used on @bdev2.
*/
bool blk_crypto_hw_wrapped_keys_compatible(struct block_device *bdev1,
struct block_device *bdev2)
{
return bdev_get_queue(bdev1)->crypto_profile ==
bdev_get_queue(bdev2)->crypto_profile;
}
/**
* blk_crypto_intersect_capabilities() - restrict supported crypto capabilities
* by child device

@@ -243,9 +243,6 @@ int fscrypt_derive_sw_secret(struct super_block *sb,
const u8 *wrapped_key, size_t wrapped_key_size,
u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
{
struct block_device **devs;
unsigned int num_devs;
unsigned int i;
int err;
/* The filesystem must be mounted with -o inlinecrypt. */
@@ -256,31 +253,12 @@ int fscrypt_derive_sw_secret(struct super_block *sb,
return -EOPNOTSUPP;
}
/*
* Hardware-wrapped keys might be specific to a particular storage
* device, so for now we don't allow them to be used if the filesystem
* uses block devices with different crypto profiles. This way, there
* is no ambiguity about which ->derive_sw_secret method to call.
*/
devs = fscrypt_get_devices(sb, &num_devs);
if (IS_ERR(devs))
return PTR_ERR(devs);
for (i = 1; i < num_devs; i++) {
if (!blk_crypto_hw_wrapped_keys_compatible(devs[0], devs[i])) {
fscrypt_warn(NULL,
"%s: unsupported multi-device configuration for hardware-wrapped keys",
sb->s_id);
kfree(devs);
return -EOPNOTSUPP;
}
}
err = blk_crypto_derive_sw_secret(devs[0], wrapped_key,
err = blk_crypto_derive_sw_secret(sb->s_bdev, wrapped_key,
wrapped_key_size, sw_secret);
if (err == -EOPNOTSUPP)
fscrypt_warn(NULL,
"%s: block device doesn't support hardware-wrapped keys\n",
sb->s_id);
kfree(devs);
return err;
}

@@ -163,9 +163,6 @@ int blk_crypto_derive_sw_secret(struct block_device *bdev,
const u8 *eph_key, size_t eph_key_size,
u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
bool blk_crypto_hw_wrapped_keys_compatible(struct block_device *bdev1,
struct block_device *bdev2);
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline bool bio_has_crypt_ctx(struct bio *bio)

@@ -120,12 +120,13 @@ static __always_inline void kasan_poison_pages(struct page *page,
__kasan_poison_pages(page, order, init);
}
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
unsigned int order, bool init)
{
if (kasan_enabled())
__kasan_unpoison_pages(page, order, init);
return __kasan_unpoison_pages(page, order, init);
return false;
}
void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
@@ -249,8 +250,11 @@ static __always_inline bool kasan_check_byte(const void *addr)
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
bool init)
{
return false;
}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,

@@ -95,19 +95,24 @@ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
}
#endif /* CONFIG_KASAN_STACK */
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
u8 tag;
unsigned long i;
if (unlikely(PageHighMem(page)))
return;
return false;
if (!kasan_sample_page_alloc(order))
return false;
tag = kasan_random_tag();
kasan_unpoison(set_tag(page_address(page), tag),
PAGE_SIZE << order, init);
for (i = 0; i < (1 << order); i++)
page_kasan_tag_set(page + i, tag);
return true;
}
void __kasan_poison_pages(struct page *page, unsigned int order, bool init)

@@ -59,6 +59,24 @@ EXPORT_SYMBOL_GPL(kasan_mode);
/* Whether to enable vmalloc tagging. */
DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
#define PAGE_ALLOC_SAMPLE_DEFAULT 1
#define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT 3
/*
* Sampling interval of page_alloc allocation (un)poisoning.
* Defaults to no sampling.
*/
unsigned long kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
/*
* Minimum order of page_alloc allocations to be affected by sampling.
* The default value is chosen to match both
* PAGE_ALLOC_COSTLY_ORDER and SKB_FRAG_PAGE_ORDER.
*/
unsigned int kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
DEFINE_PER_CPU(long, kasan_page_alloc_skip);
/* kasan=off/on */
static int __init early_kasan_flag(char *arg)
{
@@ -122,6 +140,48 @@ static inline const char *kasan_mode_info(void)
return "sync";
}
/* kasan.page_alloc.sample=<sampling interval> */
static int __init early_kasan_flag_page_alloc_sample(char *arg)
{
int rv;
if (!arg)
return -EINVAL;
rv = kstrtoul(arg, 0, &kasan_page_alloc_sample);
if (rv)
return rv;
if (!kasan_page_alloc_sample || kasan_page_alloc_sample > LONG_MAX) {
kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
return -EINVAL;
}
return 0;
}
early_param("kasan.page_alloc.sample", early_kasan_flag_page_alloc_sample);
/* kasan.page_alloc.sample.order=<minimum page order> */
static int __init early_kasan_flag_page_alloc_sample_order(char *arg)
{
int rv;
if (!arg)
return -EINVAL;
rv = kstrtouint(arg, 0, &kasan_page_alloc_sample_order);
if (rv)
return rv;
if (kasan_page_alloc_sample_order > INT_MAX) {
kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
return -EINVAL;
}
return 0;
}
early_param("kasan.page_alloc.sample.order", early_kasan_flag_page_alloc_sample_order);
/*
* kasan_init_hw_tags_cpu() is called for each CPU.
* Not marked as __init as a CPU can be hot-plugged after boot.

@@ -42,6 +42,10 @@ enum kasan_mode {
extern enum kasan_mode kasan_mode __ro_after_init;
extern unsigned long kasan_page_alloc_sample;
extern unsigned int kasan_page_alloc_sample_order;
DECLARE_PER_CPU(long, kasan_page_alloc_skip);
static inline bool kasan_vmalloc_enabled(void)
{
return static_branch_likely(&kasan_flag_vmalloc);
@@ -57,6 +61,24 @@ static inline bool kasan_sync_fault_possible(void)
return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
}
static inline bool kasan_sample_page_alloc(unsigned int order)
{
/* Fast-path for when sampling is disabled. */
if (kasan_page_alloc_sample == 1)
return true;
if (order < kasan_page_alloc_sample_order)
return true;
if (this_cpu_dec_return(kasan_page_alloc_skip) < 0) {
this_cpu_write(kasan_page_alloc_skip,
kasan_page_alloc_sample - 1);
return true;
}
return false;
}
#else /* CONFIG_KASAN_HW_TAGS */
static inline bool kasan_async_fault_possible(void)
@@ -69,6 +91,11 @@ static inline bool kasan_sync_fault_possible(void)
return true;
}
static inline bool kasan_sample_page_alloc(unsigned int order)
{
return true;
}
#endif /* CONFIG_KASAN_HW_TAGS */
#ifdef CONFIG_KASAN_GENERIC
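
As a rough stand-alone illustration of the kasan_sample_page_alloc() logic
above (a user-space sketch, not kernel code: the per-CPU skip counter and the
boot parameters are replaced by plain static variables with example values):

#include <stdbool.h>
#include <stdio.h>

static unsigned long sample = 10;     /* stands in for kasan_page_alloc_sample */
static unsigned int sample_order = 3; /* stands in for kasan_page_alloc_sample_order */
static long skip;                     /* stands in for the per-CPU kasan_page_alloc_skip */

static bool sample_page_alloc(unsigned int order)
{
	/* Sampling disabled: tag every allocation. */
	if (sample == 1)
		return true;
	/* Allocations below the minimum order are never sampled: always tagged. */
	if (order < sample_order)
		return true;
	/* Tag one allocation, then skip the next sample - 1 of them. */
	if (--skip < 0) {
		skip = sample - 1;
		return true;
	}
	return false;
}

int main(void)
{
	/* Prints "tagged" for allocations 0, 10 and 20, "skipped" for the rest. */
	for (int i = 0; i < 25; i++)
		printf("order-3 allocation %2d: %s\n", i,
		       sample_page_alloc(3) ? "tagged" : "skipped");
	return 0;
}

In the kernel the skip counter is per-CPU (kasan_page_alloc_skip), so each CPU
applies the same 1-in-N pattern independently.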

@@ -1385,6 +1385,8 @@ out:
* see the comment next to it.
* 3. Skipping poisoning is requested via __GFP_SKIP_KASAN_POISON,
* see the comment next to it.
* 4. The allocation is excluded from being checked due to sampling,
* see the call to kasan_unpoison_pages.
*
* Poisoning pages during deferred memory init will greatly lengthen the
* process and cause problem in large memory systems as the deferred pages
@@ -2494,7 +2496,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
{
bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
!should_skip_init(gfp_flags);
bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
bool reset_tags = !zero_tags;
int i;
set_page_private(page, 0);
@@ -2517,30 +2520,42 @@
*/
/*
* If memory tags should be zeroed (which happens only when memory
* should be initialized as well).
* If memory tags should be zeroed
* (which happens only when memory should be initialized as well).
*/
if (init_tags) {
if (zero_tags) {
/* Initialize both memory and tags. */
for (i = 0; i != 1 << order; ++i)
tag_clear_highpage(page + i);
/* Note that memory is already initialized by the loop above. */
/* Take note that memory was initialized by the loop above. */
init = false;
}
if (!should_skip_kasan_unpoison(gfp_flags)) {
/* Unpoison shadow memory or set memory tags. */
kasan_unpoison_pages(page, order, init);
/* Note that memory is already initialized by KASAN. */
if (kasan_has_integrated_init())
init = false;
} else {
/* Ensure page_address() dereferencing does not fault. */
/* Try unpoisoning (or setting tags) and initializing memory. */
if (kasan_unpoison_pages(page, order, init)) {
/* Take note that memory was initialized by KASAN. */
if (kasan_has_integrated_init())
init = false;
/* Take note that memory tags were set by KASAN. */
reset_tags = false;
} else {
/*
* KASAN decided to exclude this allocation from being
* poisoned due to sampling. Skip poisoning as well.
*/
SetPageSkipKASanPoison(page);
}
}
/*
* If memory tags have not been set, reset the page tags to ensure
* page_address() dereferencing does not fault.
*/
if (reset_tags) {
for (i = 0; i != 1 << order; ++i)
page_kasan_tag_reset(page + i);
}
/* If memory is still not initialized, do it now. */
/* If memory is still not initialized, initialize it now. */
if (init)
kernel_init_pages(page, 1 << order);
/* Propagate __GFP_SKIP_KASAN_POISON to page flags. */