soc: qcom: Add tmecom, hwkm and ice drivers

Add tmecom, hwkm and ice-crypto drivers for pineapple.
These are mainly used for supporting storage encryption
using HWKM (residing in TME).

Change-Id: I38c97654a643f8545bd56aa559ccba588aabbb90
Signed-off-by: Smita Ghosh <quic_smitag@quicinc.com>
Signed-off-by: Gaurav Kashyap <quic_gaurkash@quicinc.com>
This commit is contained in:
Gaurav Kashyap 2022-10-31 11:29:19 -07:00
parent 67fde14ade
commit a8a29ed6e7
21 changed files with 3594 additions and 0 deletions

View file

@ -632,6 +632,33 @@ config QCOM_HUNG_TASK_ENH
processes in iowait for specific situation will be monitored
to avoid devices long time no response.
# Common file-based-encryption (FBE) crypto library shared by the storage
# stacks (UFS/eMMC); back ends program keys via TZ or the HW Key Manager.
config QTI_CRYPTO_COMMON
tristate "Enable common crypto functionality used for FBE"
depends on SCSI_UFS_CRYPTO_QTI
help
Say 'Y' to enable the common crypto implementation to be used by
different storage layers such as UFS and EMMC for file based hardware
encryption. This library implements API to program and evict
keys using Trustzone or Hardware Key Manager.
# Trustzone (SCM call) back end for FBE key programming.
config QTI_CRYPTO_TZ
tristate "Enable Trustzone to be used for FBE"
depends on QTI_CRYPTO_COMMON
help
Say 'Y' to enable routing crypto requests to Trustzone while
performing hardware based file encryption. This means keys are
programmed and managed through SCM calls to TZ where ICE driver
will configure keys.
# Hardware Key Manager (HWKM) back end.
# NOTE(review): unlike the entries above this has no "depends on
# QTI_CRYPTO_COMMON" line, and "default n" is redundant (n is the
# implicit default) — confirm whether the dependency was intended.
config QTI_HW_KEY_MANAGER
tristate "Enable QTI Hardware Key Manager for storage encryption"
default n
help
Say 'Y' to enable the hardware key manager driver used to operate
and access key manager hardware block. This is used to interface with
HWKM hardware to perform key operations from the kernel which will
be used for storage encryption.
config QCOM_MINIDUMP
tristate "QCOM Minidump Support"
depends on QCOM_SMEM || ARCH_QTI_VM
@ -736,6 +763,7 @@ config QCOM_ICC_BWMON
memory throughput even with lower CPU frequencies.
source "drivers/soc/qcom/mem_buf/Kconfig"
source "drivers/soc/qcom/tmecom/Kconfig"
config MSM_PERFORMANCE
tristate "msm performance driver to support userspace fmin/fmax request"

View file

@ -52,6 +52,10 @@ obj-$(CONFIG_MSM_BOOT_STATS) += boot_stats.o
obj-$(CONFIG_QCOM_RUN_QUEUE_STATS) += rq_stats.o
obj-$(CONFIG_MSM_CORE_HANG_DETECT) += core_hang_detect.o
obj-$(CONFIG_QCOM_CPU_VENDOR_HOOKS) += qcom_cpu_vendor_hooks.o
# File-based encryption: common ICE library plus the TZ and HWKM back ends.
obj-$(CONFIG_QTI_CRYPTO_COMMON) += crypto-qti-common.o
obj-$(CONFIG_QTI_CRYPTO_TZ) += crypto-qti-tz.o
obj-$(CONFIG_QTI_HW_KEY_MANAGER) += hwkm.o crypto-qti-hwkm.o
# TME communication (QMP mailbox) driver, built from the tmecom/ subdir.
obj-$(CONFIG_MSM_TMECOM_QMP) += tmecom/
obj-$(CONFIG_QCOM_MINIDUMP) += minidump.o
minidump-y += msm_minidump.o minidump_log.o
minidump-$(CONFIG_QCOM_MINIDUMP_PANIC_DUMP) += minidump_memory.o

View file

@ -0,0 +1,365 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Common crypto library for storage encryption.
*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/crypto-qti-common.h>
#include <linux/module.h>
#include "crypto-qti-ice-regs.h"
#include "crypto-qti-platform.h"
/*
 * Check whether a fuse disables ICE or forces hardware keys.
 *
 * Returns 0 when ICE is usable, -EPERM when a disabling fuse is blown.
 * ICE 3.2 onwards has no fuse-setting check, so those cores always pass.
 */
static int ice_check_fuse_setting(void __iomem *ice_mmio)
{
	uint32_t fuse;
	uint32_t ver, major_rev, minor_rev;

	ver = ice_readl(ice_mmio, ICE_REGS_VERSION);
	major_rev = (ver & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV;
	minor_rev = (ver & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV;

	/* Fuse-setting check is not supported on ICE 3.2 onwards */
	if (major_rev == 0x03 && minor_rev >= 0x02)
		return 0;

	fuse = ice_readl(ice_mmio, ICE_REGS_FUSE_SETTING) &
	       (ICE_FUSE_SETTING_MASK |
		ICE_FORCE_HW_KEY0_SETTING_MASK |
		ICE_FORCE_HW_KEY1_SETTING_MASK);
	if (fuse) {
		pr_err("%s: error: ICE_ERROR_HW_DISABLE_FUSE_BLOWN\n",
		       __func__);
		return -EPERM;
	}

	return 0;
}
/*
 * Reject ICE cores older than the supported major revision.
 * Returns 0 on a supported core, -ENODEV otherwise.
 */
static int ice_check_version(void __iomem *ice_mmio)
{
	uint32_t ver = ice_readl(ice_mmio, ICE_REGS_VERSION);
	uint32_t major_rev = (ver & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV;
	uint32_t minor_rev = (ver & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV;
	uint32_t step_rev = (ver & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV;

	if (major_rev < ICE_CORE_CURRENT_MAJOR_VERSION) {
		pr_err("%s: Unknown ICE device at %lu, rev %d.%d.%d\n",
		       __func__, (unsigned long)ice_mmio,
		       major_rev, minor_rev, step_rev);
		return -ENODEV;
	}

	return 0;
}
int crypto_qti_init_crypto(void *mmio_data)
{
int err = 0;
void __iomem *ice_mmio = (void __iomem *) mmio_data;
err = ice_check_version(ice_mmio);
if (err) {
pr_err("%s: check version failed, err %d\n", __func__, err);
return err;
}
err = ice_check_fuse_setting(ice_mmio);
if (err)
pr_err("%s: check fuse failed, err %d\n", __func__, err);
return err;
}
EXPORT_SYMBOL(crypto_qti_init_crypto);
/*
 * Enable clock gating and hardware optimizations in the ICE advanced
 * control register.  Values are read-modify-write so other bits are kept.
 */
static void ice_low_power_and_optimization_enable(void __iomem *ice_mmio)
{
uint32_t regval;
regval = ice_readl(ice_mmio, ICE_REGS_ADVANCED_CONTROL);
/* Enable low power mode sequence
* [0]-0,[1]-0,[2]-0,[3]-7,[4]-0,[5]-0,[6]-0,[7]-0,
* Enable CONFIG_CLK_GATING, STREAM2_CLK_GATING and STREAM1_CLK_GATING
*/
regval |= 0x7000;
/* Optimization enable sequence
* NOTE(review): 0xD807100 is an undocumented magic value from the HW
* programming guide — confirm against the ICE SWI before changing.
*/
regval |= 0xD807100;
ice_writel(ice_mmio, regval, ICE_REGS_ADVANCED_CONTROL);
/*
* Memory barrier - to ensure write completion before next transaction
*/
wmb();
}
/*
 * Poll until the ICE built-in self test (BIST) completes.
 *
 * Returns 0 when the BIST status bits clear within
 * QTI_ICE_MAX_BIST_CHECK_COUNT polls (50us apart), -ETIMEDOUT otherwise.
 *
 * Fix: the completion test previously checked the whole raw register
 * (`if (regval)`), so any set bit outside ICE_BIST_STATUS_MASK caused a
 * false timeout even though the loop had already observed BIST done.
 * Only the masked status bits are checked now; the log also prints the
 * register in hex instead of %d.
 */
static int ice_wait_bist_status(void __iomem *ice_mmio)
{
	int count;
	uint32_t regval = 0;

	for (count = 0; count < QTI_ICE_MAX_BIST_CHECK_COUNT; count++) {
		regval = ice_readl(ice_mmio, ICE_REGS_BIST_STATUS);
		if (!(regval & ICE_BIST_STATUS_MASK))
			break;
		udelay(50);
	}

	if (regval & ICE_BIST_STATUS_MASK) {
		pr_err("%s: wait bist status failed, reg 0x%x\n",
		       __func__, regval);
		return -ETIMEDOUT;
	}

	return 0;
}
int crypto_qti_enable(void *mmio_data)
{
int err = 0;
void __iomem *ice_mmio = (void __iomem *) mmio_data;
ice_low_power_and_optimization_enable(ice_mmio);
err = ice_wait_bist_status(ice_mmio);
if (err)
return err;
return err;
}
EXPORT_SYMBOL(crypto_qti_enable);
/* Tear down platform crypto state (e.g. marks HWKM as uninitialized). */
void crypto_qti_disable(void)
{
crypto_qti_disable_platform();
}
EXPORT_SYMBOL(crypto_qti_disable);
int crypto_qti_resume(void *mmio_data)
{
void __iomem *ice_mmio = (void __iomem *) mmio_data;
return ice_wait_bist_status(ice_mmio);
}
EXPORT_SYMBOL(crypto_qti_resume);
/*
 * Dump the ICE test bus to the kernel log: first the 16 main bus
 * selectors (0xD is skipped — it selects the stream datapath handled
 * below), then the 16 stream-1 datapath selectors.
 */
static void ice_dump_test_bus(void __iomem *ice_mmio)
{
uint32_t regval = 0x1;
uint32_t val;
uint8_t bus_selector;
uint8_t stream_selector;
pr_err("ICE TEST BUS DUMP:\n");
for (bus_selector = 0; bus_selector <= 0xF; bus_selector++) {
regval = 0x1; /* enable test bus */
regval |= bus_selector << 28;
if (bus_selector == 0xD)
continue;
ice_writel(ice_mmio, regval, ICE_REGS_TEST_BUS_CONTROL);
/*
* make sure test bus selector is written before reading
* the test bus register
*/
wmb();
val = ice_readl(ice_mmio, ICE_REGS_TEST_BUS_REG);
pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
regval, val);
}
pr_err("ICE TEST BUS DUMP (ICE_STREAM1_DATAPATH_TEST_BUS):\n");
for (stream_selector = 0; stream_selector <= 0xF; stream_selector++) {
regval = 0xD0000001; /* enable stream test bus (selector 0xD + enable) */
regval |= stream_selector << 16;
ice_writel(ice_mmio, regval, ICE_REGS_TEST_BUS_CONTROL);
/*
* make sure test bus selector is written before reading
* the test bus register
*/
wmb();
val = ice_readl(ice_mmio, ICE_REGS_TEST_BUS_REG);
pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
regval, val);
}
}
/*
 * Dump ICE configuration, status and counter registers plus the test bus
 * to the kernel log for debugging.  Always returns 0.
 *
 * Fix: most pr_err() format strings began with "%s:" but never passed
 * __func__, so the first register value was consumed as a char pointer —
 * undefined behavior and garbage output.  __func__ is now supplied to
 * every "%s:" format.
 */
int crypto_qti_debug(const struct ice_mmio_data *mmio_data)
{
	void __iomem *ice_mmio = mmio_data->ice_base_mmio;

	pr_err("ICE Control: 0x%08x | ICE Reset: 0x%08x\n",
	       ice_readl(ice_mmio, ICE_REGS_CONTROL),
	       ice_readl(ice_mmio, ICE_REGS_RESET));
	pr_err("ICE Version: 0x%08x | ICE FUSE: 0x%08x\n",
	       ice_readl(ice_mmio, ICE_REGS_VERSION),
	       ice_readl(ice_mmio, ICE_REGS_FUSE_SETTING));
	pr_err("%s: ICE Param1: 0x%08x | ICE Param2: 0x%08x\n", __func__,
	       ice_readl(ice_mmio, ICE_REGS_PARAMETERS_1),
	       ice_readl(ice_mmio, ICE_REGS_PARAMETERS_2));
	pr_err("%s: ICE Param3: 0x%08x | ICE Param4: 0x%08x\n", __func__,
	       ice_readl(ice_mmio, ICE_REGS_PARAMETERS_3),
	       ice_readl(ice_mmio, ICE_REGS_PARAMETERS_4));
	pr_err("%s: ICE Param5: 0x%08x | ICE IRQ STTS: 0x%08x\n", __func__,
	       ice_readl(ice_mmio, ICE_REGS_PARAMETERS_5),
	       ice_readl(ice_mmio, ICE_REGS_NON_SEC_IRQ_STTS));
	pr_err("%s: ICE IRQ MASK: 0x%08x | ICE IRQ CLR: 0x%08x\n", __func__,
	       ice_readl(ice_mmio, ICE_REGS_NON_SEC_IRQ_MASK),
	       ice_readl(ice_mmio, ICE_REGS_NON_SEC_IRQ_CLR));
	pr_err("%s: ICE INVALID CCFG ERR STTS: 0x%08x\n", __func__,
	       ice_readl(ice_mmio, ICE_INVALID_CCFG_ERR_STTS));
	pr_err("%s: ICE BIST Sts: 0x%08x | ICE Bypass Sts: 0x%08x\n", __func__,
	       ice_readl(ice_mmio, ICE_REGS_BIST_STATUS),
	       ice_readl(ice_mmio, ICE_REGS_BYPASS_STATUS));
	pr_err("%s: ICE ADV CTRL: 0x%08x | ICE ENDIAN SWAP: 0x%08x\n", __func__,
	       ice_readl(ice_mmio, ICE_REGS_ADVANCED_CONTROL),
	       ice_readl(ice_mmio, ICE_REGS_ENDIAN_SWAP));
	pr_err("%s: ICE_STM1_ERR_SYND1: 0x%08x | ICE_STM1_ERR_SYND2: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_ERROR_SYNDROME1),
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_ERROR_SYNDROME2));
	pr_err("%s: ICE_STM2_ERR_SYND1: 0x%08x | ICE_STM2_ERR_SYND2: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_ERROR_SYNDROME1),
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_ERROR_SYNDROME2));
	pr_err("%s: ICE_STM1_COUNTER1: 0x%08x | ICE_STM1_COUNTER2: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS1),
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS2));
	pr_err("%s: ICE_STM1_COUNTER3: 0x%08x | ICE_STM1_COUNTER4: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS3),
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS4));
	pr_err("%s: ICE_STM2_COUNTER1: 0x%08x | ICE_STM2_COUNTER2: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS1),
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS2));
	pr_err("%s: ICE_STM2_COUNTER3: 0x%08x | ICE_STM2_COUNTER4: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS3),
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS4));
	pr_err("%s: ICE_STM1_CTR5_MSB: 0x%08x | ICE_STM1_CTR5_LSB: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS5_MSB),
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS5_LSB));
	pr_err("%s: ICE_STM1_CTR6_MSB: 0x%08x | ICE_STM1_CTR6_LSB: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS6_MSB),
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS6_LSB));
	pr_err("%s: ICE_STM1_CTR7_MSB: 0x%08x | ICE_STM1_CTR7_LSB: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS7_MSB),
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS7_LSB));
	pr_err("%s: ICE_STM1_CTR8_MSB: 0x%08x | ICE_STM1_CTR8_LSB: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS8_MSB),
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS8_LSB));
	pr_err("%s: ICE_STM1_CTR9_MSB: 0x%08x | ICE_STM1_CTR9_LSB: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS9_MSB),
	       ice_readl(ice_mmio, ICE_REGS_STREAM1_COUNTERS9_LSB));
	pr_err("%s: ICE_STM2_CTR5_MSB: 0x%08x | ICE_STM2_CTR5_LSB: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS5_MSB),
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS5_LSB));
	pr_err("%s: ICE_STM2_CTR6_MSB: 0x%08x | ICE_STM2_CTR6_LSB: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS6_MSB),
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS6_LSB));
	pr_err("%s: ICE_STM2_CTR7_MSB: 0x%08x | ICE_STM2_CTR7_LSB: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS7_MSB),
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS7_LSB));
	pr_err("%s: ICE_STM2_CTR8_MSB: 0x%08x | ICE_STM2_CTR8_LSB: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS8_MSB),
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS8_LSB));
	pr_err("%s: ICE_STM2_CTR9_MSB: 0x%08x | ICE_STM2_CTR9_LSB: 0x%08x\n",
	       __func__,
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS9_MSB),
	       ice_readl(ice_mmio, ICE_REGS_STREAM2_COUNTERS9_LSB));
	ice_dump_test_bus(ice_mmio);
	return 0;
}
EXPORT_SYMBOL(crypto_qti_debug);
/*
 * Program a key into an ICE keyslot; on failure the slot is invalidated
 * as best-effort cleanup.
 *
 * Fix: previously a *successful* invalidate after a failed programming
 * overwrote err with 0, so the caller was told the key was programmed
 * when it was not.  The original programming error is now preserved.
 */
int crypto_qti_keyslot_program(const struct ice_mmio_data *mmio_data,
			       const struct blk_crypto_key *key,
			       unsigned int slot,
			       u8 data_unit_mask, int capid)
{
	int err;
	int evict_err;

	err = crypto_qti_program_key(mmio_data, key, slot,
				     data_unit_mask, capid);
	if (err) {
		pr_err("%s: program key failed with error %d\n", __func__, err);
		/* Best-effort slot cleanup; keep reporting the program error */
		evict_err = crypto_qti_invalidate_key(mmio_data, slot);
		if (evict_err)
			pr_err("%s: invalidate key failed with error %d\n",
			       __func__, evict_err);
	}

	return err;
}
EXPORT_SYMBOL(crypto_qti_keyslot_program);
/*
 * Evict (invalidate) the key in @slot.
 * Returns 0 on success or the platform error code.
 */
int crypto_qti_keyslot_evict(const struct ice_mmio_data *mmio_data,
			     unsigned int slot)
{
	int ret = crypto_qti_invalidate_key(mmio_data, slot);

	if (ret)
		pr_err("%s: invalidate key failed with error %d\n",
		       __func__, ret);
	return ret;
}
EXPORT_SYMBOL(crypto_qti_keyslot_evict);
/*
 * Derive the raw secret for a (possibly hardware-wrapped) key.
 *
 * Keys longer than 64 bytes are treated as hardware-wrapped and routed to
 * the platform derivation; shorter keys have their first @secret_size
 * bytes copied out directly.  Returns 0 or a negative errno.
 */
int crypto_qti_derive_raw_secret(const u8 *wrapped_key,
				 unsigned int wrapped_key_size, u8 *secret,
				 unsigned int secret_size)
{
	if (wrapped_key_size <= RAW_SECRET_SIZE) {
		pr_err("%s: Invalid wrapped_key_size: %u\n",
		       __func__, wrapped_key_size);
		return -EINVAL;
	}

	if (secret_size != RAW_SECRET_SIZE) {
		pr_err("%s: Invalid secret size: %u\n", __func__, secret_size);
		return -EINVAL;
	}

	/* > 64 bytes implies a hardware-wrapped key */
	if (wrapped_key_size > 64)
		return crypto_qti_derive_raw_secret_platform(wrapped_key,
				wrapped_key_size, secret, secret_size);

	memcpy(secret, wrapped_key, secret_size);
	return 0;
}
EXPORT_SYMBOL(crypto_qti_derive_raw_secret);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common crypto library for storage encryption");

View file

@ -0,0 +1,223 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Crypto HWKM library for storage encryption.
*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
#include <linux/crypto-qti-common.h>
#include <linux/hwkm.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cacheflush.h>
#include <linux/qcom_scm.h>
#include <linux/qtee_shmbridge.h>
#include "crypto-qti-ice-regs.h"
#include "crypto-qti-platform.h"
/*
 * Each HLOS-visible ICE keyslot maps to two HWKM key-table slots.
 * Fix: the macro argument is parenthesized so expression arguments such
 * as KEYMANAGER_ICE_MAP_SLOT(base + i) expand with correct precedence.
 */
#define KEYMANAGER_ICE_MAP_SLOT(slot) (((slot) * 2))

/* Per-slot ICE crypto configuration register pair (LUT CRYPTOCFG). */
union crypto_cfg {
	__le32 regval[2];
	struct {
		u8 dusize;	/* data-unit size/mask written from caller */
		u8 capidx;	/* capability (cipher) index */
		u8 nop;
		u8 cfge;	/* configuration enable; 0x80 when active */
		u8 dumb[4];	/* padding to fill the second register */
	};
};

/* Set once qti_hwkm_init() succeeds; cleared by crypto_qti_disable_platform() */
static bool qti_hwkm_init_done;
/*
 * Log context for a failed key-programming attempt.
 *
 * SECURITY fix: the previous implementation printed every raw key byte
 * to the kernel log, leaking storage-encryption key material to anyone
 * able to read dmesg.  Only non-sensitive metadata is logged now.
 */
static void print_key(const struct blk_crypto_key *key,
		      unsigned int slot)
{
	pr_err("%s: key programming failed, slot %d, key size %u\n",
	       __func__, slot, key->size);
}
static int crypto_qti_program_hwkm_tz(const struct blk_crypto_key *key,
unsigned int slot)
{
int err = 0;
struct qtee_shm shm;
err = qtee_shmbridge_allocate_shm(key->size, &shm);
if (err)
return -ENOMEM;
memcpy(shm.vaddr, key->raw, key->size);
qtee_shmbridge_flush_shm_buf(&shm);
err = qcom_scm_config_set_ice_key(slot, shm.paddr, key->size,
0, 0, 0);
if (err) {
pr_err("%s:SCM call Error for get contents keyblob: 0x%x\n",
__func__, err);
print_key(key, slot);
}
qtee_shmbridge_inv_shm_buf(&shm);
qtee_shmbridge_free_shm(&shm);
return err;
}
/*
 * Ask TZ to derive a raw secret from a wrapped key via shmbridge buffers.
 * Both buffers are freed on every path; the secret copy is skipped on
 * SCM failure.
 * NOTE(review): this function is non-static and not EXPORT_SYMBOL'd —
 * confirm whether it is intended to be file-local (static).
 */
int crypto_qti_get_hwkm_raw_secret_tz(
const u8 *wrapped_key,
unsigned int wrapped_key_size, u8 *secret,
unsigned int secret_size)
{
int err = 0;
struct qtee_shm shm_key, shm_secret;
err = qtee_shmbridge_allocate_shm(wrapped_key_size, &shm_key);
if (err)
return -ENOMEM;
err = qtee_shmbridge_allocate_shm(secret_size, &shm_secret);
if (err) {
qtee_shmbridge_free_shm(&shm_key);
return -ENOMEM;
}
/* Stage input, flush to make it visible to TZ */
memcpy(shm_key.vaddr, wrapped_key, wrapped_key_size);
qtee_shmbridge_flush_shm_buf(&shm_key);
memset(shm_secret.vaddr, 0, secret_size);
qtee_shmbridge_flush_shm_buf(&shm_secret);
err = qcom_scm_derive_raw_secret(shm_key.paddr, wrapped_key_size,
shm_secret.paddr, secret_size);
if (err) {
pr_err("%s:SCM call error for raw secret: 0x%x\n", __func__, err);
goto exit;
}
/* Invalidate before reading data TZ wrote */
qtee_shmbridge_inv_shm_buf(&shm_secret);
memcpy(secret, shm_secret.vaddr, secret_size);
qtee_shmbridge_inv_shm_buf(&shm_key);
exit:
qtee_shmbridge_free_shm(&shm_key);
qtee_shmbridge_free_shm(&shm_secret);
return err;
}
/* Clear one HWKM-mapped ICE keyslot via SCM.  Returns 0 or SCM error. */
static int crypto_qti_hwkm_evict_slot(unsigned int slot)
{
	int ret = qcom_scm_clear_ice_key(slot, 0);

	if (ret)
		pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
	return ret;
}
/*
 * Program a wrapped key into an ICE slot through the HW Key Manager.
 * Lazily runs HWKM init on first use, clears the slot's CFGE bit before
 * programming, then re-enables the crypto configuration on success.
 */
int crypto_qti_program_key(const struct ice_mmio_data *mmio_data,
const struct blk_crypto_key *key, unsigned int slot,
unsigned int data_unit_mask, int capid)
{
int err = 0;
union crypto_cfg cfg;
/* Wrapped keys must be larger than a raw secret */
if ((key->size) <= RAW_SECRET_SIZE) {
pr_err("%s: Incorrect key size %d\n", __func__, key->size);
return -EINVAL;
}
/* One-time HWKM hardware initialization */
if (!qti_hwkm_init_done) {
err = qti_hwkm_init(mmio_data);
if (err) {
pr_err("%s: Error with HWKM init %d\n", __func__, err);
return -EINVAL;
}
qti_hwkm_init_done = true;
}
memset(&cfg, 0, sizeof(cfg));
cfg.dusize = data_unit_mask;
cfg.capidx = capid;
cfg.cfge = 0x80;
/* Disable the slot's crypto configuration while the key changes */
ice_writel(mmio_data->ice_base_mmio, 0x0, (ICE_LUT_KEYS_CRYPTOCFG_R_16 +
ICE_LUT_KEYS_CRYPTOCFG_OFFSET*slot));
/* Make sure CFGE is cleared */
wmb();
/*
* Call TZ to get a contents keyblob
* TZ unwraps the derivation key, derives a 512 bit XTS key
* and wraps it with the TP Key. It then unwraps to the ICE slot.
*/
err = crypto_qti_program_hwkm_tz(key, KEYMANAGER_ICE_MAP_SLOT(slot));
if (err) {
pr_err("%s: Error programming hwkm keyblob , err = %d\n",
__func__, err);
goto exit;
}
/* Re-enable the slot with the new configuration */
ice_writel(mmio_data->ice_base_mmio, cfg.regval[0],
(ICE_LUT_KEYS_CRYPTOCFG_R_16 + ICE_LUT_KEYS_CRYPTOCFG_OFFSET*slot));
/* Make sure CFGE is enabled before moving forward */
wmb();
exit:
return err;
}
EXPORT_SYMBOL(crypto_qti_program_key);
int crypto_qti_invalidate_key(const struct ice_mmio_data *mmio_data,
unsigned int slot)
{
int err = 0;
if (!qti_hwkm_init_done)
return 0;
/* Clear key from ICE keyslot */
err = crypto_qti_hwkm_evict_slot(KEYMANAGER_ICE_MAP_SLOT(slot));
if (err)
pr_err("%s: Error with key clear %d, slot %d\n", __func__, err, slot);
return err;
}
EXPORT_SYMBOL(crypto_qti_invalidate_key);
/* Mark HWKM as uninitialized so the next key program re-runs init. */
void crypto_qti_disable_platform(void)
{
qti_hwkm_init_done = false;
}
EXPORT_SYMBOL(crypto_qti_disable_platform);
/*
 * Derive a raw secret via TZ: TZ unwraps the derivation key, derives a
 * CMAC key and then performs another derivation to get 32 bytes of key
 * data.  Returns 0 or the TZ error code.
 */
int crypto_qti_derive_raw_secret_platform(
		const u8 *wrapped_key,
		unsigned int wrapped_key_size, u8 *secret,
		unsigned int secret_size)
{
	int ret;

	ret = crypto_qti_get_hwkm_raw_secret_tz(wrapped_key, wrapped_key_size,
						secret, secret_size);
	if (ret)
		pr_err("%s: Error with getting derived contents keyblob , err = %d\n",
		       __func__, ret);
	return ret;
}
EXPORT_SYMBOL(crypto_qti_derive_raw_secret_platform);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto HWKM library for storage encryption");

View file

@ -0,0 +1,156 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CRYPTO_INLINE_CRYPTO_ENGINE_REGS_H_
#define _CRYPTO_INLINE_CRYPTO_ENGINE_REGS_H_
#include <linux/io.h>
/* Register bits for ICE version */
#define ICE_CORE_CURRENT_MAJOR_VERSION 0x03
#define ICE_CORE_STEP_REV_MASK 0xFFFF
#define ICE_CORE_STEP_REV 0 /* bit 15-0 */
#define ICE_CORE_MAJOR_REV_MASK 0xFF000000
#define ICE_CORE_MAJOR_REV 24 /* bit 31-24 */
#define ICE_CORE_MINOR_REV_MASK 0xFF0000
#define ICE_CORE_MINOR_REV 16 /* bit 23-16 */
#define ICE_BIST_STATUS_MASK (0xF0000000) /* bits 28-31 */
#define ICE_FUSE_SETTING_MASK 0x1
#define ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
#define ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
/* QTI ICE Registers from SWI */
#define ICE_REGS_CONTROL 0x0000
#define ICE_REGS_RESET 0x0004
#define ICE_REGS_VERSION 0x0008
#define ICE_REGS_FUSE_SETTING 0x0010
#define ICE_REGS_PARAMETERS_1 0x0014
#define ICE_REGS_PARAMETERS_2 0x0018
#define ICE_REGS_PARAMETERS_3 0x001C
#define ICE_REGS_PARAMETERS_4 0x0020
#define ICE_REGS_PARAMETERS_5 0x0024
/* QTI ICE v3.X only */
#define ICE_GENERAL_ERR_STTS 0x0040
#define ICE_INVALID_CCFG_ERR_STTS 0x0030
#define ICE_GENERAL_ERR_MASK 0x0044
/* QTI ICE v2.X only */
#define ICE_REGS_NON_SEC_IRQ_STTS 0x0040
#define ICE_REGS_NON_SEC_IRQ_MASK 0x0044
#define ICE_REGS_NON_SEC_IRQ_CLR 0x0048
#define ICE_REGS_STREAM1_ERROR_SYNDROME1 0x0050
#define ICE_REGS_STREAM1_ERROR_SYNDROME2 0x0054
#define ICE_REGS_STREAM2_ERROR_SYNDROME1 0x0058
#define ICE_REGS_STREAM2_ERROR_SYNDROME2 0x005C
#define ICE_REGS_STREAM1_BIST_ERROR_VEC 0x0060
#define ICE_REGS_STREAM2_BIST_ERROR_VEC 0x0064
#define ICE_REGS_STREAM1_BIST_FINISH_VEC 0x0068
#define ICE_REGS_STREAM2_BIST_FINISH_VEC 0x006C
#define ICE_REGS_BIST_STATUS 0x0070
#define ICE_REGS_BYPASS_STATUS 0x0074
#define ICE_REGS_ADVANCED_CONTROL 0x1000
#define ICE_REGS_ENDIAN_SWAP 0x1004
#define ICE_REGS_TEST_BUS_CONTROL 0x1010
#define ICE_REGS_TEST_BUS_REG 0x1014
#define ICE_REGS_STREAM1_COUNTERS1 0x1100
#define ICE_REGS_STREAM1_COUNTERS2 0x1104
#define ICE_REGS_STREAM1_COUNTERS3 0x1108
#define ICE_REGS_STREAM1_COUNTERS4 0x110C
#define ICE_REGS_STREAM1_COUNTERS5_MSB 0x1110
#define ICE_REGS_STREAM1_COUNTERS5_LSB 0x1114
#define ICE_REGS_STREAM1_COUNTERS6_MSB 0x1118
#define ICE_REGS_STREAM1_COUNTERS6_LSB 0x111C
#define ICE_REGS_STREAM1_COUNTERS7_MSB 0x1120
#define ICE_REGS_STREAM1_COUNTERS7_LSB 0x1124
#define ICE_REGS_STREAM1_COUNTERS8_MSB 0x1128
#define ICE_REGS_STREAM1_COUNTERS8_LSB 0x112C
#define ICE_REGS_STREAM1_COUNTERS9_MSB 0x1130
#define ICE_REGS_STREAM1_COUNTERS9_LSB 0x1134
#define ICE_REGS_STREAM2_COUNTERS1 0x1200
#define ICE_REGS_STREAM2_COUNTERS2 0x1204
#define ICE_REGS_STREAM2_COUNTERS3 0x1208
#define ICE_REGS_STREAM2_COUNTERS4 0x120C
#define ICE_REGS_STREAM2_COUNTERS5_MSB 0x1210
#define ICE_REGS_STREAM2_COUNTERS5_LSB 0x1214
#define ICE_REGS_STREAM2_COUNTERS6_MSB 0x1218
#define ICE_REGS_STREAM2_COUNTERS6_LSB 0x121C
#define ICE_REGS_STREAM2_COUNTERS7_MSB 0x1220
#define ICE_REGS_STREAM2_COUNTERS7_LSB 0x1224
#define ICE_REGS_STREAM2_COUNTERS8_MSB 0x1228
#define ICE_REGS_STREAM2_COUNTERS8_LSB 0x122C
#define ICE_REGS_STREAM2_COUNTERS9_MSB 0x1230
#define ICE_REGS_STREAM2_COUNTERS9_LSB 0x1234
#define ICE_STREAM1_PREMATURE_LBA_CHANGE (1L << 0)
#define ICE_STREAM2_PREMATURE_LBA_CHANGE (1L << 1)
#define ICE_STREAM1_NOT_EXPECTED_LBO (1L << 2)
#define ICE_STREAM2_NOT_EXPECTED_LBO (1L << 3)
#define ICE_STREAM1_NOT_EXPECTED_DUN (1L << 4)
#define ICE_STREAM2_NOT_EXPECTED_DUN (1L << 5)
#define ICE_STREAM1_NOT_EXPECTED_DUS (1L << 6)
#define ICE_STREAM2_NOT_EXPECTED_DUS (1L << 7)
#define ICE_STREAM1_NOT_EXPECTED_DBO (1L << 8)
#define ICE_STREAM2_NOT_EXPECTED_DBO (1L << 9)
#define ICE_STREAM1_NOT_EXPECTED_ENC_SEL (1L << 10)
#define ICE_STREAM2_NOT_EXPECTED_ENC_SEL (1L << 11)
#define ICE_STREAM1_NOT_EXPECTED_CONF_IDX (1L << 12)
#define ICE_STREAM2_NOT_EXPECTED_CONF_IDX (1L << 13)
#define ICE_STREAM1_NOT_EXPECTED_NEW_TRNS (1L << 14)
#define ICE_STREAM2_NOT_EXPECTED_NEW_TRNS (1L << 15)
/*
 * Aggregate of the non-secure stream-error interrupt sources.
 * NOTE(review): ICE_STREAM1_NOT_EXPECTED_DUS and
 * ICE_STREAM2_NOT_EXPECTED_CONF_IDX are absent from this mask while
 * their opposite-stream counterparts are present — confirm whether the
 * asymmetry is intentional or an omission.
 */
#define ICE_NON_SEC_IRQ_MASK \
(ICE_STREAM1_PREMATURE_LBA_CHANGE |\
ICE_STREAM2_PREMATURE_LBA_CHANGE |\
ICE_STREAM1_NOT_EXPECTED_LBO |\
ICE_STREAM2_NOT_EXPECTED_LBO |\
ICE_STREAM1_NOT_EXPECTED_DUN |\
ICE_STREAM2_NOT_EXPECTED_DUN |\
ICE_STREAM2_NOT_EXPECTED_DUS |\
ICE_STREAM1_NOT_EXPECTED_DBO |\
ICE_STREAM2_NOT_EXPECTED_DBO |\
ICE_STREAM1_NOT_EXPECTED_ENC_SEL |\
ICE_STREAM2_NOT_EXPECTED_ENC_SEL |\
ICE_STREAM1_NOT_EXPECTED_CONF_IDX |\
ICE_STREAM1_NOT_EXPECTED_NEW_TRNS |\
ICE_STREAM2_NOT_EXPECTED_NEW_TRNS)
/* QTI ICE registers from secure side */
#define ICE_TEST_BUS_REG_SECURE_INTR (1L << 28)
#define ICE_TEST_BUS_REG_NON_SECURE_INTR (1L << 2)
#define ICE_LUT_KEYS_CRYPTOCFG_R_16 0x4040
#define ICE_LUT_KEYS_CRYPTOCFG_R_17 0x4044
#define ICE_LUT_KEYS_CRYPTOCFG_OFFSET 0x80
#define ICE_LUT_KEYS_ICE_SEC_IRQ_STTS 0x6200
#define ICE_LUT_KEYS_ICE_SEC_IRQ_MASK 0x6204
#define ICE_LUT_KEYS_ICE_SEC_IRQ_CLR 0x6208
#define ICE_STREAM1_PARTIALLY_SET_KEY_USED (1L << 0)
#define ICE_STREAM2_PARTIALLY_SET_KEY_USED (1L << 1)
#define ICE_QTIC_DBG_OPEN_EVENT (1L << 30)
#define ICE_KEYS_RAM_RESET_COMPLETED (1L << 31)
#define ICE_SEC_IRQ_MASK \
(ICE_STREAM1_PARTIALLY_SET_KEY_USED |\
ICE_STREAM2_PARTIALLY_SET_KEY_USED |\
ICE_QTIC_DBG_OPEN_EVENT | \
ICE_KEYS_RAM_RESET_COMPLETED)
/*
 * MMIO accessors for ICE registers.  Relaxed variants are used; callers
 * insert explicit barriers (wmb()) where ordering matters.
 * Fix: the base-address argument is now parenthesized so expression
 * arguments keep correct operator precedence.
 */
#define ice_writel(ice_mmio, val, reg) \
	writel_relaxed((val), (ice_mmio) + (reg))
#define ice_readl(ice_mmio, reg) \
	readl_relaxed((ice_mmio) + (reg))
#endif /* _CRYPTO_INLINE_CRYPTO_ENGINE_REGS_H_ */

View file

@ -0,0 +1,57 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CRYPTO_QTI_PLATFORM_H
#define _CRYPTO_QTI_PLATFORM_H
#include <linux/blk-crypto.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/device.h>
#if IS_ENABLED(CONFIG_QTI_CRYPTO_COMMON)
/* Program @key into ICE @slot; implemented by the TZ or HWKM back end. */
int crypto_qti_program_key(const struct ice_mmio_data *mmio_data,
const struct blk_crypto_key *key,
unsigned int slot,
unsigned int data_unit_mask, int capid);
/* Remove the key in @slot. */
int crypto_qti_invalidate_key(const struct ice_mmio_data *mmio_data,
unsigned int slot);
/* Derive a raw secret from a hardware-wrapped key via the back end. */
int crypto_qti_derive_raw_secret_platform(
const u8 *wrapped_key,
unsigned int wrapped_key_size, u8 *secret,
unsigned int secret_size);
#if IS_ENABLED(CONFIG_QTI_HW_KEY_MANAGER)
void crypto_qti_disable_platform(void);
#else
static inline void crypto_qti_disable_platform(void)
{}
#endif /* CONFIG_QTI_HW_KEY_MANAGER */
#else
/* Stubs when no crypto back end is configured: report unsupported. */
static inline int crypto_qti_program_key(
const struct ice_mmio_data *mmio_data,
const struct blk_crypto_key *key,
unsigned int slot,
unsigned int data_unit_mask, int capid)
{
return -EOPNOTSUPP;
}
static inline int crypto_qti_invalidate_key(
const struct ice_mmio_data *mmio_data, unsigned int slot)
{
return -EOPNOTSUPP;
}
static inline int crypto_qti_derive_raw_secret_platform(
const u8 *wrapped_key,
unsigned int wrapped_key_size, u8 *secret,
unsigned int secret_size)
{
return -EOPNOTSUPP;
}
static inline void crypto_qti_disable_platform(void)
{}
#endif /* CONFIG_QTI_CRYPTO_COMMON */
#endif /* _CRYPTO_QTI_PLATFORM_H */

View file

@ -0,0 +1,101 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Crypto TZ library for storage encryption.
*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/cacheflush.h>
#include <linux/qcom_scm.h>
#include <linux/qtee_shmbridge.h>
#include <linux/crypto-qti-common.h>
#include <linux/module.h>
#include "crypto-qti-platform.h"
#define ICE_CIPHER_MODE_XTS_256 3
#define UFS_CE 10
#define SDCC_CE 20
#define UFS_CARD_CE 30
int crypto_qti_program_key(const struct ice_mmio_data *mmio_data,
const struct blk_crypto_key *key, unsigned int slot,
unsigned int data_unit_mask, int capid)
{
int err = 0;
struct qtee_shm shm;
err = qtee_shmbridge_allocate_shm(key->size, &shm);
if (err)
return -ENOMEM;
memcpy(shm.vaddr, key->raw, key->size);
qtee_shmbridge_flush_shm_buf(&shm);
err = qcom_scm_config_set_ice_key(slot, shm.paddr, key->size,
ICE_CIPHER_MODE_XTS_256,
data_unit_mask, UFS_CE);
if (err)
pr_err("%s:SCM call Error: 0x%x slot %d\n",
__func__, err, slot);
qtee_shmbridge_inv_shm_buf(&shm);
qtee_shmbridge_free_shm(&shm);
return err;
}
EXPORT_SYMBOL(crypto_qti_program_key);
int crypto_qti_invalidate_key(const struct ice_mmio_data *mmio_data,
unsigned int slot)
{
int err = 0;
err = qcom_scm_clear_ice_key(slot, UFS_CE);
if (err)
pr_err("%s:SCM call Error: 0x%x\n", __func__, err);
return err;
}
EXPORT_SYMBOL(crypto_qti_invalidate_key);
/*
 * Derive a raw secret from a wrapped key via TZ using shmbridge buffers.
 *
 * Fix: when the second shmbridge allocation failed, the first buffer
 * (shm_key) leaked — it is now freed on that error path, matching the
 * HWKM variant of this function.
 */
int crypto_qti_derive_raw_secret_platform(
		const u8 *wrapped_key,
		unsigned int wrapped_key_size, u8 *secret,
		unsigned int secret_size)
{
	int err = 0;
	struct qtee_shm shm_key, shm_secret;

	err = qtee_shmbridge_allocate_shm(wrapped_key_size, &shm_key);
	if (err)
		return -ENOMEM;

	err = qtee_shmbridge_allocate_shm(secret_size, &shm_secret);
	if (err) {
		/* Previously leaked shm_key here */
		qtee_shmbridge_free_shm(&shm_key);
		return -ENOMEM;
	}

	memcpy(shm_key.vaddr, wrapped_key, wrapped_key_size);
	qtee_shmbridge_flush_shm_buf(&shm_key);
	memset(shm_secret.vaddr, 0, secret_size);
	qtee_shmbridge_flush_shm_buf(&shm_secret);

	err = qcom_scm_derive_raw_secret(shm_key.paddr, wrapped_key_size,
					 shm_secret.paddr, secret_size);
	if (err) {
		pr_err("%s:SCM call Error for derive raw secret: 0x%x\n",
		       __func__, err);
	}

	/* Invalidate before copying data TZ wrote; copy is zeroes on error */
	qtee_shmbridge_inv_shm_buf(&shm_secret);
	memcpy(secret, shm_secret.vaddr, secret_size);
	qtee_shmbridge_inv_shm_buf(&shm_key);
	qtee_shmbridge_free_shm(&shm_key);
	qtee_shmbridge_free_shm(&shm_secret);
	return err;
}
EXPORT_SYMBOL(crypto_qti_derive_raw_secret_platform);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto TZ library for storage encryption");

317
drivers/soc/qcom/hwkm.c Normal file
View file

@ -0,0 +1,317 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* QTI hardware key manager driver.
*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/bitops.h>
#include <linux/iommu.h>
#include <linux/hwkm.h>
#include <linux/tme_hwkm_master.h>
#include "hwkmregs.h"
#include "hwkm_serialize.h"
#include "crypto-qti-ice-regs.h"
#define ASYNC_CMD_HANDLING false
// Maximum number of times to poll
#define MAX_RETRIES 1000
// NOTE(review): non-static file-scope global mutated by WAIT_UNTIL();
// not thread-safe and pollutes the kernel namespace — consider static.
int retries;
#define WAIT_UNTIL(cond) \
for (retries = 0; !(cond) && (retries < MAX_RETRIES); retries++)
#define ICE_SLAVE_TPKEY_VAL 0x18C
// Relaxed MMIO accessors for the HWKM register space.
// NOTE(review): the 'dest' argument is accepted but unused in the
// expansions below — confirm it is intentional (kept for API symmetry?).
#define qti_hwkm_readl(hwkm_base, reg, dest) \
(readl_relaxed(hwkm_base + (reg)))
#define qti_hwkm_writel(hwkm_base, val, reg, dest) \
(writel_relaxed((val), hwkm_base + (reg)))
// Read-modify-write helpers to set/clear a single bit 'nr' in 'reg'.
#define qti_hwkm_setb(hwkm_base, reg, nr, dest) { \
u32 val = qti_hwkm_readl(hwkm_base, reg, dest); \
val |= (0x1 << nr); \
qti_hwkm_writel(hwkm_base, val, reg, dest); \
}
#define qti_hwkm_clearb(hwkm_base, reg, nr, dest) { \
u32 val = qti_hwkm_readl(hwkm_base, reg, dest); \
val &= ~(0x1 << nr); \
qti_hwkm_writel(hwkm_base, val, reg, dest); \
}
/* Return true when bit @nr of register @reg is set. */
static inline bool qti_hwkm_testb(void __iomem *ice_hwkm_mmio, u32 reg, u8 nr,
				  enum hwkm_destination dest)
{
	u32 regval = qti_hwkm_readl(ice_hwkm_mmio, reg, dest);

	return ((regval >> nr) & 0x1) != 0;
}
/* Extract the field selected by @mask/@offset from register @reg. */
static inline unsigned int qti_hwkm_get_reg_data(void __iomem *ice_hwkm_mmio,
						 u32 reg, u32 offset, u32 mask,
						 enum hwkm_destination dest)
{
	u32 regval = qti_hwkm_readl(ice_hwkm_mmio, reg, dest);

	return (regval & mask) >> offset;
}
/* Log the four TME HWKM error status fields from a failed master call. */
static void print_err_info(struct tme_ext_err_info *info)
{
	pr_err("printing tme hwkm error response\n");
	pr_err("tme_err_status = %d\n", info->tme_err_status);
	pr_err("seq_err_status = %d\n", info->seq_err_status);
	pr_err("seq_kp_err_status0 = %d\n", info->seq_kp_err_status0);
	pr_err("seq_kp_err_status1 = %d\n", info->seq_kp_err_status1);
}
/*
 * Broadcast the transport key via the TME HWKM master.  Only KM_MASTER
 * destinations are supported.  -ENODEV/-EAGAIN from the TME indicate it
 * is not ready yet, so the call is retried with a backoff sleep up to
 * MAX_RETRIES times.  On final failure the TME error info is logged.
 */
static int qti_handle_set_tpkey(const struct hwkm_cmd *cmd_in,
				struct hwkm_rsp *rsp_in)
{
	struct tme_ext_err_info errinfo = {0};
	int attempts = 0;
	int status;

	if (cmd_in->dest != KM_MASTER) {
		pr_err("Invalid dest %d, only master supported\n",
		       cmd_in->dest);
		return -EINVAL;
	}

	status = tme_hwkm_master_broadcast_transportkey(&errinfo);
	while ((status == -ENODEV || status == -EAGAIN) &&
	       attempts < MAX_RETRIES) {
		usleep_range(8000, 12000);
		status = tme_hwkm_master_broadcast_transportkey(&errinfo);
		attempts++;
	}

	if (status) {
		pr_err("Err in tme hwkm tpkey call, sts = %d\n", status);
		print_err_info(&errinfo);
	}
	return status;
}
/*
 * Dispatch a HWKM command.  Only SET_TPKEY is serviced from the kernel;
 * every other operation (including QFPROM_KEY_RDWR, which is an HW
 * initialization command) is rejected with -EINVAL.
 */
int qti_hwkm_handle_cmd(struct hwkm_cmd *cmd, struct hwkm_rsp *rsp)
{
	if (cmd->op == SET_TPKEY)
		return qti_handle_set_tpkey(cmd, rsp);

	return -EINVAL;
}
EXPORT_SYMBOL(qti_hwkm_handle_cmd);
static void qti_hwkm_configure_slot_access(const struct ice_mmio_data *mmio_data)
{
qti_hwkm_writel(mmio_data->ice_hwkm_mmio, 0xffffffff,
QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_0, ICE_SLAVE);
qti_hwkm_writel(mmio_data->ice_hwkm_mmio, 0xffffffff,
QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_1, ICE_SLAVE);
qti_hwkm_writel(mmio_data->ice_hwkm_mmio, 0xffffffff,
QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_2, ICE_SLAVE);
qti_hwkm_writel(mmio_data->ice_hwkm_mmio, 0xffffffff,
QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_3, ICE_SLAVE);
qti_hwkm_writel(mmio_data->ice_hwkm_mmio, 0xffffffff,
QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_4, ICE_SLAVE);
}
static int qti_hwkm_check_bist_status(const struct ice_mmio_data *mmio_data)
{
if (!qti_hwkm_testb(mmio_data->ice_hwkm_mmio, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
BIST_DONE, ICE_SLAVE)) {
pr_err("%s: Error with BIST_DONE\n", __func__);
return -EINVAL;
}
if (!qti_hwkm_testb(mmio_data->ice_hwkm_mmio, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
CRYPTO_LIB_BIST_DONE, ICE_SLAVE)) {
pr_err("%s: Error with CRYPTO_LIB_BIST_DONE\n", __func__);
return -EINVAL;
}
if (!qti_hwkm_testb(mmio_data->ice_hwkm_mmio, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
BOOT_CMD_LIST1_DONE, ICE_SLAVE)) {
pr_err("%s: Error with BOOT_CMD_LIST1_DONE\n", __func__);
return -EINVAL;
}
if (!qti_hwkm_testb(mmio_data->ice_hwkm_mmio, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
BOOT_CMD_LIST0_DONE, ICE_SLAVE)) {
pr_err("%s: Error with BOOT_CMD_LIST0_DONE\n", __func__);
return -EINVAL;
}
if (!qti_hwkm_testb(mmio_data->ice_hwkm_mmio, QTI_HWKM_ICE_RG_TZ_KM_STATUS,
KT_CLEAR_DONE, ICE_SLAVE)) {
pr_err("%s: KT_CLEAR_DONE\n", __func__);
return -EINVAL;
}
return 0;
}
/*
 * qti_hwkm_ice_init_sequence() - One-time HWKM/ICE bring-up.
 *
 * Switches ICE out of legacy mode, confirms the BIST/boot "done" bits,
 * disables command CRC checks, opens HLOS access to the key slots and
 * clears any stale RSP_FIFO_FULL indication. Each register write is
 * followed by a wmb() so the device observes them in program order.
 *
 * Returns 0 on success, negative errno if BIST did not complete.
 */
static int qti_hwkm_ice_init_sequence(const struct ice_mmio_data *mmio_data)
{
	int ret = 0;
	u32 val = 0;

	//Put ICE in standard mode
	/* NOTE(review): clearing CONTROL bit 0 presumably deselects legacy
	 * mode - confirm against the ICE HPG.
	 */
	val = ice_readl(mmio_data->ice_base_mmio, ICE_REGS_CONTROL);
	val = val & 0xFFFFFFFE;
	ice_writel(mmio_data->ice_base_mmio, val, ICE_REGS_CONTROL);
	/* Write memory barrier */
	wmb();
	pr_debug("%s: ICE_REGS_CONTROL = 0x%x\n", __func__,
		ice_readl(mmio_data->ice_base_mmio, ICE_REGS_CONTROL));

	/* HWKM is unusable until the self-test status bits are all set */
	ret = qti_hwkm_check_bist_status(mmio_data);
	if (ret) {
		pr_err("%s: Error in BIST initialization %d\n", __func__, ret);
		return ret;
	}

	// Disable CRC checks
	qti_hwkm_clearb(mmio_data->ice_hwkm_mmio, QTI_HWKM_ICE_RG_TZ_KM_CTL,
		CRC_CHECK_EN, ICE_SLAVE);
	/* Write memory barrier */
	wmb();

	// Configure key slots to be accessed by HLOS
	qti_hwkm_configure_slot_access(mmio_data);
	/* Write memory barrier */
	wmb();

	// Clear RSP_FIFO_FULL bit
	/* NOTE(review): setting the IRQ status bit looks like write-1-to-clear
	 * semantics - confirm.
	 */
	qti_hwkm_setb(mmio_data->ice_hwkm_mmio,
		QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS,
		RSP_FIFO_FULL, ICE_SLAVE);
	/* Write memory barrier */
	wmb();

	return ret;
}
/*
 * Arm the ICE HWKM slave to receive the transport key that the TME
 * master is about to broadcast.
 */
static void qti_hwkm_enable_slave_receive_mode(
				const struct ice_mmio_data *mmio_data)
{
	/* Clear TPKEY_EN first - presumably to give the enable write below
	 * a clean edge; NOTE(review): confirm against the HWKM HPG.
	 */
	qti_hwkm_clearb(mmio_data->ice_hwkm_mmio,
		QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL, TPKEY_EN, ICE_SLAVE);
	/* Write memory barrier */
	wmb();
	/* Program the slave's transport-key receive-control value */
	qti_hwkm_writel(mmio_data->ice_hwkm_mmio, ICE_SLAVE_TPKEY_VAL,
		QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL, ICE_SLAVE);
	/* Write memory barrier */
	wmb();
}
/*
 * Disarm transport-key reception on the ICE HWKM slave once the
 * broadcast has completed (or failed).
 */
static void qti_hwkm_disable_slave_receive_mode(
				const struct ice_mmio_data *mmio_data)
{
	qti_hwkm_clearb(mmio_data->ice_hwkm_mmio,
		QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL, TPKEY_EN, ICE_SLAVE);
	/* Write memory barrier */
	wmb();
}
/*
 * Log the transport-key receive status register. The read is performed
 * unconditionally so it happens even when pr_debug output is compiled
 * out or disabled.
 */
static void qti_hwkm_check_tpkey_status(const struct ice_mmio_data *mmio_data)
{
	int tpkey_status;

	tpkey_status = qti_hwkm_readl(mmio_data->ice_hwkm_mmio,
			QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_STATUS, ICE_SLAVE);

	pr_debug("%s: Tpkey receive status 0x%x\n", __func__, tpkey_status);
}
static int qti_hwkm_set_tpkey(const struct ice_mmio_data *mmio_data)
{
int err = 0;
struct hwkm_cmd cmd_settpkey = {0};
struct hwkm_rsp rsp_settpkey = {0};
qti_hwkm_enable_slave_receive_mode(mmio_data);
cmd_settpkey.op = SET_TPKEY;
cmd_settpkey.dest = KM_MASTER;
err = qti_hwkm_handle_cmd(&cmd_settpkey, &rsp_settpkey);
if (err) {
pr_err("%s: Error with Set TP key in master %d\n", __func__,
err);
return -EINVAL;
}
qti_hwkm_check_tpkey_status(mmio_data);
qti_hwkm_disable_slave_receive_mode(mmio_data);
return 0;
}
/**
 * qti_hwkm_init() - Initialize the HWKM ICE slave and pull in the TP key.
 * @mmio_data: mapped ICE base and HWKM slave register spaces.
 *
 * Runs the one-time ICE/HWKM init sequence and then has the TME master
 * broadcast the transport key to this slave.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int qti_hwkm_init(const struct ice_mmio_data *mmio_data)
{
	int ret = 0;

	pr_debug("%s %d: HWKM init starts\n", __func__, __LINE__);
	/* Both register spaces are required for the sequence below. */
	if (!mmio_data->ice_hwkm_mmio || !mmio_data->ice_base_mmio) {
		pr_err("%s: HWKM ICE slave mmio invalid\n", __func__);
		return -EINVAL;
	}
	ret = qti_hwkm_ice_init_sequence(mmio_data);
	if (ret) {
		pr_err("%s: Error in ICE init sequence %d\n", __func__, ret);
		return ret;
	}
	ret = qti_hwkm_set_tpkey(mmio_data);
	if (ret) {
		pr_err("%s: Error setting ICE to receive %d\n", __func__, ret);
		return ret;
	}
	/* Write memory barrier */
	wmb();
	pr_debug("%s %d: HWKM init ends\n", __func__, __LINE__);
	return ret;
}
EXPORT_SYMBOL(qti_hwkm_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("QTI Hardware Key Manager library");

View file

@ -0,0 +1,124 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HWKM_SERIALIZE_H_
#define __HWKM_SERIALIZE_H_
#include <stdbool.h>
#include <stddef.h>
#include <linux/hwkm.h>
/* Command lengths (words) */
#define NIST_KEYGEN_CMD_WORDS 4
#define SYSTEM_KDF_CMD_MIN_WORDS 4
#define SYSTEM_KDF_CMD_MAX_WORDS 29
#define KEYSLOT_CLEAR_CMD_WORDS 2
#define UNWRAP_IMPORT_CMD_WORDS 25
#define WRAP_EXPORT_CMD_WORDS 5
#define SET_TPKEY_CMD_WORDS 2
#define KEYSLOT_RDWR_CMD_WORDS 20
#define QFPROM_RDWR_CMD_WORDS 2
/* Response lengths (words) */
#define NIST_KEYGEN_RSP_WORDS 2
#define SYSTEM_KDF_RSP_WORDS 2
#define KEYSLOT_CLEAR_RSP_WORDS 2
#define UNWRAP_IMPORT_RSP_WORDS 2
#define WRAP_EXPORT_RSP_WORDS 19
#define SET_TPKEY_RSP_WORDS 2
#define KEYSLOT_RDWR_RSP_WORDS 21
#define QFPROM_RDWR_RSP_WORDS 2
/* Field lengths (words) */
#define OPERATION_INFO_WORDS 1
#define KEY_POLICY_WORDS 2
#define BSVE_WORDS 3
#define MAX_SWC_WORDS 16
#define RESPONSE_KEY_WORDS 16
#define KEY_BLOB_WORDS 17
/* Field lengths (bytes) */
#define UNWRAP_CMD_LENGTH (UNWRAP_IMPORT_CMD_WORDS * sizeof(uint32_t))
#define UNWRAP_RSP_LENGTH (UNWRAP_IMPORT_RSP_WORDS * sizeof(uint32_t))
#define OPERATION_INFO_LENGTH (OPERATION_INFO_WORDS * sizeof(uint32_t))
#define KEY_POLICY_LENGTH (KEY_POLICY_WORDS * sizeof(uint32_t))
#define MAX_BSVE_LENGTH (BSVE_WORDS * sizeof(uint32_t))
#define MAX_SWC_LENGTH (MAX_SWC_WORDS * sizeof(uint32_t))
#define RESPONSE_KEY_LENGTH (RESPONSE_KEY_WORDS * sizeof(uint32_t))
#define KEY_BLOB_LENGTH (KEY_BLOB_WORDS * sizeof(uint32_t))
/* Command indices */
#define COMMAND_KEY_POLICY_IDX 1
#define COMMAND_KEY_VALUE_IDX 3
#define COMMAND_WRAPPED_KEY_IDX 1
#define COMMAND_KEY_WRAP_BSVE_IDX 1
/* Response indices */
#define RESPONSE_ERR_IDX 1
#define RESPONSE_KEY_POLICY_IDX 2
#define RESPONSE_KEY_VALUE_IDX 4
#define RESPONSE_WRAPPED_KEY_IDX 2
/*
 * Wire format of the 64-bit HWKM key policy as placed in command words.
 * Field widths sum to exactly 64 bits and must stay in sync with the
 * HWKM SWI. NOTE(review): the bracketed bit positions assume
 * little-endian bitfield allocation - confirm for the target ABI.
 */
struct hwkm_serialized_policy {
	unsigned dbg_qfprom_key_rd_iv_sel:1; // [0]
	unsigned reserved0:1; // [1]
	unsigned wrap_with_tpkey:1; // [2]
	unsigned hw_destination:4; // [3:6]
	unsigned reserved1:1; // [7]
	unsigned propagate_sec_level_to_child_keys:1; // [8]
	unsigned security_level:2; // [9:10]
	unsigned swap_export_allowed:1; // [11]
	unsigned wrap_export_allowed:1; // [12]
	unsigned key_type:3; // [13:15]
	unsigned kdf_depth:8; // [16:23]
	unsigned decrypt_allowed:1; // [24]
	unsigned encrypt_allowed:1; // [25]
	unsigned alg_allowed:6; // [26:31]
	unsigned key_management_by_tz_secure_allowed:1; // [32]
	unsigned key_management_by_nonsecure_allowed:1; // [33]
	unsigned key_management_by_modem_allowed:1; // [34]
	unsigned key_management_by_spu_allowed:1; // [35]
	unsigned reserved2:28; // [36:63]
} __packed;
/*
 * KDF binding state-vector element (BSVE), 96 bits total.
 * Bit-position comments corrected: a 64-bit field starting at bit 14
 * ends at bit 77, so the trailing fields sit at [78], [79] and [80:95]
 * (the widths, which are what the hardware sees, are unchanged).
 */
struct hwkm_kdf_bsve {
	unsigned mks:8; // [0:7]
	unsigned key_policy_version_en:1; // [8]
	unsigned apps_secure_en:1; // [9]
	unsigned msa_secure_en:1; // [10]
	unsigned lcm_fuse_row_en:1; // [11]
	unsigned boot_stage_otp_en:1; // [12]
	unsigned swc_en:1; // [13]
	u64 fuse_region_sha_digest_en:64; // [14:77]
	unsigned child_key_policy_en:1; // [78]
	unsigned mks_en:1; // [79]
	unsigned reserved:16; // [80:95]
} __packed;
/*
 * Wrapping binding state-vector element (BSVE), 96 bits total, used in
 * wrap/unwrap commands. Bit positions verified: widths sum to 96.
 */
struct hwkm_wrapping_bsve {
	unsigned key_policy_version_en:1; // [0]
	unsigned apps_secure_en:1; // [1]
	unsigned msa_secure_en:1; // [2]
	unsigned lcm_fuse_row_en:1; // [3]
	unsigned boot_stage_otp_en:1; // [4]
	unsigned swc_en:1; // [5]
	u64 fuse_region_sha_digest_en:64; // [6:69]
	unsigned child_key_policy_en:1; // [70]
	unsigned mks_en:1; // [71]
	unsigned reserved:24; // [72:95]
} __packed;
/*
 * HWKM command-header word: opcode plus slot descriptors and length
 * fields, 32 bits total. Range notation normalized to [m:n] to match
 * the other serialized structures in this header.
 */
struct hwkm_operation_info {
	unsigned op:4; // [0:3]
	unsigned irq_en:1; // [4]
	unsigned slot1_desc:8; // [5:12]
	unsigned slot2_desc:8; // [13:20]
	unsigned op_flag:1; // [21]
	unsigned context_len:5; // [22:26]
	unsigned len:5; // [27:31]
} __packed;
#endif /* __HWKM_SERIALIZE_H_ */

139
drivers/soc/qcom/hwkmregs.h Normal file
View file

@ -0,0 +1,139 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _QTI_HARDWARE_KEY_MANAGER_REGS_H_
#define _QTI_HARDWARE_KEY_MANAGER_REGS_H_
#define HWKM_VERSION_STEP_REV_MASK 0xFFFF
#define HWKM_VERSION_STEP_REV 0 /* bit 15-0 */
#define HWKM_VERSION_MAJOR_REV_MASK 0xFF000000
#define HWKM_VERSION_MAJOR_REV 24 /* bit 31-24 */
#define HWKM_VERSION_MINOR_REV_MASK 0xFF0000
#define HWKM_VERSION_MINOR_REV 16 /* bit 23-16 */
/* QTI HWKM ICE slave config and status registers */
#define QTI_HWKM_ICE_RG_TZ_KM_CTL 0x1000
#define QTI_HWKM_ICE_RG_TZ_KM_STATUS 0x1004
#define QTI_HWKM_ICE_RG_TZ_KM_STATUS_IRQ_MASK 0x1008
#define QTI_HWKM_ICE_RG_TZ_KM_BOOT_STAGE_OTP 0x100C
#define QTI_HWKM_ICE_RG_TZ_KM_DEBUG_CTL 0x1010
#define QTI_HWKM_ICE_RG_TZ_KM_DEBUG_WRITE 0x1014
#define QTI_HWKM_ICE_RG_TZ_KM_DEBUG_READ 0x1018
#define QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_CTL 0x101C
#define QTI_HWKM_ICE_RG_TZ_TPKEY_RECEIVE_STATUS 0x1020
#define QTI_HWKM_ICE_RG_TZ_KM_COMMON_IRQ_ROUTING 0x1024
/* QTI HWKM ICE slave registers from SWI */
/* QTI HWKM ICE slave shared registers */
#define QTI_HWKM_ICE_RG_IPCAT_VERSION 0x0000
#define QTI_HWKM_ICE_RG_KEY_POLICY_VERSION 0x0004
#define QTI_HWKM_ICE_RG_SHARED_STATUS 0x0008
#define QTI_HWKM_ICE_RG_KEYTABLE_SIZE 0x000C
/* QTI HWKM ICE slave register bank 0 */
#define QTI_HWKM_ICE_RG_BANK0_BANKN_CTL 0x2000
#define QTI_HWKM_ICE_RG_BANK0_BANKN_STATUS 0x2004
#define QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_STATUS 0x2008
#define QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_MASK 0x200C
#define QTI_HWKM_ICE_RG_BANK0_BANKN_ESR 0x2010
#define QTI_HWKM_ICE_RG_BANK0_BANKN_ESR_IRQ_MASK 0x2014
#define QTI_HWKM_ICE_RG_BANK0_BANKN_ESYNR 0x2018
#define QTI_HWKM_ICE_RG_BANK0_CMD_0 0x201C
#define QTI_HWKM_ICE_RG_BANK0_CMD_1 0x2020
#define QTI_HWKM_ICE_RG_BANK0_CMD_2 0x2024
#define QTI_HWKM_ICE_RG_BANK0_CMD_3 0x2028
#define QTI_HWKM_ICE_RG_BANK0_CMD_4 0x202C
#define QTI_HWKM_ICE_RG_BANK0_CMD_5 0x2030
#define QTI_HWKM_ICE_RG_BANK0_CMD_6 0x2034
#define QTI_HWKM_ICE_RG_BANK0_CMD_7 0x2038
#define QTI_HWKM_ICE_RG_BANK0_CMD_8 0x203C
#define QTI_HWKM_ICE_RG_BANK0_CMD_9 0x2040
#define QTI_HWKM_ICE_RG_BANK0_CMD_10 0x2044
#define QTI_HWKM_ICE_RG_BANK0_CMD_11 0x2048
#define QTI_HWKM_ICE_RG_BANK0_CMD_12 0x204C
#define QTI_HWKM_ICE_RG_BANK0_CMD_13 0x2050
#define QTI_HWKM_ICE_RG_BANK0_CMD_14 0x2054
#define QTI_HWKM_ICE_RG_BANK0_CMD_15 0x2058
#define QTI_HWKM_ICE_RG_BANK0_RSP_0 0x205C
#define QTI_HWKM_ICE_RG_BANK0_RSP_1 0x2060
#define QTI_HWKM_ICE_RG_BANK0_RSP_2 0x2064
#define QTI_HWKM_ICE_RG_BANK0_RSP_3 0x2068
#define QTI_HWKM_ICE_RG_BANK0_RSP_4 0x206C
#define QTI_HWKM_ICE_RG_BANK0_RSP_5 0x2070
#define QTI_HWKM_ICE_RG_BANK0_RSP_6 0x2074
#define QTI_HWKM_ICE_RG_BANK0_RSP_7 0x2078
#define QTI_HWKM_ICE_RG_BANK0_RSP_8 0x207C
#define QTI_HWKM_ICE_RG_BANK0_RSP_9 0x2080
#define QTI_HWKM_ICE_RG_BANK0_RSP_10 0x2084
#define QTI_HWKM_ICE_RG_BANK0_RSP_11 0x2088
#define QTI_HWKM_ICE_RG_BANK0_RSP_12 0x208C
#define QTI_HWKM_ICE_RG_BANK0_RSP_13 0x2090
#define QTI_HWKM_ICE_RG_BANK0_RSP_14 0x2094
#define QTI_HWKM_ICE_RG_BANK0_RSP_15 0x2098
#define QTI_HWKM_ICE_RG_BANK0_BANKN_IRQ_ROUTING 0x209C
#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_0 0x20A0
#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_1 0x20A4
#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_2 0x20A8
#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_3 0x20AC
#define QTI_HWKM_ICE_RG_BANK0_BANKN_BBAC_4 0x20B0
/* QTI HWKM access control registers for Bank 2 */
#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_0 0x5000
#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_1 0x5004
#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_2 0x5008
#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_3 0x500C
#define QTI_HWKM_ICE_RG_BANK0_AC_BANKN_BBAC_4 0x5010
/* QTI HWKM ICE slave config reg vals */
/* HWKM_ICEMEM_SLAVE_ICE_KM_RG_TZ_KM_CTL */
#define CRC_CHECK_EN 0
#define KEYTABLE_HW_WR_ACCESS_EN 1
#define KEYTABLE_HW_RD_ACCESS_EN 2
#define BOOT_INIT0_DISABLE 3
#define BOOT_INIT1_DISABLE 4
#define ICE_LEGACY_MODE_EN_OTP 5
/* HWKM_ICEMEM_SLAVE_ICE_KM_RG_TZ_KM_STATUS */
#define KT_CLEAR_DONE 0
#define BOOT_CMD_LIST0_DONE 1
#define BOOT_CMD_LIST1_DONE 2
#define LAST_ACTIVITY_BANK 3
#define CRYPTO_LIB_BIST_ERROR 6
#define CRYPTO_LIB_BIST_DONE 7
#define BIST_ERROR 8
#define BIST_DONE 9
#define LAST_ACTIVITY_BANK_MASK 0x38
/* HWKM_ICEMEM_SLAVE_ICE_KM_RG_TZ_TPKEY_RECEIVE_CTL */
#define TPKEY_EN 8
/* QTI HWKM Bank status & control reg vals */
/* HWKM_MASTER_CFG_KM_BANKN_CTL */
#define CMD_ENABLE_BIT 0
#define CMD_FIFO_CLEAR_BIT 1
/* HWKM_MASTER_CFG_KM_BANKN_STATUS */
#define CURRENT_CMD_REMAINING_LENGTH 0
#define MOST_RECENT_OPCODE 5
#define RSP_FIFO_AVAILABLE_DATA 9
#define CMD_FIFO_AVAILABLE_SPACE 14
#define ICE_LEGACY_MODE_BIT 19
#define CMD_FIFO_AVAILABLE_SPACE_MASK 0x7c000
#define RSP_FIFO_AVAILABLE_DATA_MASK 0x3e00
#define MOST_RECENT_OPCODE_MASK 0x1e0
#define CURRENT_CMD_REMAINING_LENGTH_MASK 0x1f
/* HWKM_MASTER_CFG_KM_BANKN_IRQ_STATUS */
#define ARB_GRAN_WINNER 0
#define CMD_DONE_BIT 1
#define RSP_FIFO_NOT_EMPTY 2
#define RSP_FIFO_FULL 3
#define RSP_FIFO_UNDERFLOW 4
#define CMD_FIFO_UNDERFLOW 5
#endif /* _QTI_HARDWARE_KEY_MANAGER_REGS_H_ */

View file

@ -0,0 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# QCOM TMECOM driver
config MSM_TMECOM_QMP
tristate "TMECom QMP interface for HWKM master"
select MSM_QMP
help
Set to enable TMECom interface over QTI Mailbox
Protocol (QMP) for HWKM master on MSM platform.
	  This interface helps to communicate with TME
mainly for HWKM functionality.

View file

@ -0,0 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MSM_TMECOM_QMP) := tmecom-intf.o
tmecom-intf-objs := tmecom.o tme_hwkm_master.o

View file

@ -0,0 +1,404 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "tme_hwkm_master_intf.h"
#include "tmecom.h"
#define TME_MSG_CBOR_TAG_HWKM (303)
#define TME_CLEAR_KEY_CBOR_TAG 0x482F01D9 /* _be32 0xD9012F48 */
#define TME_DERIVE_KEY_CBOR_TAG 0x582F01D9 /* _be32 0xD9012F58 */
#define TME_GENERATE_KEY_CBOR_TAG 0x542F01D9 /* _be32 0xD9012F54 */
#define TME_IMPORT_KEY_CBOR_TAG 0x582F01D9 /* _be32 0xD9012F58 */
#define TME_WRAP_KEY_CBOR_TAG 0x502F01D9 /* _be32 0xD9012F50 */
#define TME_UNWRAP_KEY_CBOR_TAG 0x582F01D9 /* _be32 0xD9012F58 */
#define TME_BORADCAST_KEY_CBOR_TAG 0x442F01D9 /* _be32 0xD9012F44 */
/*
* Static alloc for wrapped key
* Protected by tmecom dev mutex
*/
static struct wrap_key_resp gwrpk_response = {0};
/*
 * Copy the TME response status words into the caller's err_info and
 * report failure (1) when any error word other than seq_rsp_status
 * is non-zero; seq_rsp_status is debug-only information.
 */
static inline uint32_t update_ext_err(
		struct tme_ext_err_info *err_info,
		struct tme_response_sts *result)
{
	err_info->tme_err_status = result->tme_err_status;
	err_info->seq_err_status = result->seq_err_status;
	err_info->seq_kp_err_status0 = result->seq_kp_err_status0;
	err_info->seq_kp_err_status1 = result->seq_kp_err_status1;
	err_info->seq_rsp_status = result->seq_rsp_status;

	print_hex_dump_bytes("err_info decoded bytes : ",
			DUMP_PREFIX_ADDRESS, (void *)err_info,
			sizeof(*err_info));

	if (err_info->tme_err_status || err_info->seq_err_status ||
			err_info->seq_kp_err_status0 ||
			err_info->seq_kp_err_status1)
		return 1;

	return 0;
}
/**
 * tme_hwkm_master_clearkey() - Ask TME to clear a key.
 * @key_id:   ID of the key to clear.
 * @err_info: out parameter; detailed TME/SEQ error status.
 *
 * Return: 0 on success, negative errno on transport/allocation failure,
 * non-zero when TME reported an error (details in @err_info).
 */
uint32_t tme_hwkm_master_clearkey(uint32_t key_id,
		struct tme_ext_err_info *err_info)
{
	struct clear_key_req *request = NULL;
	struct tme_response_sts *response = NULL;
	uint32_t ret = 0;
	size_t response_len = sizeof(*response);

	if (!err_info)
		return -EINVAL;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	response = kzalloc(response_len, GFP_KERNEL);
	if (!request || !response) {
		ret = -ENOMEM;
		goto err_exit;
	}

	request->cmd.code = TME_HWKM_CMD_CLEAR_KEY;
	request->key_id = key_id;
	request->cbor_header = TME_CLEAR_KEY_CBOR_TAG;

	ret = tmecom_process_request(request, sizeof(*request), response,
			&response_len);
	if (ret != 0) {
		pr_err("HWKM clear key request failed for %d\n", key_id);
		goto err_exit;
	}

	if (response_len != sizeof(*response)) {
		/* Fix: %zu for size_t; report the expected struct size,
		 * not sizeof(pointer).
		 */
		pr_err("HWKM response failed with invalid length: %zu, %zu\n",
				response_len, sizeof(*response));
		ret = -EBADMSG;
		goto err_exit;
	}

	ret = update_ext_err(err_info, response);

err_exit:
	kfree(request);
	kfree(response);
	return ret;
}
EXPORT_SYMBOL(tme_hwkm_master_clearkey);
/**
 * tme_hwkm_master_generatekey() - Generate a random key in TME.
 * @key_id:    ID of the key to be generated.
 * @policy:    key policy to attach to the generated key.
 * @cred_slot: credential slot to which this key will be bound.
 * @err_info:  out parameter; detailed TME/SEQ error status.
 *
 * Return: 0 on success, negative errno on transport/allocation failure,
 * non-zero when TME reported an error (details in @err_info).
 */
uint32_t tme_hwkm_master_generatekey(uint32_t key_id,
		struct tme_key_policy *policy,
		uint32_t cred_slot,
		struct tme_ext_err_info *err_info)
{
	struct gen_key_req *request = NULL;
	struct tme_response_sts *response = NULL;
	uint32_t ret = 0;
	size_t response_len = sizeof(*response);

	if (!err_info || !policy)
		return -EINVAL;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	response = kzalloc(response_len, GFP_KERNEL);
	if (!request || !response) {
		ret = -ENOMEM;
		goto err_exit;
	}

	request->cmd.code = TME_HWKM_CMD_GENERATE_KEY;
	request->key_id = key_id;
	request->cred_slot = cred_slot;
	request->cbor_header = TME_GENERATE_KEY_CBOR_TAG;
	memcpy(&request->key_policy, policy, sizeof(*policy));

	ret = tmecom_process_request(request, sizeof(*request), response,
			&response_len);
	if (ret != 0) {
		pr_err("HWKM generate key request failed for %d\n", key_id);
		goto err_exit;
	}

	if (response_len != sizeof(*response)) {
		/* Fix: %zu for size_t; report the expected struct size. */
		pr_err("HWKM response failed with invalid length: %zu, %zu\n",
				response_len, sizeof(*response));
		ret = -EBADMSG;
		goto err_exit;
	}

	ret = update_ext_err(err_info, response);

err_exit:
	kfree(request);
	kfree(response);
	return ret;
}
EXPORT_SYMBOL(tme_hwkm_master_generatekey);
/**
 * tme_hwkm_master_derivekey() - Derive a key (NIST or HKDF) in TME.
 * @key_id:    ID of the key to be derived.
 * @kdf_info:  specifies how the key is to be derived.
 * @cred_slot: credential slot to which this key will be bound.
 * @err_info:  out parameter; detailed TME/SEQ error status.
 *
 * Return: 0 on success, negative errno on transport/allocation failure,
 * non-zero when TME reported an error (details in @err_info).
 */
uint32_t tme_hwkm_master_derivekey(uint32_t key_id,
		struct tme_kdf_spec *kdf_info,
		uint32_t cred_slot,
		struct tme_ext_err_info *err_info)
{
	struct derive_key_req *request = NULL;
	struct tme_response_sts *response = NULL;
	uint32_t ret = 0;
	size_t response_len = sizeof(*response);

	if (!kdf_info || !err_info)
		return -EINVAL;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	response = kzalloc(response_len, GFP_KERNEL);
	if (!request || !response) {
		ret = -ENOMEM;
		goto err_exit;
	}

	request->cmd.code = TME_HWKM_CMD_DERIVE_KEY;
	request->key_id = key_id;
	request->cred_slot = cred_slot;
	request->cbor_header = TME_DERIVE_KEY_CBOR_TAG;
	memcpy(&request->kdf_info, kdf_info, sizeof(*kdf_info));

	ret = tmecom_process_request(request, sizeof(*request), response,
			&response_len);
	if (ret != 0) {
		pr_err("HWKM derive key request failed for %d\n", key_id);
		goto err_exit;
	}

	if (response_len != sizeof(*response)) {
		/* Fix: %zu for size_t; report the expected struct size. */
		pr_err("HWKM response failed with invalid length: %zu, %zu\n",
				response_len, sizeof(*response));
		ret = -EBADMSG;
		goto err_exit;
	}

	ret = update_ext_err(err_info, response);

err_exit:
	kfree(request);
	kfree(response);
	return ret;
}
EXPORT_SYMBOL(tme_hwkm_master_derivekey);
/**
 * tme_hwkm_master_wrapkey() - Wrap (export) a key under another key.
 * @key_id:       ID of the key securing the target key.
 * @targetkey_id: the key to be wrapped.
 * @cred_slot:    credential slot to which this key is bound.
 *                NOTE(review): not copied into the request - the field is
 *                left zeroed by kzalloc; confirm this is intended.
 * @wrapped:      out parameter; the wrapped key material.
 * @err_info:     out parameter; detailed TME/SEQ error status.
 *
 * Return: 0 on success, negative errno on transport/allocation failure,
 * non-zero when TME reported an error (details in @err_info).
 */
uint32_t tme_hwkm_master_wrapkey(uint32_t key_id,
		uint32_t targetkey_id,
		uint32_t cred_slot,
		struct tme_wrapped_key *wrapped,
		struct tme_ext_err_info *err_info)
{
	struct wrap_key_req *request = NULL;
	struct wrap_key_resp *wrpk_response = NULL;
	uint32_t ret = 0;
	size_t response_len = sizeof(*wrpk_response);

	if (!wrapped || !err_info)
		return -EINVAL;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	/* Response is large; use the static buffer, which is safe because
	 * tmecom_process_request() serializes callers on the device mutex.
	 */
	wrpk_response = &gwrpk_response;
	if (!request)
		return -ENOMEM;

	request->cmd.code = TME_HWKM_CMD_WRAP_KEY;
	request->key_id = key_id;
	request->target_key_id = targetkey_id;
	request->cbor_header = TME_WRAP_KEY_CBOR_TAG;

	ret = tmecom_process_request(request, sizeof(*request), wrpk_response,
			&response_len);
	if (ret != 0) {
		pr_err("HWKM wrap key request failed for %d\n", key_id);
		goto err_exit;
	}

	if (response_len != sizeof(*wrpk_response)) {
		/* Fix: %zu for size_t; report the expected struct size,
		 * not sizeof(pointer).
		 */
		pr_err("HWKM response failed with invalid length: %zu, %zu\n",
				response_len, sizeof(*wrpk_response));
		ret = -EBADMSG;
		goto err_exit;
	}

	ret = update_ext_err(err_info, &wrpk_response->status);
	if (!ret)
		memcpy(wrapped, &wrpk_response->wrapped_key, sizeof(*wrapped));

err_exit:
	kfree(request);
	return ret;
}
EXPORT_SYMBOL(tme_hwkm_master_wrapkey);
/**
 * tme_hwkm_master_unwrapkey() - Unwrap (import) a wrapped key.
 * @key_id:    ID of the key slot to unwrap into.
 * @kwkey_id:  ID of the key used to unwrap.
 * @cred_slot: credential slot to which this key will be bound.
 *             NOTE(review): not copied into the request - the field is
 *             left zeroed by kzalloc; confirm this is intended.
 * @wrapped:   the key material to be unwrapped.
 * @err_info:  out parameter; detailed TME/SEQ error status.
 *
 * Return: 0 on success, negative errno on transport/allocation failure,
 * non-zero when TME reported an error (details in @err_info).
 */
uint32_t tme_hwkm_master_unwrapkey(uint32_t key_id,
		uint32_t kwkey_id,
		uint32_t cred_slot,
		struct tme_wrapped_key *wrapped,
		struct tme_ext_err_info *err_info)
{
	struct unwrap_key_req *request = NULL;
	struct tme_response_sts *response = NULL;
	uint32_t ret = 0;
	size_t response_len = sizeof(*response);

	if (!wrapped || !err_info)
		return -EINVAL;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	response = kzalloc(response_len, GFP_KERNEL);
	if (!request || !response) {
		ret = -ENOMEM;
		goto err_exit;
	}

	request->cmd.code = TME_HWKM_CMD_UNWRAP_KEY;
	request->key_id = key_id;
	request->kw_key_id = kwkey_id;
	request->cbor_header = TME_UNWRAP_KEY_CBOR_TAG;
	memcpy(&request->wrapped, wrapped, sizeof(*wrapped));

	ret = tmecom_process_request(request, sizeof(*request), response,
			&response_len);
	if (ret != 0) {
		pr_err("HWKM unwrap key request failed for %d\n", key_id);
		goto err_exit;
	}

	if (response_len != sizeof(*response)) {
		/* Fix: %zu for size_t; report the expected struct size. */
		pr_err("HWKM response failed with invalid length: %zu, %zu\n",
				response_len, sizeof(*response));
		ret = -EBADMSG;
		goto err_exit;
	}

	ret = update_ext_err(err_info, response);

err_exit:
	kfree(request);
	kfree(response);
	return ret;
}
EXPORT_SYMBOL(tme_hwkm_master_unwrapkey);
/**
 * tme_hwkm_master_importkey() - Import plain-text key material into TME.
 * @key_id:       ID of the key to be imported.
 * @policy:       key policy to associate with the key.
 * @key_material: the plain-text key material.
 * @cred_slot:    credential slot to which this key will be bound.
 * @err_info:     out parameter; detailed TME/SEQ error status.
 *
 * Return: 0 on success, negative errno on transport/allocation failure,
 * non-zero when TME reported an error (details in @err_info).
 */
uint32_t tme_hwkm_master_importkey(uint32_t key_id,
		struct tme_key_policy *policy,
		struct tme_plaintext_key *key_material,
		uint32_t cred_slot,
		struct tme_ext_err_info *err_info)
{
	struct import_key_req *request = NULL;
	struct tme_response_sts *response = NULL;
	uint32_t ret = 0;
	size_t response_len = sizeof(*response);

	if (!key_material || !err_info || !policy)
		return -EINVAL;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	response = kzalloc(response_len, GFP_KERNEL);
	if (!request || !response) {
		ret = -ENOMEM;
		goto err_exit;
	}

	request->cmd.code = TME_HWKM_CMD_IMPORT_KEY;
	request->key_id = key_id;
	request->cred_slot = cred_slot;
	request->cbor_header = TME_IMPORT_KEY_CBOR_TAG;
	memcpy(&request->key_policy, policy, sizeof(*policy));
	memcpy(&request->key_material, key_material, sizeof(*key_material));

	ret = tmecom_process_request(request, sizeof(*request), response,
			&response_len);
	if (ret != 0) {
		pr_err("HWKM import key request failed for %d\n", key_id);
		goto err_exit;
	}

	if (response_len != sizeof(*response)) {
		/* Fix: %zu for size_t; report the expected struct size. */
		pr_err("HWKM response failed with invalid length: %zu, %zu\n",
				response_len, sizeof(*response));
		ret = -EBADMSG;
		goto err_exit;
	}

	ret = update_ext_err(err_info, response);

err_exit:
	kfree(request);
	kfree(response);
	return ret;
}
EXPORT_SYMBOL(tme_hwkm_master_importkey);
/**
 * tme_hwkm_master_broadcast_transportkey() - Have TME broadcast the TP key.
 * @err_info: out parameter; detailed TME/SEQ error status.
 *
 * Triggers the TME master to broadcast the transport key to the HWKM
 * slaves (e.g. the ICE slave) that have been armed to receive it.
 *
 * Return: 0 on success, negative errno on transport/allocation failure,
 * non-zero when TME reported an error (details in @err_info).
 */
uint32_t tme_hwkm_master_broadcast_transportkey(
		struct tme_ext_err_info *err_info)
{
	struct broadcast_tpkey_req *request = NULL;
	struct tme_response_sts *response = NULL;
	uint32_t ret = 0;
	size_t response_len = sizeof(*response);

	if (!err_info)
		return -EINVAL;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	response = kzalloc(response_len, GFP_KERNEL);
	if (!request || !response) {
		ret = -ENOMEM;
		goto err_exit;
	}

	request->cbor_header = TME_BORADCAST_KEY_CBOR_TAG;
	request->cmd.code = TME_HWKM_CMD_BROADCAST_TP_KEY;

	ret = tmecom_process_request(request, sizeof(*request), response,
			&response_len);
	if (ret != 0) {
		pr_err("HWKM broadcast TP key request failed\n");
		goto err_exit;
	}

	if (response_len != sizeof(*response)) {
		/* Fix: %zu for size_t; report the expected struct size. */
		pr_err("HWKM response failed with invalid length: %zu, %zu\n",
				response_len, sizeof(*response));
		ret = -EBADMSG;
		goto err_exit;
	}

	ret = update_ext_err(err_info, response);

err_exit:
	kfree(request);
	kfree(response);
	return ret;
}
EXPORT_SYMBOL(tme_hwkm_master_broadcast_transportkey);

View file

@ -0,0 +1,132 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _TME_HWKM_MASTER_INTERFACE_H_
#define _TME_HWKM_MASTER_INTERFACE_H_
#include <linux/tme_hwkm_master_defs.h>
/**
* HWKM Master command IDs
*/
enum tme_hwkm_cmd {
TME_HWKM_CMD_CLEAR_KEY = 0, /**< Clear Key */
TME_HWKM_CMD_GENERATE_KEY = 1, /**< Generate Key */
TME_HWKM_CMD_DERIVE_KEY = 2, /**< Derive Key, NIST or HKDF */
TME_HWKM_CMD_WRAP_KEY = 3, /**< Wrap Key */
TME_HWKM_CMD_UNWRAP_KEY = 4, /**< Unwrap Key */
TME_HWKM_CMD_IMPORT_KEY = 5, /**< Import Key */
TME_HWKM_CMD_BROADCAST_TP_KEY = 6, /**< Broadcast Transport Key */
TMW_HWKM_CMD_INVALID = 7, /**< Invalid cmd */
};
/**
* Opcode and response structures
*/
/* Values as per TME_HWKM_CMD_* */
struct tme_hwkm_master_cmd {
uint32_t code;
} __packed;
struct tme_response_sts {
/* TME FW */
uint32_t tme_err_status; /**< TME FW Response status. */
/* SEQ FW */
uint32_t seq_err_status; /**< Contents of CSR_CMD_ERROR_STATUS */
/* SEQ HW Key Policy */
uint32_t seq_kp_err_status0; /**< CRYPTO_ENGINE_CRYPTO_KEY_POLICY_ERROR_STATUS0 */
uint32_t seq_kp_err_status1; /**< CRYPTO_ENGINE_CRYPTO_KEY_POLICY_ERROR_STATUS1 */
/* Debug information: log/print this information if any of the above fields is non-zero */
uint32_t seq_rsp_status; /**< Contents of CSR_CMD_RESPONSE_STATUS */
} __packed;
/**
* Clear Key ID structures
*/
struct clear_key_req {
uint32_t cbor_header; /**< CBOR encoded tag */
struct tme_hwkm_master_cmd cmd; /**< @c TME_HWKM_CMD_CLEAR_KEY */
uint32_t key_id; /**< The ID of the key to clear.*/
} __packed;
/**
* Generate Key ID structures
*/
struct gen_key_req {
uint32_t cbor_header; /**< CBOR encoded tag */
struct tme_hwkm_master_cmd cmd; /**< @c TME_HWKM_CMD_GENERATE_KEY */
uint32_t key_id; /**< The ID of the key to be generated. */
struct tme_key_policy key_policy;/**< The policy specifying the key to be generated. */
uint32_t cred_slot; /**< Credential slot to which this key will be bound. */
} __packed;
/**
* Derive Key ID structures
*/
struct derive_key_req {
uint32_t cbor_header; /**< CBOR encoded tag */
struct tme_hwkm_master_cmd cmd; /**< @c TME_HWKM_CMD_DERIVE_KEY */
uint32_t key_id; /**< The ID of the key to be derived. */
struct tme_kdf_spec kdf_info; /**< Specifies how the key is to be derived. */
uint32_t cred_slot; /**< Credential slot to which this key will be bound. */
} __packed;
/**
* Wrap Key ID structures
*/
struct wrap_key_req {
uint32_t cbor_header; /**< CBOR encoded tag */
struct tme_hwkm_master_cmd cmd;/**< @c TME_HWKM_CMD_WRAP_KEY */
uint32_t key_id; /**< The ID of the key to secure the target key. */
uint32_t target_key_id; /**< Denotes the key to be wrapped. */
uint32_t cred_slot; /**< Credential slot to which this key is bound. */
} __packed;
struct wrap_key_resp {
struct tme_response_sts status; /**< Response status. */
struct tme_wrapped_key wrapped_key; /**< The wrapped key. */
} __packed;
/**
* Unwrap Key ID structures
*/
struct unwrap_key_req {
uint32_t cbor_header; /**< CBOR encoded tag */
struct tme_hwkm_master_cmd cmd;/**< @c TME_HWKM_CMD_UNWRAP_KEY */
uint32_t key_id; /**< The ID of the key to be unwrapped. */
uint32_t kw_key_id; /**< The ID of the key to be used to unwrap the key. */
struct tme_wrapped_key wrapped; /**< The key to be unwrapped. */
uint32_t cred_slot; /**< Credential slot to which this key will be bound. */
} __packed;
/**
* Import Key ID structures
*/
struct import_key_req {
uint32_t cbor_header; /**< CBOR encoded tag */
struct tme_hwkm_master_cmd cmd; /**< @c TME_HWKM_CMD_IMPORT_KEY */
uint32_t key_id; /**< The ID of the key to be imported. */
struct tme_key_policy key_policy;/**< The Key Policy to be associated with the key. */
struct tme_plaintext_key key_material;/**< The plain-text key material. */
uint32_t cred_slot; /**< Credential slot to which this key will be bound. */
} __packed;
/**
* Broadcast Transport Key structures
*/
struct broadcast_tpkey_req {
uint32_t cbor_header; /**< CBOR encoded tag */
struct tme_hwkm_master_cmd cmd;/**< @c TME_HWKM_CMD_BROADCAST_TP_KEY */
} __packed;
#endif /* _TME_HWKM_MASTER_INTERFACE_H_ */

View file

@ -0,0 +1,316 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mailbox_client.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
#include <linux/mailbox/qmp.h>
#include <linux/uaccess.h>
#include <linux/mailbox_controller.h>
#include "tmecom.h"
struct tmecom {
struct device *dev;
struct mbox_client cl;
struct mbox_chan *chan;
struct mutex lock;
struct qmp_pkt pkt;
wait_queue_head_t waitq;
void *txbuf;
bool rx_done;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
#include <linux/tme_hwkm_master_defs.h>
#include <linux/tme_hwkm_master.h>
char dpkt[MBOX_MAX_MSG_LEN + 1];
struct dentry *debugfs_file;
#endif /* CONFIG_DEBUG_FS */
static struct tmecom *tmedev;
/**
* tmecom_msg_hdr - Request/Response message header between HLOS and TME.
*
* This header is proceeding any request specific parameters.
* The transaction id is used to match request with response.
*
* Note: glink/QMP layer provides the rx/tx data size, so user payload size
* is calculated by reducing the header size.
*/
struct tmecom_msg_hdr {
unsigned int reserved; /* for future use */
unsigned int txnid; /* transaction id */
} __packed;
#define TMECOM_TX_HDR_SIZE sizeof(struct tmecom_msg_hdr)
#define CBOR_NUM_BYTES (sizeof(unsigned int))
#define TMECOM_RX_HDR_SIZE (TMECOM_TX_HDR_SIZE + CBOR_NUM_BYTES)
/*
* CBOR encode emulation
* Prepend tmecom_msg_hdr space
* CBOR tag is prepended in request
*/
/*
 * CBOR encode emulation: copy the user payload past the tmecom message
 * header in the tx buffer (the CBOR tag is already part of the request).
 *
 * Fix: copy from @reqbuf directly - the intermediate unsigned int * cast
 * discarded the const qualifier for no benefit.
 *
 * Returns the total on-wire size (header + payload).
 */
static inline size_t tmecom_encode(struct tmecom *tdev, const void *reqbuf,
		size_t size)
{
	void *msg = tdev->txbuf + TMECOM_TX_HDR_SIZE;

	memcpy(msg, reqbuf, size);

	return (size + TMECOM_TX_HDR_SIZE);
}
/*
* CBOR decode emulation
* Strip tmecom_msg_hdr & CBOR tag
*/
/*
 * CBOR decode emulation: strip the tmecom message header and the
 * prepended CBOR tag word, copying only the user payload out.
 *
 * Fix: dropped the pointless unsigned int * casts of both buffers.
 *
 * Returns the decoded payload length.
 */
static inline size_t tmecom_decode(struct tmecom *tdev, void *respbuf)
{
	const void *msg = tdev->pkt.data + TMECOM_RX_HDR_SIZE;
	size_t len = tdev->pkt.size - TMECOM_RX_HDR_SIZE;

	memcpy(respbuf, msg, len);

	return len;
}
/* Wait-queue predicate: true once the rx callback has stashed a response. */
static bool tmecom_check_rx_done(struct tmecom *tdev)
{
	return tdev->rx_done;
}
int tmecom_process_request(const void *reqbuf, size_t reqsize, void *respbuf,
size_t *respsize)
{
struct tmecom *tdev = tmedev;
long time_left = 0;
int ret = 0;
/*
* Check to handle if probe is not successful or not completed yet
*/
if (!tdev) {
pr_err("%s: tmecom dev is NULL\n", __func__);
return -ENODEV;
}
if (!reqbuf || !reqsize || (reqsize > MBOX_MAX_MSG_LEN)) {
dev_err(tdev->dev, "invalid reqbuf or reqsize\n");
return -EINVAL;
}
if (!respbuf || !respsize || (*respsize > MBOX_MAX_MSG_LEN)) {
dev_err(tdev->dev, "invalid respbuf or respsize\n");
return -EINVAL;
}
mutex_lock(&tdev->lock);
tdev->rx_done = false;
tdev->pkt.size = tmecom_encode(tdev, reqbuf, reqsize);
/*
* Controller expects a 4 byte aligned buffer
*/
tdev->pkt.size = (tdev->pkt.size + 0x3) & ~0x3;
tdev->pkt.data = tdev->txbuf;
pr_debug("tmecom encoded request size = %u\n", tdev->pkt.size);
print_hex_dump_bytes("tmecom sending bytes : ",
DUMP_PREFIX_ADDRESS, tdev->pkt.data, tdev->pkt.size);
if (mbox_send_message(tdev->chan, &tdev->pkt) < 0) {
dev_err(tdev->dev, "failed to send qmp message\n");
ret = -EAGAIN;
goto err_exit;
}
time_left = wait_event_interruptible_timeout(tdev->waitq,
tmecom_check_rx_done(tdev), tdev->cl.tx_tout);
if (!time_left) {
dev_err(tdev->dev, "request timed out\n");
ret = -ETIMEDOUT;
goto err_exit;
}
dev_info(tdev->dev, "response received\n");
pr_debug("tmecom received size = %u\n", tdev->pkt.size);
print_hex_dump_bytes("tmecom received bytes : ",
DUMP_PREFIX_ADDRESS, tdev->pkt.data, tdev->pkt.size);
*respsize = tmecom_decode(tdev, respbuf);
tdev->rx_done = false;
ret = 0;
err_exit:
mutex_unlock(&tdev->lock);
return ret;
}
EXPORT_SYMBOL(tmecom_process_request);
#if IS_ENABLED(CONFIG_DEBUG_FS)
/*
 * Debugfs test hook: forward a raw user-supplied request to TME, dump
 * the decoded response, then exercise the broadcast-TP-key API.
 * Always consumes @len so the writer does not loop.
 */
static ssize_t tmecom_debugfs_write(struct file *file,
		const char __user *userstr, size_t len, loff_t *pos)
{
	int ret = 0;
	size_t rxlen = 0;
	struct tme_ext_err_info *err_info = (struct tme_ext_err_info *)dpkt;

	if (!len || (len > MBOX_MAX_MSG_LEN)) {
		pr_err("invalid message length\n");
		return -EINVAL;
	}

	/* Fix: sizeof(*dpkt) is one byte - clear the whole buffer. */
	memset(dpkt, 0, sizeof(dpkt));
	ret = copy_from_user(dpkt, userstr, len);
	if (ret) {
		pr_err("%s copy from user failed, ret=%d\n", __func__, ret);
		return len;
	}

	/* Fix: the request result was ignored; don't dump a stale buffer. */
	ret = tmecom_process_request(dpkt, len, dpkt, &rxlen);
	if (ret) {
		pr_err("%s tmecom request failed, ret=%d\n", __func__, ret);
		return len;
	}
	print_hex_dump_bytes("tmecom decoded bytes : ",
			DUMP_PREFIX_ADDRESS, dpkt, rxlen);

	pr_debug("calling TME_HWKM_CMD_BROADCAST_TP_KEY api\n");
	ret = tme_hwkm_master_broadcast_transportkey(err_info);
	if (ret == 0)
		pr_debug("%s successful\n", __func__);

	return len;
}
/* Debugfs node ops: write-only test interface (no read path). */
static const struct file_operations tmecom_debugfs_ops = {
	.open = simple_open,
	.write = tmecom_debugfs_write,
};
#endif /* CONFIG_DEBUG_FS */
/*
 * Mailbox client rx_callback, invoked by the mailbox framework when the
 * TME delivers a response packet.  Copies the packet descriptor (size
 * and data pointer) into the device context, marks the response ready
 * via rx_done, and wakes any thread blocked in tmecom_process_request().
 *
 * The waiter is woken even on the error paths (spurious message, or a
 * response still pending consumption) so it can re-evaluate its wait
 * condition rather than sleep until timeout.
 */
static void tmecom_receive_message(struct mbox_client *client, void *message)
{
	struct tmecom *tdev = dev_get_drvdata(client->dev);
	struct qmp_pkt *pkt = NULL;
	if (!message) {
		dev_err(tdev->dev, "spurious message received\n");
		goto tmecom_receive_end;
	}
	/* Previous response not yet consumed; do not overwrite it */
	if (tdev->rx_done) {
		dev_err(tdev->dev, "tmecom response pending\n");
		goto tmecom_receive_end;
	}
	pkt = (struct qmp_pkt *)message;
	tdev->pkt.size = pkt->size;
	tdev->pkt.data = pkt->data;
	tdev->rx_done = true;
tmecom_receive_end:
	wake_up_interruptible(&tdev->waitq);
}
static int tmecom_probe(struct platform_device *pdev)
{
struct tmecom *tdev;
const char *label;
char name[32];
tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
if (!tdev)
return -ENOMEM;
tdev->cl.dev = &pdev->dev;
tdev->cl.tx_block = true;
tdev->cl.tx_tout = 500;
tdev->cl.knows_txdone = false;
tdev->cl.rx_callback = tmecom_receive_message;
label = of_get_property(pdev->dev.of_node, "mbox-names", NULL);
if (!label)
return -EINVAL;
snprintf(name, 32, "%s_send_message", label);
tdev->chan = mbox_request_channel(&tdev->cl, 0);
if (IS_ERR(tdev->chan)) {
dev_err(&pdev->dev, "failed to get mbox channel\n");
return PTR_ERR(tdev->chan);
}
mutex_init(&tdev->lock);
if (tdev->chan) {
tdev->txbuf =
devm_kzalloc(&pdev->dev, MBOX_MAX_MSG_LEN, GFP_KERNEL);
if (!tdev->txbuf) {
dev_err(&pdev->dev, "message buffer alloc faile\n");
return -ENOMEM;
}
}
init_waitqueue_head(&tdev->waitq);
#if IS_ENABLED(CONFIG_DEBUG_FS)
debugfs_file = debugfs_create_file(name, 0220, NULL, tdev,
&tmecom_debugfs_ops);
if (!debugfs_file)
goto err;
#endif /* CONFIG_DEBUG_FS */
tdev->rx_done = false;
tdev->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, tdev);
tmedev = tdev;
dev_info(&pdev->dev, "tmecom probe success\n");
return 0;
err:
mbox_free_channel(tdev->chan);
return -ENOMEM;
}
/* Tear down the debugfs node and release the mailbox channel. */
static int tmecom_remove(struct platform_device *pdev)
{
	struct tmecom *tmecom_dev = platform_get_drvdata(pdev);

#if IS_ENABLED(CONFIG_DEBUG_FS)
	debugfs_remove(debugfs_file);
#endif /* CONFIG_DEBUG_FS */

	if (tmecom_dev->chan)
		mbox_free_channel(tmecom_dev->chan);

	dev_info(&pdev->dev, "tmecom remove success\n");
	return 0;
}
/* Devicetree match table; exported so the module can autoload on match. */
static const struct of_device_id tmecom_match_tbl[] = {
	{.compatible = "qcom,tmecom-qmp-client"},
	{},
};
MODULE_DEVICE_TABLE(of, tmecom_match_tbl);
/* Platform driver glue: binds to "qcom,tmecom-qmp-client" DT nodes. */
static struct platform_driver tmecom_driver = {
	.probe = tmecom_probe,
	.remove = tmecom_remove,
	.driver = {
		.name = "tmecom-qmp-client",
		/* Disallow manual bind/unbind through sysfs */
		.suppress_bind_attrs = true,
		.of_match_table = tmecom_match_tbl,
	},
};
module_platform_driver(tmecom_driver);
MODULE_DESCRIPTION("MSM TMECom QTI mailbox protocol client");
MODULE_LICENSE("GPL");

View file

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _TMECOM_H_
#define _TMECOM_H_
/* Maximum size in bytes of a single QMP mailbox message */
#define MBOX_MAX_MSG_LEN 1024
/*
 * tmecom_process_request() - send a request to the TME and block for the
 * response.
 * @reqbuf:   request payload to encode and transmit
 * @reqsize:  size of @reqbuf in bytes
 * @respbuf:  buffer that receives the decoded response
 * @respsize: in: capacity of @respbuf; out: decoded response length
 *
 * Returns 0 on success, negative errno otherwise.
 */
int tmecom_process_request(const void *reqbuf, size_t reqsize, void *respbuf,
		size_t *respsize);
#endif /*_TMECOM_H_ */

View file

@ -0,0 +1,85 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CRYPTO_QTI_COMMON_H
#define _CRYPTO_QTI_COMMON_H
#include <linux/blk-crypto.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/delay.h>
#define RAW_SECRET_SIZE 32
#define QTI_ICE_MAX_BIST_CHECK_COUNT 100
#define QTI_ICE_TYPE_NAME_LEN 8
/* Storage types for crypto */
#define UFS_CE 10
#define SDCC_CE 20
/*
 * MMIO mappings used by the ICE (Inline Crypto Engine) driver.
 *
 * NOTE(review): ice_hwkm_mmio is compiled in conditionally, so the
 * struct layout differs between CONFIG_QTI_HW_KEY_MANAGER=y and =n
 * builds; every translation unit using this struct must be built with
 * the same config - verify against all users.
 */
struct ice_mmio_data {
	void __iomem *ice_base_mmio;	/* ICE register block */
#if IS_ENABLED(CONFIG_QTI_HW_KEY_MANAGER)
	void __iomem *ice_hwkm_mmio;	/* HWKM slave register block */
#endif
};
#if IS_ENABLED(CONFIG_QTI_CRYPTO_COMMON)
int crypto_qti_init_crypto(void *mmio_data);
int crypto_qti_enable(void *mmio_data);
void crypto_qti_disable(void);
int crypto_qti_debug(const struct ice_mmio_data *mmio_data);
int crypto_qti_keyslot_program(const struct ice_mmio_data *mmio_data,
const struct blk_crypto_key *key,
unsigned int slot, u8 data_unit_mask,
int capid);
int crypto_qti_keyslot_evict(const struct ice_mmio_data *mmio_data,
unsigned int slot);
int crypto_qti_derive_raw_secret(const u8 *wrapped_key,
unsigned int wrapped_key_size, u8 *secret,
unsigned int secret_size);
#else
/* !CONFIG_QTI_CRYPTO_COMMON stubs: report "not supported" to callers. */
static inline int crypto_qti_init_crypto(void *mmio_data)
{
	return -EOPNOTSUPP;
}
static inline int crypto_qti_enable(void *mmio_data)
{
	return -EOPNOTSUPP;
}
static inline void crypto_qti_disable(void)
{}
/*
 * Stub must mirror the real prototype (which takes an ice_mmio_data
 * pointer); the original void-parameter version would break compilation
 * of callers when CONFIG_QTI_CRYPTO_COMMON is disabled.
 */
static inline int crypto_qti_debug(const struct ice_mmio_data *mmio_data)
{
	return -EOPNOTSUPP;
}
/* Remaining stubs for builds without QTI crypto common support. */
static inline int crypto_qti_keyslot_program(
				const struct ice_mmio_data *mmio_data,
				const struct blk_crypto_key *key,
				unsigned int slot,
				u8 data_unit_mask,
				int capid)
{
	return -EOPNOTSUPP;
}
static inline int crypto_qti_keyslot_evict(const struct ice_mmio_data *mmio_data,
					   unsigned int slot)
{
	return -EOPNOTSUPP;
}
static inline int crypto_qti_derive_raw_secret(
				const u8 *wrapped_key,
				unsigned int wrapped_key_size,
				u8 *secret,
				unsigned int secret_size)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_QTI_CRYPTO_COMMON */
#endif /* _CRYPTO_QTI_COMMON_H */

View file

@ -0,0 +1,153 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HDCP_QSEECOM_H
#define __HDCP_QSEECOM_H
#include <linux/types.h>
#define HDCP_QSEECOM_ENUM_STR(x) #x
enum hdcp2_app_cmd {
HDCP2_CMD_START,
HDCP2_CMD_START_AUTH,
HDCP2_CMD_STOP,
HDCP2_CMD_PROCESS_MSG,
HDCP2_CMD_TIMEOUT,
HDCP2_CMD_EN_ENCRYPTION,
HDCP2_CMD_QUERY_STREAM,
};
struct hdcp2_buffer {
unsigned char *data;
u32 length;
};
struct hdcp2_app_data {
u32 timeout;
bool repeater_flag;
struct hdcp2_buffer request; // requests to TA, sent from sink
struct hdcp2_buffer response; // responses from TA, sent to sink
};
struct hdcp1_topology {
uint32_t depth;
uint32_t device_count;
uint32_t max_devices_exceeded;
uint32_t max_cascade_exceeded;
uint32_t hdcp2LegacyDeviceDownstream;
uint32_t hdcp1DeviceDownstream;
};
static inline const char *hdcp2_app_cmd_str(enum hdcp2_app_cmd cmd)
{
switch (cmd) {
case HDCP2_CMD_START:
return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START);
case HDCP2_CMD_START_AUTH:
return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START_AUTH);
case HDCP2_CMD_STOP:
return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_STOP);
case HDCP2_CMD_PROCESS_MSG:
return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_PROCESS_MSG);
case HDCP2_CMD_TIMEOUT:
return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_TIMEOUT);
case HDCP2_CMD_EN_ENCRYPTION:
return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_EN_ENCRYPTION);
case HDCP2_CMD_QUERY_STREAM:
return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_QUERY_STREAM);
default: return "???";
}
}
#if IS_ENABLED(CONFIG_HDCP_QSEECOM)
void *hdcp1_init(void);
void hdcp1_deinit(void *data);
bool hdcp1_feature_supported(void *data);
int hdcp1_start(void *data, u32 *aksv_msb, u32 *aksv_lsb);
int hdcp1_set_enc(void *data, bool enable);
int hdcp1_ops_notify(void *data, void *topology, bool is_authenticated);
void hdcp1_stop(void *data);
void *hdcp2_init(u32 device_type);
void hdcp2_deinit(void *ctx);
bool hdcp2_feature_supported(void *ctx);
int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd,
struct hdcp2_app_data *app_data);
int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id,
uint8_t stream_number, uint32_t *stream_id);
int hdcp2_close_stream(void *ctx, uint32_t stream_id);
int hdcp2_force_encryption(void *ctx, uint32_t enable);
#else
/*
 * CONFIG_HDCP_QSEECOM disabled: no-op stubs.  The init calls return
 * NULL contexts, feature checks report false, and all other calls
 * succeed trivially (return 0).
 */
static inline void *hdcp1_init(void)
{
	return NULL;
}
static inline void hdcp1_deinit(void *data)
{
}
static inline bool hdcp1_feature_supported(void *data)
{
	return false;
}
static inline int hdcp1_start(void *data, u32 *aksv_msb, u32 *aksv_lsb)
{
	return 0;
}
static inline int hdcp1_ops_notify(void *data, void *topology, bool is_authenticated)
{
	return 0;
}
static inline int hdcp1_set_enc(void *data, bool enable)
{
	return 0;
}
static inline void hdcp1_stop(void *data)
{
}
static inline void *hdcp2_init(u32 device_type)
{
	return NULL;
}
static inline void hdcp2_deinit(void *ctx)
{
}
static inline bool hdcp2_feature_supported(void *ctx)
{
	return false;
}
static inline int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd,
		struct hdcp2_app_data *app_data)
{
	return 0;
}
static inline int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id,
		uint8_t stream_number, uint32_t *stream_id)
{
	return 0;
}
static inline int hdcp2_close_stream(void *ctx, uint32_t stream_id)
{
	return 0;
}
static inline int hdcp2_force_encryption(void *ctx, uint32_t enable)
{
	return 0;
}
#endif /* CONFIG_HDCP_QSEECOM */
#endif /* __HDCP_QSEECOM_H */

382
include/linux/hwkm.h Normal file
View file

@ -0,0 +1,382 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HWKM_H_
#define __HWKM_H_
#include <linux/types.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/tme_hwkm_master_defs.h>
#include <linux/crypto-qti-common.h>
/* Maximum number of bytes in a key used in a KEY_SLOT_RDWR operation */
#define HWKM_MAX_KEY_SIZE TME_PT_KEY_BYTES_MAX
/* Maximum number of bytes in a SW ctx used in a SYSTEM_KDF operation */
#define HWKM_MAX_CTX_SIZE TME_KDF_SW_CONTEXT_BYTES_MAX
/* Maximum number of bytes in a WKB used in a key wrap or unwrap operation */
#define HWKM_MAX_BLOB_SIZE TME_WK_CONTEXT_BYTES_MAX
#define HWKM_TPKEY_SLOT_MASTER TME_KID_TP
#define HWKM_TPKEY_SLOT_ICE 0x8C
#define HWKM_EXPECTED_UNWRAP_KEY_SIZE 100
/* Opcodes to be set in the op field of a command */
enum hwkm_op {
/* Opcode to generate a random key */
NIST_KEYGEN = 0,
/* Opcode to derive a key */
SYSTEM_KDF,
/* Used only by HW */
QFPROM_KEY_RDWR,
/* Opcode to wrap a key and export the wrapped key */
KEY_WRAP_EXPORT,
/*
* Opcode to import a wrapped key and unwrap it in the
* specified key slot
*/
KEY_UNWRAP_IMPORT,
/* Opcode to clear a slot */
KEY_SLOT_CLEAR,
/* Opcode to read or write a key from/to a slot */
KEY_SLOT_RDWR,
/*
* Opcode to broadcast a TPKEY to all slaves configured
* to receive a TPKEY.
*/
SET_TPKEY,
HWKM_MAX_OP,
HWKM_UNDEF_OP = 0xFF
};
/*
* Algorithm values which can be used in the alg_allowed field of the
* key policy.
*/
enum hwkm_alg {
/* Symmetric Algorithms */
AES128_ECB = TME_KT_Symmetric | TME_KAL_AES128_ECB | TME_KL_128,
AES256_ECB = TME_KT_Symmetric | TME_KAL_AES256_ECB | TME_KL_256,
DES_ECB = TME_KT_Symmetric | TME_KAL_DES_ECB | TME_KL_64,
TDES_ECB = TME_KT_Symmetric | TME_KAL_TDES_ECB | TME_KL_192,
AES128_CBC = TME_KT_Symmetric | TME_KAL_AES128_CBC | TME_KL_128,
AES256_CBC = TME_KT_Symmetric | TME_KAL_AES256_CBC | TME_KL_256,
DES_CBC = TME_KT_Symmetric | TME_KAL_DES_CBC | TME_KL_64,
TDES_CBC = TME_KT_Symmetric | TME_KAL_TDES_CBC | TME_KL_192,
AES128_CCM_TC = TME_KT_Symmetric | TME_KAL_AES128_CCM_TC | TME_KL_128,
AES128_CCM_NTC = TME_KT_Symmetric | TME_KAL_AES128_CCM_NTC | TME_KL_128,
AES256_CCM_TC = TME_KT_Symmetric | TME_KAL_AES256_CCM_TC | TME_KL_256,
AES256_CCM_NTC = TME_KT_Symmetric | TME_KAL_AES256_CCM_NTC | TME_KL_256,
AES256_SIV = TME_KT_Symmetric | TME_KAL_AES256_SIV | TME_KL_512,
AES128_CTR = TME_KT_Symmetric | TME_KAL_AES128_CTR | TME_KL_128,
AES256_CTR = TME_KT_Symmetric | TME_KAL_AES256_CTR | TME_KL_256,
AES128_XTS = TME_KT_Symmetric | TME_KAL_AES128_XTS | TME_KL_512,
AES256_XTS = TME_KT_Symmetric | TME_KAL_AES256_XTS | TME_KL_512,
SHA1_HMAC = TME_KT_Symmetric | TME_KAL_SHA1_HMAC | TME_KL_512,
SHA256_HMAC = TME_KT_Symmetric | TME_KAL_SHA256_HMAC | TME_KL_512,
AES128_CMAC = TME_KT_Symmetric | TME_KAL_AES128_CMAC | TME_KL_128,
AES256_CMAC = TME_KT_Symmetric | TME_KAL_AES256_CMAC | TME_KL_256,
SHA384_HMAC = TME_KT_Symmetric | TME_KAL_SHA384_HMAC | TME_KL_512,
SHA512_HMAC = TME_KT_Symmetric | TME_KAL_SHA512_HMAC | TME_KL_512,
AES128_GCM = TME_KT_Symmetric | TME_KAL_AES128_GCM | TME_KL_128,
AES256_GCM = TME_KT_Symmetric | TME_KAL_AES256_GCM | TME_KL_256,
// TODO: Verify Key Lengths for these algorithms
KASUMI = TME_KT_Symmetric | TME_KAL_KASUMI | TME_KL_128,
SNOW3G = TME_KT_Symmetric | TME_KAL_SNOW3G | TME_KL_128,
ZUC = TME_KT_Symmetric | TME_KAL_ZUC | TME_KL_128,
PRINCE = TME_KT_Symmetric | TME_KAL_PRINCE | TME_KL_128,
SIPHASH = TME_KT_Symmetric | TME_KAL_SIPHASH | TME_KL_128,
KDF_NIST = TME_KT_Symmetric | TME_KAL_KDF_NIST | TME_KL_512,
KDF_HKDF = TME_KT_Symmetric | TME_KAL_KDF_HKDF,
/* Asymmetric Algorithms */
ECDSA_P224_NIST = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDSA | TME_KL_224 |
TME_KAL_ECC_CURVE_NIST,
ECDSA_P256_NIST = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDSA | TME_KL_256 |
TME_KAL_ECC_CURVE_NIST,
ECDSA_P384_NIST = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDSA | TME_KL_384 |
TME_KAL_ECC_CURVE_NIST,
ECDSA_P521_NIST = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDSA | TME_KL_521 |
TME_KAL_ECC_CURVE_NIST,
ECDSA_P224_BP = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDSA | TME_KL_224 |
TME_KAL_ECC_CURVE_BPOOL,
ECDSA_P256_BP = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDSA | TME_KL_256 |
TME_KAL_ECC_CURVE_BPOOL,
ECDSA_P384_BP = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDSA | TME_KL_384 |
TME_KAL_ECC_CURVE_BPOOL,
ECDSA_P512_BP = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDSA | TME_KL_512 |
TME_KAL_ECC_CURVE_BPOOL,
ECDH_P224_NIST = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDH | TME_KL_224 |
TME_KAL_ECC_CURVE_NIST,
ECDH_P256_NIST = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDH | TME_KL_256 |
TME_KAL_ECC_CURVE_NIST,
ECDH_P384_NIST = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDH | TME_KL_384 |
TME_KAL_ECC_CURVE_NIST,
ECDH_P521_NIST = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDH | TME_KL_521 |
TME_KAL_ECC_CURVE_NIST,
ECDH_P224_BP = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDH | TME_KL_224 |
TME_KAL_ECC_CURVE_BPOOL,
ECDH_P256_BP = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDH | TME_KL_256 |
TME_KAL_ECC_CURVE_BPOOL,
ECDH_P384_BP = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDH | TME_KL_384 |
TME_KAL_ECC_CURVE_BPOOL,
ECDH_P512_BP = TME_KT_Asymmetric_ECC | TME_KAL_ECC_ALGO_ECDH | TME_KL_512 |
TME_KAL_ECC_CURVE_BPOOL,
HWKM_UNDEF_ALG = 0xFFFFFFFF
};
/* Key type values which can be used in the key_type field of the key policy */
enum hwkm_type {
KEY_DERIVATION_KEY = TME_KP_KeyDerivation,
KEY_WRAPPING_KEY = TME_KP_KWK_STORAGE,
KEY_SWAPPING_KEY = TME_KP_KWK_SESSION,
TRANSPORT_KEY = TME_KP_KWK_TRANSPORT,
GENERIC_KEY = TME_KP_Generic,
EXPORT_KEY = TME_KP_KWK_XPORT,
HWKM_UNDEF_KEY_TYPE = 0xFFFFFFFF
};
/* Destinations which a context can use */
enum hwkm_destination {
KM_MASTER = TME_KD_TME_HW,
GPCE_SLAVE = TME_KD_GPCE,
MCE_SLAVE = TME_KD_MDM_CE,
ICE_SLAVE = TME_KD_ICE,
HWKM_UNDEF_DESTINATION = 0xFFFFFFFF
};
/*
* Key security levels which can be set in the security_lvl field of
* key policy.
*/
enum hwkm_security_level {
/* Can be read by SW in plaintext using KEY_SLOT_RDWR cmd. */
SW_KEY = TME_KSL_SWKey,
/* Imported key managed by HW. */
MANAGED_KEY = TME_KSL_HWManagedKey,
/* Key only known to HW. */
HW_KEY = TME_KSL_HWKey,
HWKM_UNDEF_SECURITY_LEVEL = 0xFFFFFFFF
};
enum hwkm_key_lineage {
KEY_LINEAGE_NA = TME_KLI_NA,
KEY_LINEAGE_NOT_PROVISIONED_UNIQUE = TME_KLI_NP_CU,
KEY_LINEAGE_NOT_PROVISIONED_NOT_UNIQUE = TME_KLI_P_NCU,
KEY_LINEAGE_PROVISIONED_UNIQUE = TME_KLI_P_CU,
HWKM_UNDEF_KEY_LINEAGE = 0xFFFFFFFF
};
#define HWKM_CRED_SLOT_NONE TME_CRED_SLOT_ID_NONE
#define HWKM_CRED_SLOT_1 TME_CRED_SLOT_ID_1
#define HWKM_CRED_SLOT_2 TME_CRED_SLOT_ID_2
/** Slots 18-25 are reserved for use by TZ in the TME key table */
enum hwkm_master_key_slots {
/** L2 KDKs, used to derive keys by SW. Cannot be used for crypto, only key derivation */
TZ_NKDK_L2 = TME_KID_CHIP_FAM_L1,
TZ_PKDK_L2 = TME_KID_CHIP_UNIQUE_SEED,
TZ_SKDK_L2 = TME_KID_CHIP_UNIQUE_SEED,
TZ_UKDK_L2 = TME_KID_CHIP_RAND_BASE,
/** Slots reserved for TPKEY */
TPKEY_SLOT = TME_KID_TP,
/** Slots reserved for Swap key */
TZ_SWAP_KEY_SLOT = 18,
/** Reserved for wrapping keys to persist or unwrap keys */
TZ_WRAP_KEY_SLOT = 19,
/** Reserved for intermediate operations in IHWKeyFactory */
TZ_GENERAL_PURPOSE_SLOT1 = 20,
TZ_GENERAL_PURPOSE_SLOT2 = 21,
/** Reserved for mixing keys in KDF */
TZ_MIXING_KEY_SLOT = 22,
/** Used for asymmetric operations */
TZ_ASYMMETRIC_OPERATION_SLOT = 23,
/**
* Reserved for privileged use cases which need to persist a key
* and share it between execution environments.
*
* WARNING: Modifying these values may cause issues in execution
* environments which depend on these specific slots being used for
* privileged persistent use cases.
*/
PERSISTENT_SHARED_SLOT_PAIR1 = 24,
PERSISTENT_SHARED_SLOT_PAIR2 = 25,
MASTER_SLOT_MAX,
UNDEF_SLOT = 0xFF
};
struct hwkm_key_policy_v2_extension {
bool expand_allowed;
bool extract_allowed;
enum hwkm_key_lineage lineage;
u32 credential_slot;
bool export_key_wrap_allowed;
};
struct hwkm_key_policy {
bool km_by_spu_allowed;
bool km_by_modem_allowed;
bool km_by_nsec_allowed;
bool km_by_tz_allowed;
enum hwkm_alg alg_allowed;
bool enc_allowed;
bool dec_allowed;
enum hwkm_type key_type;
u8 kdf_depth;
bool wrap_export_allowed;
bool swap_export_allowed;
enum hwkm_security_level security_lvl;
enum hwkm_destination hw_destination;
bool wrap_with_tpk_allowed;
struct hwkm_key_policy_v2_extension v2;
};
struct hwkm_bsve {
bool enabled;
bool km_key_policy_ver_en;
bool km_apps_secure_en;
bool km_msa_secure_en;
bool km_lcm_fuse_en;
bool km_boot_stage_otp_en;
bool km_swc_en;
bool km_child_key_policy_en;
bool km_mks_en;
u64 km_fuse_region_sha_digest_en;
bool km_oem_id_en;
bool km_pkhash_en;
bool km_oem_product_id_en;
bool km_oem_product_seed_en;
};
struct hwkm_keygen_cmd {
u8 dks; /* Destination Key Slot */
struct hwkm_key_policy policy; /* Key policy */
};
struct hwkm_rdwr_cmd {
uint8_t slot; /* Key Slot */
bool is_write; /* Write or read op */
struct hwkm_key_policy policy; /* Key policy for write */
uint8_t key[HWKM_MAX_KEY_SIZE]; /* Key for write */
size_t sz; /* Length of key in bytes */
};
struct hwkm_kdf_cmd {
uint8_t dks; /* Destination Key Slot */
uint8_t kdk; /* Key Derivation Key Slot */
uint8_t mks; /* Mixing key slot (bsve controlled) */
struct hwkm_key_policy policy; /* Key policy. */
struct hwkm_bsve bsve; /* Binding state vector */
uint8_t ctx[HWKM_MAX_CTX_SIZE]; /* Context */
size_t sz; /* Length of context in bytes */
enum hwkm_alg parent_alg; /* Underlying KDF algorithm (required for TME) */
};
struct hwkm_set_tpkey_cmd {
uint8_t sks; /* The slot to use as the TPKEY */
};
struct hwkm_unwrap_cmd {
uint8_t dks; /* Destination Key Slot */
uint8_t kwk; /* Key Wrapping Key Slot */
uint8_t wkb[HWKM_MAX_BLOB_SIZE];/* Wrapped Key Blob */
uint8_t sz; /* Length of WKB in bytes */
};
struct hwkm_wrap_cmd {
uint8_t sks; /* Destination Key Slot */
uint8_t kwk; /* Key Wrapping Key Slot */
struct hwkm_bsve bsve; /* Binding state vector */
};
struct hwkm_clear_cmd {
uint8_t dks; /* Destination key slot */
bool is_double_key; /* Whether this is a double key */
};
struct hwkm_cmd {
enum hwkm_op op; /* Operation */
enum hwkm_destination dest;
union /* Structs with opcode specific parameters */
{
struct hwkm_keygen_cmd keygen;
struct hwkm_rdwr_cmd rdwr;
struct hwkm_kdf_cmd kdf;
struct hwkm_set_tpkey_cmd set_tpkey;
struct hwkm_unwrap_cmd unwrap;
struct hwkm_wrap_cmd wrap;
struct hwkm_clear_cmd clear;
};
};
struct hwkm_rdwr_rsp {
struct hwkm_key_policy policy; /* Key policy for read */
uint8_t key[HWKM_MAX_KEY_SIZE]; /* Only available for read op */
size_t sz; /* Length of the key (bytes) */
};
struct hwkm_wrap_rsp {
uint8_t wkb[HWKM_MAX_BLOB_SIZE]; /* Wrapping key blob */
size_t sz; /* key blob len (bytes) */
};
struct hwkm_rsp {
u32 status;
union /* Structs with opcode specific outputs */
{
struct hwkm_rdwr_rsp rdwr;
struct hwkm_wrap_rsp wrap;
};
};
#if IS_ENABLED(CONFIG_QTI_HW_KEY_MANAGER)
int qti_hwkm_handle_cmd(struct hwkm_cmd *cmd, struct hwkm_rsp *rsp);
int qti_hwkm_clocks(bool on);
int qti_hwkm_init(const struct ice_mmio_data *mmio_data);
#else
/*
 * Stub for !CONFIG_QTI_HW_KEY_MANAGER builds.  It must carry the same
 * name as the real qti_hwkm_handle_cmd(); the original stub was
 * misnamed qti_hwkm_add_req, which would leave qti_hwkm_handle_cmd()
 * undeclared for callers when the config is disabled.
 */
static inline int qti_hwkm_handle_cmd(struct hwkm_cmd *cmd,
				      struct hwkm_rsp *rsp)
{
	return -EOPNOTSUPP;
}
static inline int qti_hwkm_clocks(bool on)
{
return -EOPNOTSUPP;
}
static inline int qti_hwkm_init(const struct ice_mmio_data *mmio_data)
{
return -EOPNOTSUPP;
}
#endif /* CONFIG_QTI_HW_KEY_MANAGER */
#endif /* __HWKM_H_ */

View file

@ -0,0 +1,119 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _TME_HWKM_MASTER_H_
#define _TME_HWKM_MASTER_H_
#include <linux/tme_hwkm_master_defs.h>
/**
* API functions
*/
/**
* Clear a Key Table entry.
*
* @param [in] key_id The ID of the key to clear.
* @param [out] err_info Extended error info
*
* @return 0 if successful, error code otherwise.
*/
uint32_t tme_hwkm_master_clearkey(uint32_t key_id,
struct tme_ext_err_info *err_info);
/**
* Generate a random key with an associated policy.
*
* @param [in] key_id The ID of the key to be generated.
* @param [in] policy The policy specifying the key to be generated.
* @param [in] cred_slot Credential slot to which this key will be bound.
* @param [out] err_info Extended error info
*
* @return 0 if successful, error code otherwise.
*/
uint32_t tme_hwkm_master_generatekey(uint32_t key_id,
struct tme_key_policy *policy,
uint32_t cred_slot,
struct tme_ext_err_info *err_info);
/**
* Derive a KEY using either HKDF or NIST algorithms.
*
* @param [in] key_id The ID of the key to be derived.
* @param [in] kdf_info Specifies how the key is to be derived
* and the properties of the derived key.
* @param [in] cred_slot Credential slot to which this key will be bound.
* @param [out] err_info Extended error info
*
* @return 0 if successful, error code otherwise.
*/
uint32_t tme_hwkm_master_derivekey(uint32_t key_id,
struct tme_kdf_spec *kdf_info,
uint32_t cred_slot,
struct tme_ext_err_info *err_info);
/**
* Wrap a key so that it can be safely moved outside the TME.
*
* @param [in] kwkey_id Denotes a key, already present in the
* Key Table, to be used to secure the target key.
* @param [in] targetkey_id Denotes the key to be wrapped.
* @param [in] cred_slot Credential slot to which this key is bound.
* @param [out] wrapped Buffer for wrapped key output from response
* @param [out] err_info Extended error info
*
* @return 0 if successful, error code otherwise.
*/
uint32_t tme_hwkm_master_wrapkey(uint32_t key_id,
uint32_t targetkey_id,
uint32_t cred_slot,
struct tme_wrapped_key *wrapped,
struct tme_ext_err_info *err_info);
/**
* Unwrap a key from outside the TME and store in the Key Table.
*
* @param [in] key_id The ID of the key to be unwrapped.
* @param [in] kwkey_id Denotes a key, already present in the
* Key Table, to be used to unwrap the key.
* @param [in] cred_slot Credential slot to which this key will be bound.
* @param [in] wrapped The key to be unwrapped.
* @param [out] err_info Extended error info
*
* @return 0 if successful, error code otherwise.
*/
uint32_t tme_hwkm_master_unwrapkey(uint32_t key_id,
uint32_t kwkey_id,
uint32_t cred_slot,
struct tme_wrapped_key *wrapped,
struct tme_ext_err_info *err_info);
/**
* Import a plaintext key from outside the TME and store in the Key Table.
*
* @param [in] key_id The ID of the key to be imported.
* @param [in] policy The Key Policy to be associated with the key.
* @param [in] keyMaterial The plaintext key material.
* @param [in] cred_slot Credential slot to which this key will be bound.
* @param [out] err_info Extended error info
*
* @return 0 if successful, error code otherwise.
*/
uint32_t tme_hwkm_master_importkey(uint32_t key_id,
struct tme_key_policy *policy,
struct tme_plaintext_key *key_material,
uint32_t cred_slot,
struct tme_ext_err_info *err_info);
/**
* Broadcast Transport Key to HWKM slaves.
*
* @param [out] err_info Extended error info
*
* @return 0 if successful, error code otherwise.
*/
uint32_t tme_hwkm_master_broadcast_transportkey(
struct tme_ext_err_info *err_info);
#endif /* _TME_HWKM_MASTER_H_ */

View file

@ -0,0 +1,462 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _TME_HWKM_MASTER_DEFS_H_
#define _TME_HWKM_MASTER_DEFS_H_
#include <linux/types.h>
#define UINT32_C(x) (x ## U)
/**
* Key ID
*/
/* L1 Key IDs that are Key Table slot numbers */
/**< CUS, 512 bits, in fuses */
#define TME_KID_CHIP_UNIQUE_SEED 8
/**< CRBK, 512 bits, in fuses */
#define TME_KID_CHIP_RAND_BASE 9
/**< L1 Key derived from L0 slot numbers 0-3 or 4-7 */
#define TME_KID_CHIP_FAM_L1 10
/* Transport Key ID */
#define TME_KID_TP 11/**< 528 bits, retained */
/**
* KeyPolicy
*/
/** Key Policy: 64-bit integer with bit encoded values */
struct tme_key_policy {
uint32_t low;
uint32_t high;
} __packed;
#define TME_KPHALFBITS 32
#define TME_KPCOMBINE(lo32, hi32) (((uint64_t)(lo32)) | \
(((uint64_t)(hi32)) << TME_KPHALFBITS))
/**
* Fields in Key Policy low word
*/
/** Key Type: Fundamental crypto algorithm groups */
/**< Position of Key Type bits */
#define TME_KT_Shift 0
/**< Mask for Key Type bits */
#define TME_KT_Mask (UINT32_C(0x07) << TME_KT_Shift)
/**< Symmetric algorithms */
#define TME_KT_Symmetric (UINT32_C(0x00) << TME_KT_Shift)
/**< Asymmetric algorithms: ECC */
#define TME_KT_Asymmetric_ECC (UINT32_C(0x01) << TME_KT_Shift)
/**< Asymmetric algorithms: RSA */
#define TME_KT_Asymmetric_RSA (UINT32_C(0x05) << TME_KT_Shift)
/** Key Length */
/**< Position of Key Length bits */
#define TME_KL_Shift 3
/**< Mask for Key Length bits */
#define TME_KL_Mask (UINT32_C(0x0F) << TME_KL_Shift)
/**< 64 bits - AES/2TDES */
#define TME_KL_64 (UINT32_C(0x00) << TME_KL_Shift)
/**< 128 bits - AES/2TDES */
#define TME_KL_128 (UINT32_C(0x01) << TME_KL_Shift)
/**< 192 bits - AES/3TDES */
#define TME_KL_192 (UINT32_C(0x02) << TME_KL_Shift)
/**< 224 bits - ECDSA */
#define TME_KL_224 (UINT32_C(0x03) << TME_KL_Shift)
/**< 256 bits - ECDSA/AES */
#define TME_KL_256 (UINT32_C(0x04) << TME_KL_Shift)
/**< 384 bits - ECDSA */
#define TME_KL_384 (UINT32_C(0x05) << TME_KL_Shift)
/**< 448 bits - ECDSA */
#define TME_KL_448 (UINT32_C(0x06) << TME_KL_Shift)
/**< 512 bits - ECDSA/HMAC/KDF/AES-SIV/AES-XTS */
#define TME_KL_512 (UINT32_C(0x07) << TME_KL_Shift)
/**< 521 bits - ECDSA/HMAC/KDF */
#define TME_KL_521 (UINT32_C(0x08) << TME_KL_Shift)
/**< 2048 bits - RSA */
#define TME_KL_2048 (UINT32_C(0x09) << TME_KL_Shift)
/**< 3072 bits - RSA */
#define TME_KL_3072 (UINT32_C(0x0A) << TME_KL_Shift)
/**< 4096 bits - RSA */
#define TME_KL_4096 (UINT32_C(0x0B) << TME_KL_Shift)
/**
* Key Profile: Only applicable at present
* if Key Type is #TME_KT_Symmetric
*/
/**< Position of Key Profile bits */
#define TME_KP_Shift 7
/**< Mask for Key Class bits */
#define TME_KP_Mask (UINT32_C(0x07) << TME_KP_Shift)
/**< If Key Type is #TME_KT_Symmetric */
#define TME_KP_Generic (UINT32_C(0x00) << TME_KP_Shift)
/**< If Key Type is #TME_KT_Symmetric (aka KDK) */
#define TME_KP_KeyDerivation (UINT32_C(0x01) << TME_KP_Shift)
/**< If Key Type is #TME_KT_Symmetric (aka KWK) */
#define TME_KP_KWK_STORAGE (UINT32_C(0x02) << TME_KP_Shift)
/**< If Key Type is #TME_KT_Symmetric (aka KSK) */
#define TME_KP_KWK_SESSION (UINT32_C(0x03) << TME_KP_Shift)
/**< If Key Type is #TME_KT_Symmetric (aka TPK) */
#define TME_KP_KWK_TRANSPORT (UINT32_C(0x04) << TME_KP_Shift)
/**< If Key Type is #TME_KT_Symmetric */
#define TME_KP_KWK_XPORT (UINT32_C(0x05) << TME_KP_Shift)
/**< If Key Type is not #TME_KT_Symmetric */
#define TME_KP_Unused (UINT32_C(0x00) << TME_KP_Shift)
/** Key Operation: Crypto operations permitted for a key */
/**< Position of Key Operation bits */
#define TME_KOP_Shift 10
/**< Mask for Key Operation bits */
#define TME_KOP_Mask (UINT32_C(0x0F) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Symmetric */
#define TME_KOP_Encryption (UINT32_C(0x01) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Symmetric */
#define TME_KOP_Decryption (UINT32_C(0x02) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Symmetric */
#define TME_KOP_MAC (UINT32_C(0x04) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Symmetric */
#define TME_KOP_NISTDerive (UINT32_C(0x04) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Symmetric */
#define TME_KOP_HKDFExtract (UINT32_C(0x08) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Symmetric */
#define TME_KOP_HKDFExpand (UINT32_C(0x09) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Asymmetric_ECC */
#define TME_KOP_ECDSASign (UINT32_C(0x01) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Asymmetric_ECC */
#define TME_KOP_ECDSAVerify (UINT32_C(0x02) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Asymmetric_ECC */
#define TME_KOP_ECDHSharedSecret (UINT32_C(0x04) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Asymmetric_RSA */
#define TME_KOP_RSAASign (UINT32_C(0x01) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Asymmetric_RSA */
#define TME_KOP_RSAAVerify (UINT32_C(0x02) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Asymmetric_RSA */
#define TME_KOP_RSAEnc (UINT32_C(0x04) << TME_KOP_Shift)
/**< If Key Type is #TME_KT_Asymmetric_RSA */
#define TME_KOP_RSADec (UINT32_C(0x08) << TME_KOP_Shift)
/** Key Algorithm */
/**< Position of Key Algorithm bits */
#define TME_KAL_Shift 14
/**< Mask for Key Algorithm bits */
#define TME_KAL_Mask (UINT32_C(0x3F) << TME_KAL_Shift)
/**< If Key Type is #TME_KT_Symmetric */
#define TME_KAL_AES128_ECB (UINT32_C(0x00) << TME_KAL_Shift)
#define TME_KAL_AES256_ECB (UINT32_C(0x01) << TME_KAL_Shift)
#define TME_KAL_DES_ECB (UINT32_C(0x02) << TME_KAL_Shift)
#define TME_KAL_TDES_ECB (UINT32_C(0x03) << TME_KAL_Shift)
#define TME_KAL_AES128_CBC (UINT32_C(0x04) << TME_KAL_Shift)
#define TME_KAL_AES256_CBC (UINT32_C(0x05) << TME_KAL_Shift)
#define TME_KAL_DES_CBC (UINT32_C(0x06) << TME_KAL_Shift)
#define TME_KAL_TDES_CBC (UINT32_C(0x07) << TME_KAL_Shift)
#define TME_KAL_AES128_CCM_TC (UINT32_C(0x08) << TME_KAL_Shift)
#define TME_KAL_AES128_CCM_NTC (UINT32_C(0x09) << TME_KAL_Shift)
#define TME_KAL_AES256_CCM_TC (UINT32_C(0x0A) << TME_KAL_Shift)
#define TME_KAL_AES256_CCM_NTC (UINT32_C(0x0B) << TME_KAL_Shift)
#define TME_KAL_AES256_SIV (UINT32_C(0x0C) << TME_KAL_Shift)
#define TME_KAL_AES128_CTR (UINT32_C(0x0D) << TME_KAL_Shift)
#define TME_KAL_AES256_CTR (UINT32_C(0x0E) << TME_KAL_Shift)
#define TME_KAL_AES128_XTS (UINT32_C(0x0F) << TME_KAL_Shift)
#define TME_KAL_AES256_XTS (UINT32_C(0x10) << TME_KAL_Shift)
#define TME_KAL_SHA1_HMAC (UINT32_C(0x11) << TME_KAL_Shift)
#define TME_KAL_SHA256_HMAC (UINT32_C(0x12) << TME_KAL_Shift)
#define TME_KAL_AES128_CMAC (UINT32_C(0x13) << TME_KAL_Shift)
#define TME_KAL_AES256_CMAC (UINT32_C(0x14) << TME_KAL_Shift)
#define TME_KAL_SHA384_HMAC (UINT32_C(0x15) << TME_KAL_Shift)
#define TME_KAL_SHA512_HMAC (UINT32_C(0x16) << TME_KAL_Shift)
#define TME_KAL_AES128_GCM (UINT32_C(0x17) << TME_KAL_Shift)
#define TME_KAL_AES256_GCM (UINT32_C(0x18) << TME_KAL_Shift)
#define TME_KAL_KASUMI (UINT32_C(0x19) << TME_KAL_Shift)
#define TME_KAL_SNOW3G (UINT32_C(0x1A) << TME_KAL_Shift)
#define TME_KAL_ZUC (UINT32_C(0x1B) << TME_KAL_Shift)
#define TME_KAL_PRINCE (UINT32_C(0x1C) << TME_KAL_Shift)
#define TME_KAL_SIPHASH (UINT32_C(0x1D) << TME_KAL_Shift)
#define TME_KAL_TDES_2KEY_CBC (UINT32_C(0x1E) << TME_KAL_Shift)
#define TME_KAL_TDES_2KEY_ECB (UINT32_C(0x1F) << TME_KAL_Shift)
#define TME_KAL_KDF_NIST (UINT32_C(0x20) << TME_KAL_Shift)
#define TME_KAL_KDF_HKDF (UINT32_C(0x21) << TME_KAL_Shift)
/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is ECC */
#define TME_KAL_ECC_ALGO_ECDSA (UINT32_C(0x00) << TME_KAL_Shift)
/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is ECC */
#define TME_KAL_ECC_ALGO_ECDH (UINT32_C(0x01) << TME_KAL_Shift)
/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is ECC */
#define TME_KAL_ECC_CURVE_NIST (UINT32_C(0x00) << TME_KAL_Shift)
/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is ECC */
#define TME_KAL_ECC_CURVE_BPOOL (UINT32_C(0x08) << TME_KAL_Shift)
/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is RSA */
#define TME_KAL_DSA (UINT32_C(0x00) << TME_KAL_Shift)
/**< If Key Type is #TME_KT_Asymmetric, Key Subtype is RSA */
#define TME_KAL_DH (UINT32_C(0x01) << TME_KAL_Shift)
/** Key Security Level (2 bits, key-policy low word bits 20-21) */
/**< Position of Key Security Level bits */
#define TME_KSL_Shift 20
/**< Mask for Key Security Level bits */
#define TME_KSL_Mask (UINT32_C(0x03) << TME_KSL_Shift)
/**< Software Key */
#define TME_KSL_SWKey (UINT32_C(0x00) << TME_KSL_Shift)
/**< Hardware Managed Key */
#define TME_KSL_HWManagedKey (UINT32_C(0x01) << TME_KSL_Shift)
/**< Hardware Key */
#define TME_KSL_HWKey (UINT32_C(0x02) << TME_KSL_Shift)
/**
 * Key Destination (4 bits, low word bits 22-25).
 * Values are one-hot, so destinations appear to be OR-able flags --
 * confirm against the TME firmware interface spec.
 */
/**< Position of Key Destination bits */
#define TME_KD_Shift 22
/**< Mask for Key Destination bits */
#define TME_KD_Mask (UINT32_C(0x0F) << TME_KD_Shift)
/**< Master */
#define TME_KD_TME_HW (UINT32_C(0x01) << TME_KD_Shift)
/**< ICE Slave */
#define TME_KD_ICE (UINT32_C(0x02) << TME_KD_Shift)
/**< GPCE Slave */
#define TME_KD_GPCE (UINT32_C(0x04) << TME_KD_Shift)
/**< Modem CE Slave */
#define TME_KD_MDM_CE (UINT32_C(0x08) << TME_KD_Shift)
/** Key Owner (4 bits, low word bits 26-29; enumerated, not flags) */
/**< Position of Key Owner bits */
#define TME_KO_Shift 26
/**< Mask for Key Owner bits */
#define TME_KO_Mask (UINT32_C(0x0F) << TME_KO_Shift)
/**< TME Hardware */
#define TME_KO_TME_HW (UINT32_C(0x00) << TME_KO_Shift)
/**< TME Firmware */
#define TME_KO_TME_FW (UINT32_C(0x01) << TME_KO_Shift)
/**< TZ (= APPS-S) */
#define TME_KO_TZ (UINT32_C(0x02) << TME_KO_Shift)
/**< HLOS / HYP (= APPS-NS) */
#define TME_KO_HLOS_HYP (UINT32_C(0x03) << TME_KO_Shift)
/**< Modem */
#define TME_KO_MDM (UINT32_C(0x04) << TME_KO_Shift)
/**< SPU */
#define TME_KO_SPU (UINT32_C(0x0F) << TME_KO_Shift)
/** Key Lineage (2 bits, low word bits 30-31) */
/**< Position of Key Lineage bits */
#define TME_KLI_Shift 30
/**< Mask for Key Lineage bits */
#define TME_KLI_Mask (UINT32_C(0x03) << TME_KLI_Shift)
/**< Not applicable */
#define TME_KLI_NA (UINT32_C(0x00) << TME_KLI_Shift)
/**< Not provisioned, chip unique */
#define TME_KLI_NP_CU (UINT32_C(0x01) << TME_KLI_Shift)
/**< Provisioned, not chip unique */
#define TME_KLI_P_NCU (UINT32_C(0x02) << TME_KLI_Shift)
/**< Provisioned, chip unique */
#define TME_KLI_P_CU (UINT32_C(0x03) << TME_KLI_Shift)
/**
 * Fields in Key Policy high word *
 *
 * Shifts are expressed relative to the high 32-bit word: the absolute
 * bit position (33, 37, ...) minus TME_KPHALFBITS (defined earlier in
 * this header) yields the shift within the high word.
 */
/** Reserved Bits, Group 1 (1 bit) */
/**< Position of Reserved bits */
#define TME_KR1_Shift (32 - TME_KPHALFBITS)
/**< Mask for Reserved bits */
#define TME_KR1_Mask (UINT32_C(0x01) << TME_KR1_Shift)
/** Key Wrapping Constraints (4 one-hot flag bits) */
/**< Position of Key Attribute bits */
#define TME_KWC_Shift (33 - TME_KPHALFBITS)
/**< Mask for Key Attribute bits */
#define TME_KWC_Mask (UINT32_C(0x0F) << TME_KWC_Shift)
/**< Key is wrappable with KWK_EXPORT */
#define TME_KWC_Wrappable_KXP (UINT32_C(0x01) << TME_KWC_Shift)
/**< Key is wrappable with KWK_STORAGE */
#define TME_KWC_Wrappable_KWK (UINT32_C(0x02) << TME_KWC_Shift)
/**< Key is wrappable with KWK_TRANSPORT */
#define TME_KWC_Wrappable_KTP (UINT32_C(0x04) << TME_KWC_Shift)
/**< Key is wrappable with KWK_SESSION */
#define TME_KWC_Wrappable_KSK (UINT32_C(0x08) << TME_KWC_Shift)
/** Throttling (1 bit) */
/**< Position of Throttling bits */
#define TME_KTH_Shift (37 - TME_KPHALFBITS)
/**< Mask for Throttling bits */
#define TME_KTH_Mask (UINT32_C(0x01) << TME_KTH_Shift)
/**< Throttling enabled */
#define TME_KTH_Enabled (UINT32_C(0x01) << TME_KTH_Shift)
/** Reserved Bits, Group 2 (6 bits) */
/**< Position of Reserved bits */
#define TME_KR2_Shift (38 - TME_KPHALFBITS)
/**< Mask for Reserved bits */
#define TME_KR2_Mask (UINT32_C(0x3F) << TME_KR2_Shift)
/** Key Policy Version (4 bits) */
/**< Position of Key Policy Version bits */
#define TME_KPV_Shift (44 - TME_KPHALFBITS)
/**< Mask for Key Policy Version bits */
#define TME_KPV_Mask (UINT32_C(0x0F) << TME_KPV_Shift)
/**< Current Key Policy Version value (within #TME_KPV_Mask) */
#define TME_KPV_Version (UINT32_C(0x03) << TME_KPV_Shift)
/** Key Authorised Users (8 one-hot flag bits; OR them to combine) */
/**< Position of Authorised User bits */
#define TME_KAU_Shift (48 - TME_KPHALFBITS)
/**< Mask for Authorised User bits */
#define TME_KAU_Mask (UINT32_C(0xFF) << TME_KAU_Shift)
/**< Key usable by TME Hardware */
#define TME_KAU_TME_HW (UINT32_C(0x01) << TME_KAU_Shift)
/**< Key usable by TME Firmware */
#define TME_KAU_TME_FW (UINT32_C(0x02) << TME_KAU_Shift)
/**< Key usable by TZ (= APPS_S) */
#define TME_KAU_TZ (UINT32_C(0x04) << TME_KAU_Shift)
/**< Key usable by HLOS / HYP (= APPS_NS) */
#define TME_KAU_HLOS_HYP (UINT32_C(0x08) << TME_KAU_Shift)
/**< Key usable by Modem */
#define TME_KAU_MDM (UINT32_C(0x10) << TME_KAU_Shift)
/**< Key usable by SPU */
#define TME_KAU_SPU (UINT32_C(0x20) << TME_KAU_Shift)
/**< Key usable by all EEs */
#define TME_KAU_ALL TME_KAU_Mask
/**
 * Credentials for throttling
 */
#define TME_CRED_SLOT_ID_NONE 0 /**< No throttling */
#define TME_CRED_SLOT_ID_1 1 /**< Credential slot 1 */
#define TME_CRED_SLOT_ID_2 2 /**< Credential slot 2 */
/**
 * KDFSpec and associated structures
 */
/** Maximum context size that can be sent to the TME, in bytes */
#define TME_KDF_SW_CONTEXT_BYTES_MAX 128
/** Maximum salt (HKDF) / label (NIST) size, in bytes */
#define TME_KDF_SALT_LABEL_BYTES_MAX 64
/**
 * Security info to be appended to a KDF context by the Sequencer
 *
 * These fields allow keys to be tied to specific devices, states,
 * OEMs, subsystems, etc.
 * Values are obtained by the Sequencer from hardware, such as
 * fuses or internal registers.
 *
 * One-hot flag bits for tme_kdf_spec.security_context; OR together
 * the fields to be appended.  The parenthesised size is the width of
 * the data each flag appends, not the width of the flag itself.
 */
#define TME_KSC_SOCTestSignState 0x00000001 /**< (32 bits) */
#define TME_KSC_SOCSecBootState 0x00000002 /**< (8 bits) */
#define TME_KSC_SOCDebugState 0x00000004 /**< (8 bits) */
#define TME_KSC_TMELifecycleState 0x00000008 /**< (8 bits) */
#define TME_KSC_BootStageOTP 0x00000010 /**< (8 bits) */
#define TME_KSC_SWContext 0x00000020 /**< (variable) */
#define TME_KSC_ChildKeyPolicy 0x00000040 /**< (64 bits) */
#define TME_KSC_MixingKey 0x00000080 /**< (key len) */
#define TME_KSC_ChipUniqueID 0x00000100 /**< (64 bits) */
#define TME_KSC_ChipDeviceNumber 0x00000200 /**< (32 bits) */
#define TME_KSC_TMEPatchVer 0x00000400 /**< (512 bits) */
#define TME_KSC_SOCPatchVer 0x00000800 /**< (512 bits) */
#define TME_KSC_OEMID 0x00001000 /**< (16 bits) */
#define TME_KSC_OEMProductID 0x00002000 /**< (16 bits) */
#define TME_KSC_TMEImgSecVer 0x00004000 /**< (512 bits) */
#define TME_KSC_SOCInitImgSecVer 0x00008000 /**< (512 bits) */
#define TME_KSC_OEMMRCHash 0x00010000 /**< (512 bits) */
#define TME_KSC_OEMProductSeed 0x00020000 /**< (128 bits) */
#define TME_KSC_SeqPatchVer 0x00040000 /**< (512 bits) */
#define TME_KSC_HWMeasurement1 0x00080000 /**< (512 bits) */
#define TME_KSC_HWMeasurement2 0x00100000 /**< (512 bits) */
#define TME_KSC_Reserved 0xFFE00000 /**< RFU (all remaining bits, 21-31) */
/** KDF Specification: encompasses both HKDF and NIST KDF algorithms
 *
 * Packed wire-format structure exchanged with TME firmware; field
 * order, types, and the absence of padding are part of the ABI.
 */
struct tme_kdf_spec {
	/* Info common to HKDF and NIST algorithms */
	/**< @c TME_KAL_KDF_HKDF or @c TME_KAL_KDF_NIST */
	uint32_t kdfalgo;
	/**< IKM for HKDF; IKS for NIST */
	uint32_t inputkey;
	/**< If @c TME_KSC_MixingKey set in Security Context */
	uint32_t mixkey;
	/**< If deriving a L3 key */
	uint32_t l2key;
	/**< Derived key policy */
	struct tme_key_policy policy;
	/**< Software provided context (at most
	 * @c TME_KDF_SW_CONTEXT_BYTES_MAX bytes used)
	 */
	uint8_t swcontext[TME_KDF_SW_CONTEXT_BYTES_MAX];
	/**< Length of @c swcontext in bytes */
	uint32_t swcontextLength;
	/**< Info to be appended to @c swcontext: OR of @c TME_KSC_* flags */
	uint32_t security_context;
	/**< Salt for HKDF; Label for NIST */
	uint8_t salt_label[TME_KDF_SALT_LABEL_BYTES_MAX];
	/**< Length of @c salt_label in bytes */
	uint32_t salt_labelLength;
	/* Additional info specific to HKDF: kdfAlgo == @c KAL_KDF_HKDF */
	/**< PRF Digest algorithm: @c KAL_SHA256_HMAC or @c KAL_SHA512_HMAC */
	uint32_t prf_digest_algo;
} __packed;
/**
 * WrappedKey and associated structures
 */
/* Maximum wrapped key context size, in bytes */
/**< Cipher Text 68B, MAC 16B, KeyPolicy 8B, Nonce 8B (= 100B total) */
#define TME_WK_CONTEXT_BYTES_MAX 100
/** Opaque wrapped-key blob; packed wire-format ABI shared with TME firmware */
struct tme_wrapped_key {
	/**< Wrapped key context */
	uint8_t key[TME_WK_CONTEXT_BYTES_MAX];
	/**< Length of @c key in bytes */
	uint32_t length;
} __packed;
/**
 * Plain text Key and associated structures
 */
/* Maximum plain text key size, in bytes */
#define TME_PT_KEY_BYTES_MAX 68
/**
 * Key format for intrinsically word aligned key
 * lengths like 128/256/384/512... bits.
 *
 * Example: 256-bit key integer representation,
 * Key = 0xK31 K30 K29.......K0
 * Byte array, key[] = {0xK31, 0xK30, 0xK29, ...., 0xK0}
 *
 *
 * Key format for non-word aligned key lengths like 521 bits.
 * The key length is rounded off to next word ie, 544 bits.
 *
 * Example: 521-bit key, Key = 0xK65 K64 K63.......K2 K1 K0
 * [bits 1-7 of K0 is expected to be zeros]
 * 544 bit integer representation, Key = 0xK65 K64 K63.......K2 K1 K0 00 00
 * Byte array, key[] = {0xK65, 0xK64, 0xK63, ...., 0xK2, 0xK1, 0xK0, 0x00, 0x00}
 *
 */
/** Plain-text key material; packed wire-format ABI shared with TME firmware */
struct tme_plaintext_key {
	/**< Plain text key, laid out as described above */
	uint8_t key[TME_PT_KEY_BYTES_MAX];
	/**< Length of @c key in bytes */
	uint32_t length;
} __packed;
/**
 * Extended Error Information structure
 *
 * Packed wire-format structure returned by TME firmware; mirrors
 * hardware status registers.  A non-zero value in any field indicates
 * an error worth logging.
 */
struct tme_ext_err_info {
	/* TME FW */
	/**< TME FW Response status. */
	uint32_t tme_err_status;
	/* SEQ FW */
	/**< Contents of CSR_CMD_ERROR_STATUS */
	uint32_t seq_err_status;
	/* SEQ HW Key Policy */
	/**< CRYPTO_ENGINE_CRYPTO_KEY_POLICY_ERROR_STATUS0 */
	uint32_t seq_kp_err_status0;
	/**< CRYPTO_ENGINE_CRYPTO_KEY_POLICY_ERROR_STATUS1 */
	uint32_t seq_kp_err_status1;
	/**
	 * Debug information: log/print this information
	 * if any of the above fields is non-zero
	 */
	/**< Contents of CSR_CMD_RESPONSE_STATUS */
	uint32_t seq_rsp_status;
} __packed;
#endif /* _TME_HWKM_MASTER_DEFS_H_ */