diff --git a/arch/arm64/include/asm/kvm_rme.h b/arch/arm64/include/asm/kvm_rme.h
index 55c4e7b2dcf88c67d66c29d22894ef3819bdc508..35f2771602c4d666451b846658c7509017c9deb3 100644
--- a/arch/arm64/include/asm/kvm_rme.h
+++ b/arch/arm64/include/asm/kvm_rme.h
@@ -64,6 +64,9 @@ struct realm {
 	unsigned long num_aux;
 	unsigned int vmid;
 	unsigned int ia_bits;
+#ifndef __GENKSYMS__
+	bool is_ccal;
+#endif
 };
 
 /**
diff --git a/arch/arm64/include/asm/kvm_rme_ccal.h b/arch/arm64/include/asm/kvm_rme_ccal.h
new file mode 100644
index 0000000000000000000000000000000000000000..21fba97941485336e9bae246a321da15e221dfb5
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_rme_ccal.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+#ifndef __ASM_KVM_RME_CCAL_H
+#define __ASM_KVM_RME_CCAL_H
+
+#include
+#include
+
+enum CCAL_GRANULE_TYPES {
+	CCAL_GRANULE_NORMAL,
+	CCAL_GRANULE_DEV,
+	CCAL_GRANULE_NS,
+	CCAL_GRANULE_TYPES_NUM
+};
+
+static inline bool is_ccal_rvm(struct realm *realm)
+{
+	return realm->is_ccal;
+}
+
+void config_realm_ccal(struct realm *realm);
+
+int realm_ccal_populate_region(struct kvm *kvm, phys_addr_t ipa_base,
+			       phys_addr_t ipa_end, phys_addr_t *ipa_top,
+			       u32 flags);
+
+int realm_ccal_map_ram(struct kvm *kvm,
+		       struct arm_rme_populate_realm *args);
+
+int ccal_create_rtt_levels(struct realm *realm, unsigned long ipa, int level,
+			   int max_level);
+
+void realm_ccal_destroy_data_range(struct kvm *kvm, unsigned long start,
+				   unsigned long end);
+
+int ccal_fold_rtt_level(struct realm *realm, int level, unsigned long start,
+			unsigned long end);
+
+int ccal_fold_rtt(struct realm *realm, unsigned long addr, int level);
+
+#endif
diff --git a/arch/arm64/include/asm/rmi_cmds.h b/arch/arm64/include/asm/rmi_cmds.h
index 27cd2751f3bfa4c599c0498301ec3c8f611cdf1b..8dc752ca9a74c98a81a98b2ff356fb6ba9f8b2f2 100644
--- a/arch/arm64/include/asm/rmi_cmds.h
+++ b/arch/arm64/include/asm/rmi_cmds.h
@@ -505,4 +505,141 @@ static inline int rmi_rtt_unmap_unprotected(unsigned long rd,
 	return res.a0;
 }
 
+static inline int rmi_ccal_delegate_range(unsigned long start_addr,
+					  unsigned long size)
+{
+	struct arm_smccc_1_2_regs regs = {
+		SMC_RMI_HISI_EXT, CCAL_DELEGATE_RANGE,
+		start_addr, size
+	};
+
+	arm_smccc_1_2_smc(&regs, &regs);
+
+	return regs.a0;
+}
+
+static inline int rmi_ccal_delegate_io_range(unsigned long start_addr,
+					     unsigned long size)
+{
+	struct arm_smccc_1_2_regs regs = {
+		SMC_RMI_HISI_EXT, CCAL_IO_DELEGATE_RANGE,
+		start_addr, size
+	};
+
+	arm_smccc_1_2_smc(&regs, &regs);
+
+	return regs.a0;
+}
+
+static inline int rmi_ccal_undelegate_range(unsigned long start_addr,
+					    unsigned long size)
+{
+	struct arm_smccc_1_2_regs regs = {
+		SMC_RMI_HISI_EXT, CCAL_UNDELEGATE_RANGE,
+		start_addr, size
+	};
+
+	arm_smccc_1_2_smc(&regs, &regs);
+
+	return regs.a0;
+}
+
+static inline int rmi_ccal_undelegate_io_range(unsigned long start_addr,
+					       unsigned long size)
+{
+	struct arm_smccc_1_2_regs regs = {
+		SMC_RMI_HISI_EXT, CCAL_IO_UNDELEGATE_RANGE,
+		start_addr, size
+	};
+
+	arm_smccc_1_2_smc(&regs, &regs);
+
+	return regs.a0;
+}
+
+static inline int rmi_ccal_rtt_create(unsigned long rd, unsigned long ipa,
+				      unsigned long level, unsigned long rtt1,
+				      unsigned long rtt2)
+{
+	struct arm_smccc_1_2_regs regs = {
+		SMC_RMI_HISI_EXT, CCAL_RTT_PAIR_CREATE,
+		rd, rtt1, rtt2, ipa, level
+	};
+
+	arm_smccc_1_2_smc(&regs, &regs);
+
+	return regs.a0;
+}
+
+static inline int rmi_ccal_rtt_fold(unsigned long rd, unsigned long ipa,
+				    unsigned long level,
+				    unsigned long *out_rtt1,
+				    unsigned long *out_rtt2)
+{
+	struct arm_smccc_1_2_regs regs = {
+		SMC_RMI_HISI_EXT, CCAL_RTT_FOLD,
+		rd, ipa, level
+	};
+
+	arm_smccc_1_2_smc(&regs, &regs);
+
+	if (RMI_RETURN_STATUS(regs.a0) == RMI_SUCCESS) {
+		if (out_rtt1)
+			*out_rtt1 = regs.a1;
+
+		if (out_rtt2)
+			*out_rtt2 = regs.a2;
+	}
+
+	return regs.a0;
+}
+
+static inline int rmi_ccal_block_create(unsigned long rd, unsigned long data,
+					unsigned long ipa, unsigned long src,
+					unsigned long flags)
+{
+	struct arm_smccc_1_2_regs regs = {
+		SMC_RMI_HISI_EXT, CCAL_BLOCK_DATA_CREATE_LVL2,
+		rd, data, ipa, src, flags
+	};
+
+	arm_smccc_1_2_smc(&regs, &regs);
+
+	return regs.a0;
+}
+
+static inline int rmi_ccal_block_create_unknown(unsigned long rd,
+						unsigned long data,
+						unsigned long ipa)
+{
+	struct arm_smccc_1_2_regs regs = {
+		SMC_RMI_HISI_EXT, CCAL_BLOCK_DATA_CREATE_UNKNOWN_LVL2,
+		rd, data, ipa
+	};
+
+	arm_smccc_1_2_smc(&regs, &regs);
+
+	return regs.a0;
+}
+
+static inline int rmi_ccal_data_destroy(unsigned long rd, unsigned long ipa,
+					unsigned long *pa, unsigned long *size,
+					unsigned long *granule_type,
+					unsigned long *top_ipa)
+{
+	struct arm_smccc_1_2_regs regs = {
+		SMC_RMI_HISI_EXT, CCAL_DATA_DESTROY,
+		rd, ipa
+	};
+
+	arm_smccc_1_2_smc(&regs, &regs);
+
+	*pa = regs.a1;
+	*size = regs.a2;
+	*granule_type = regs.a3;
+	*top_ipa = regs.a4;
+
+	return regs.a0;
+}
+
 #endif /* __ASM_RMI_CMDS_H */
diff --git a/arch/arm64/include/asm/rmi_smc.h b/arch/arm64/include/asm/rmi_smc.h
index 7a93a3e0ac6eb981c95468fd616d8c62a220f634..c125a2b79cef3ea91cbe8402a3c04c6ab83305c4 100644
--- a/arch/arm64/include/asm/rmi_smc.h
+++ b/arch/arm64/include/asm/rmi_smc.h
@@ -45,6 +45,21 @@
 #define SMC_RMI_RTT_INIT_RIPAS		SMC_RMI_CALL(0x0168)
 #define SMC_RMI_RTT_SET_RIPAS		SMC_RMI_CALL(0x0169)
 
+#define SMC_RMI_HISI_EXT		SMC_RMI_CALL(0x018F)
+
+enum hisi_ext_cmd {
+	CCAL_DELEGATE_RANGE = 0,
+	CCAL_UNDELEGATE_RANGE,
+	CCAL_IO_DELEGATE_RANGE,
+	CCAL_IO_UNDELEGATE_RANGE,
+	CCAL_RTT_PAIR_CREATE,
+	CCAL_RTT_DESTROY,
+	CCAL_RTT_FOLD,
+	CCAL_BLOCK_DATA_CREATE_LVL2,
+	CCAL_BLOCK_DATA_CREATE_UNKNOWN_LVL2,
+	CCAL_DATA_DESTROY,
+};
+
 #define RMI_ABI_MAJOR_VERSION	1
 #define RMI_ABI_MINOR_VERSION	0
 
@@ -91,6 +106,7 @@ enum rmi_ripas {
 #define RMI_REALM_PARAM_FLAG_LPA2	BIT(0)
 #define RMI_REALM_PARAM_FLAG_SVE	BIT(1)
 #define RMI_REALM_PARAM_FLAG_PMU	BIT(2)
+#define RMI_REALM_PARAM_FLAG_CCAL	BIT(3)
 
 /*
  * Note many of these fields are smaller than u64 but all fields have u64
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 47eea5041899d0f9f0b3ec962381964be28c0b32..04215d303762793ddb22e3c733cdcb7bbdd163a2 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -430,10 +430,12 @@ enum {
 #define KVM_CAP_ARM_RME_INIT_RIPAS_REALM	2
 #define KVM_CAP_ARM_RME_POPULATE_REALM		3
 #define KVM_CAP_ARM_RME_ACTIVATE_REALM		4
+#define KVM_CAP_ARM_RME_MAP_RAM_CCAL		5
 
 /* List of configuration items accepted for KVM_CAP_ARM_RME_CONFIG_REALM */
 #define ARM_RME_CONFIG_RPV			0
 #define ARM_RME_CONFIG_HASH_ALGO		1
+#define ARM_RME_CFG_CCAL			2
 
 #define ARM_RME_CONFIG_MEASUREMENT_ALGO_SHA256	0
 #define ARM_RME_CONFIG_MEASUREMENT_ALGO_SHA512	1
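For context, the new userspace interface rides on the existing KVM_CAP_ARM_RME enable-cap path: ARM_RME_CFG_CCAL selects CCAL as a realm-creation parameter, and KVM_CAP_ARM_RME_MAP_RAM_CCAL maps guest RAM through the new helper. A minimal VMM-side sketch follows; it assumes the struct arm_rme_config layout and the KVM_CAP_ARM_RME / KVM_CAP_ARM_RME_CONFIG_REALM plumbing from the base RME UAPI (not shown in this patch), and both helper names are illustrative only.

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Illustrative only: CCAL must be configured before the realm
	 * descriptor is created, since it sets a creation-time flag.
	 */
	static int realm_enable_ccal(int vm_fd)
	{
		struct arm_rme_config cfg = { .cfg = ARM_RME_CFG_CCAL };
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_ARM_RME,
			.args = { KVM_CAP_ARM_RME_CONFIG_REALM,
				  (__u64)(unsigned long)&cfg },
		};

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}

	/* Illustrative only: map [base, base + size) of guest IPA space
	 * through the new CCAL path (contents start as unmeasured/unknown).
	 */
	static int realm_map_ram_ccal(int vm_fd, __u64 base, __u64 size)
	{
		struct arm_rme_populate_realm args = {
			.base = base,
			.size = size,
		};
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_ARM_RME,
			.args = { KVM_CAP_ARM_RME_MAP_RAM_CCAL,
				  (__u64)(unsigned long)&args },
		};

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}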
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index f48a34d5b0bc0f4b6da51b448b5d8a92dcee8284..8542c4be71abbb5ea73e2a18a41af41cd67096ca 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -21,7 +21,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o pvsched.o \
	 vgic/vgic-mmio.o vgic/vgic-mmio-v2.o \
	 vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \
	 vgic/vgic-its.o vgic/vgic-debug.o \
-	 rme.o rme-exit.o cca_base.o
+	 rme.o rme-exit.o cca_base.o rme-ccal.o
 
 kvm-$(CONFIG_VIRT_PLAT_DEV) += vgic/shadow_dev.o
 kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
diff --git a/arch/arm64/kvm/rme-ccal.c b/arch/arm64/kvm/rme-ccal.c
new file mode 100644
index 0000000000000000000000000000000000000000..e86f61b7becd34b312a02dfb75eb8c08a66aec73
--- /dev/null
+++ b/arch/arm64/kvm/rme-ccal.c
@@ -0,0 +1,630 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2025-2025. All rights reserved.
+ */
+
+#include
+#include
+#include
+#include
+
+#define RMM_PAGE_SHIFT		12
+#define RMM_PAGE_SIZE		BIT(RMM_PAGE_SHIFT)
+
+#define RMM_RTT_BLOCK_LEVEL	2
+#define RMM_RTT_MAX_LEVEL	3
+
+/* See ARM64_HW_PGTABLE_LEVEL_SHIFT() */
+#define RMM_RTT_LEVEL_SHIFT(l)	\
+	((RMM_PAGE_SHIFT - 3) * (4 - (l)) + 3)
+#define RMM_L2_BLOCK_SIZE	BIT(RMM_RTT_LEVEL_SHIFT(2))
+#define RMM_L1_BLOCK_SIZE	BIT(RMM_RTT_LEVEL_SHIFT(1))
+
+#define CCAL_RTT_PAGE_ORDER	1U
+#define CCAL_RTT_PAGE_NUM	(1U << CCAL_RTT_PAGE_ORDER)
+#define CCAL_RTT_ENTRY_NUM	512U
+
+static inline unsigned long rme_rtt_level_mapsize(int level)
+{
+	if (WARN_ON(level > RMM_RTT_MAX_LEVEL))
+		return RMM_PAGE_SIZE;
+
+	return (1UL << RMM_RTT_LEVEL_SHIFT(level));
+}
+
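For reference, with the 4 KiB RMM granule assumed above the level shifts work out as below (a quick derivation, not additional code in the patch):

	/*
	 * RMM_RTT_LEVEL_SHIFT(l) = (12 - 3) * (4 - l) + 3
	 *   level 3: 9 * 1 + 3 = 12  ->  4 KiB page
	 *   level 2: 9 * 2 + 3 = 21  ->  2 MiB  (RMM_L2_BLOCK_SIZE)
	 *   level 1: 9 * 3 + 3 = 30  ->  1 GiB  (RMM_L1_BLOCK_SIZE)
	 *
	 * CCAL_RTT_PAGE_ORDER = 1, so each CCAL RTT pair is one physically
	 * contiguous 8 KiB allocation covering two 4 KiB RTT granules
	 * (the protected and the unprotected table).
	 */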
+static bool pages_are_consecutive(struct page **pages, int num)
+{
+	for (int i = 1; i < num; i++) {
+		if (page_to_phys(pages[i]) - page_to_phys(pages[i - 1])
+		    != PAGE_SIZE)
+			return false;
+	}
+
+	return true;
+}
+
+void config_realm_ccal(struct realm *realm)
+{
+	realm->params->flags |= RMI_REALM_PARAM_FLAG_CCAL;
+	realm->is_ccal = true;
+}
+
+static int ccal_alloc_delegated_rtt(phys_addr_t *phys)
+{
+	struct page *pages = alloc_pages(GFP_KERNEL, CCAL_RTT_PAGE_ORDER);
+	phys_addr_t pa;
+
+	if (!pages)
+		return -ENOMEM;
+
+	pa = page_to_phys(pages);
+
+	if (rmi_ccal_delegate_range(pa, RMM_PAGE_SIZE * CCAL_RTT_PAGE_NUM)) {
+		__free_pages(pages, CCAL_RTT_PAGE_ORDER);
+		return -ENXIO;
+	}
+
+	*phys = pa;
+
+	return 0;
+}
+
+static void ccal_free_delegated_rtt(phys_addr_t phys1, phys_addr_t phys2)
+{
+	if (!WARN_ON(rmi_granule_undelegate(phys1)))
+		free_page((unsigned long)phys_to_virt(phys1));
+
+	if (!WARN_ON(rmi_granule_undelegate(phys2)))
+		free_page((unsigned long)phys_to_virt(phys2));
+}
+
+int ccal_create_rtt_levels(struct realm *realm, unsigned long ipa, int level,
+			   int max_level)
+{
+	unsigned long aligned_ipa;
+	phys_addr_t phys;
+	int ret;
+
+	if (WARN_ON(level == max_level))
+		return 0;
+
+	while (level++ < max_level) {
+		if (ccal_alloc_delegated_rtt(&phys))
+			return -ENOMEM;
+
+		aligned_ipa = ALIGN_DOWN(ipa, rme_rtt_level_mapsize(level - 1));
+
+		ret = rmi_ccal_rtt_create(virt_to_phys(realm->rd), aligned_ipa,
+					  level, phys, phys + RMM_PAGE_SIZE);
+		if (ret) {
+			ccal_free_delegated_rtt(phys, phys + RMM_PAGE_SIZE);
+
+			if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT &&
+			    RMI_RETURN_INDEX(ret) == level - 1) {
+				/* The RTT already exists, continue. */
+				continue;
+			}
+
+			WARN(1, "Failed to create CCAL RTT at level %d: %d\n",
+			     level, ret);
+			return -ENXIO;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Returns 0 on successful fold, a negative value on error, a positive value if
+ * we were not able to fold all tables at this level.
+ */
+int ccal_fold_rtt_level(struct realm *realm, int level, unsigned long start,
+			unsigned long end)
+{
+	int not_folded = 0;
+	ssize_t map_size;
+	unsigned long addr, next_addr;
+
+	if (WARN_ON(level > RMM_RTT_MAX_LEVEL))
+		return -EINVAL;
+
+	map_size = rme_rtt_level_mapsize(level - 1);
+
+	for (addr = start; addr < end; addr = next_addr) {
+		unsigned long protected_rtt, unprotected_rtt;
+		int ret;
+
+		next_addr = ALIGN(addr + 1, map_size);
+
+		ret = rmi_ccal_rtt_fold(virt_to_phys(realm->rd), addr, level,
+					&protected_rtt, &unprotected_rtt);
+
+		switch (RMI_RETURN_STATUS(ret)) {
+		case RMI_SUCCESS:
+			ccal_free_delegated_rtt(protected_rtt, unprotected_rtt);
+			break;
+		case RMI_ERROR_RTT:
+			if (level == RMM_RTT_MAX_LEVEL ||
+			    RMI_RETURN_INDEX(ret) < level) {
+				not_folded++;
+				break;
+			}
+			/* Recurse a level deeper */
+			ret = ccal_fold_rtt_level(realm, level + 1, addr,
+						  next_addr);
+			if (ret < 0)
+				return ret;
+			else if (ret == 0)
+				/* Try again at this level */
+				next_addr = addr;
+			break;
+		default:
+			WARN_ON(1);
+			return -ENXIO;
+		}
+	}
+
+	return not_folded;
+}
+
+int ccal_fold_rtt(struct realm *realm, unsigned long addr, int level)
+{
+	unsigned long protected_rtt, unprotected_rtt;
+	int ret;
+
+	ret = rmi_ccal_rtt_fold(virt_to_phys(realm->rd), addr, level,
+				&protected_rtt, &unprotected_rtt);
+	if (ret)
+		return ret;
+
+	ccal_free_delegated_rtt(protected_rtt, unprotected_rtt);
+
+	return 0;
+}
+
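Given the return convention documented above (negative on error, zero when every table folded, positive when some tables had to be left in place), callers can treat a positive result as best-effort. A hypothetical caller sketch, assuming a populated, 2 MiB-aligned range (ccal_try_fold_blocks is not part of the patch):

	static void ccal_try_fold_blocks(struct realm *realm,
					 unsigned long start, unsigned long end)
	{
		/* Fold level-3 tables so fully-populated 2 MiB ranges become
		 * block mappings; a positive return only means some level-3
		 * tables still contain mixed entries and were kept as-is.
		 */
		int ret = ccal_fold_rtt_level(realm, RMM_RTT_MAX_LEVEL,
					      start, end);

		WARN_ON(ret < 0);
	}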
+static int ccal_create_data_page(struct realm *realm, unsigned long ipa,
+				 struct page *dst_page, struct page *src_page,
+				 unsigned long flags)
+{
+	phys_addr_t rd = virt_to_phys(realm->rd);
+	phys_addr_t dst_phys, src_phys;
+	int ret;
+
+	copy_page(page_address(src_page), page_address(dst_page));
+
+	dst_phys = page_to_phys(dst_page);
+	src_phys = page_to_phys(src_page);
+
+	if (rmi_granule_delegate(dst_phys))
+		return -ENXIO;
+
+	ret = rmi_data_create(rd, dst_phys, ipa, src_phys, flags);
+
+	if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+		/* Create missing RTTs and retry. */
+		int err_level = RMI_RETURN_INDEX(ret);
+
+		ret = ccal_create_rtt_levels(realm, ipa, err_level,
+					     RMM_RTT_MAX_LEVEL);
+		if (ret)
+			goto err;
+
+		ret = rmi_data_create(rd, dst_phys, ipa, src_phys, flags);
+	}
+
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	if (WARN_ON(rmi_granule_undelegate(dst_phys))) {
+		/* Page can't be returned to NS world so is lost. */
+		get_page(dst_page);
+	}
+	return -ENXIO;
+}
+
+static int ccal_create_data_page_unknown(struct realm *realm, unsigned long ipa,
+					 struct page *page)
+{
+	phys_addr_t rd = virt_to_phys(realm->rd);
+	phys_addr_t phys = page_to_phys(page);
+	int ret;
+
+	if (rmi_granule_delegate(phys)) {
+		/* Race with another thread. */
+		return 0;
+	}
+
+	ret = rmi_data_create_unknown(rd, phys, ipa);
+
+	if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+		/* Create missing RTTs and retry. */
+		int err_level = RMI_RETURN_INDEX(ret);
+
+		ret = ccal_create_rtt_levels(realm, ipa, err_level,
+					     RMM_RTT_MAX_LEVEL);
+		if (ret)
+			goto err;
+
+		ret = rmi_data_create_unknown(rd, phys, ipa);
+	}
+
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	if (WARN_ON(rmi_granule_undelegate(phys))) {
+		/* Page can't be returned to NS world so is lost. */
+		get_page(phys_to_page(phys));
+	}
+	return -ENXIO;
+}
+
+static int ccal_create_data_block(struct realm *realm, unsigned long ipa,
+				  struct page **dst_pages,
+				  struct page *tmp_block, unsigned long flags)
+{
+	phys_addr_t dst_phys, tmp_phys;
+	int ret;
+
+	memcpy(page_address(tmp_block), page_address(dst_pages[0]),
+	       RMM_L2_BLOCK_SIZE);
+
+	dst_phys = page_to_phys(dst_pages[0]);
+	tmp_phys = page_to_phys(tmp_block);
+
+	if (rmi_ccal_delegate_range(dst_phys, RMM_L2_BLOCK_SIZE) != RMI_SUCCESS)
+		return -ENXIO;
+
+	ret = rmi_ccal_block_create(virt_to_phys(realm->rd), dst_phys, ipa,
+				    tmp_phys, flags);
+	if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+		/* Create missing RTTs and retry. */
+		int err_level = RMI_RETURN_INDEX(ret);
+
+		ret = ccal_create_rtt_levels(realm, ipa, err_level,
+					     RMM_RTT_BLOCK_LEVEL);
+		if (ret)
+			goto err_undelegate;
+
+		ret = rmi_ccal_block_create(virt_to_phys(realm->rd), dst_phys,
+					    ipa, tmp_phys, flags);
+	}
+
+	if (ret)
+		goto err_undelegate;
+
+	return 0;
+
+err_undelegate:
+	if (WARN_ON(rmi_ccal_undelegate_range(dst_phys, RMM_L2_BLOCK_SIZE))) {
+		for (int i = 0, offset = 0; offset < RMM_L2_BLOCK_SIZE;
+		     i++, offset += PAGE_SIZE) {
+			/* Pages can't be returned to NS world so are lost. */
+			get_page(dst_pages[i]);
+		}
+	}
+	return -ENXIO;
+}
+
+static int ccal_create_data_block_unknown(struct realm *realm,
+					  struct page **dst_pages,
+					  unsigned long ipa)
+{
+	phys_addr_t dst_phys;
+	int ret;
+
+	dst_phys = page_to_phys(dst_pages[0]);
+
+	if (rmi_ccal_delegate_range(dst_phys, RMM_L2_BLOCK_SIZE)) {
+		/* Race with another thread. */
+		return 0;
+	}
+
+	ret = rmi_ccal_block_create_unknown(virt_to_phys(realm->rd), dst_phys,
+					    ipa);
+	if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+		/* Create missing RTTs and retry. */
+		int err_level = RMI_RETURN_INDEX(ret);
+
+		ret = ccal_create_rtt_levels(realm, ipa, err_level,
+					     RMM_RTT_BLOCK_LEVEL);
+		if (ret)
+			goto err_undelegate;
+
+		ret = rmi_ccal_block_create_unknown(virt_to_phys(realm->rd),
+						    dst_phys, ipa);
+	}
+	if (ret)
+		goto err_undelegate;
+
+	return 0;
+
+err_undelegate:
+	if (WARN_ON(rmi_ccal_undelegate_range(dst_phys, RMM_L2_BLOCK_SIZE))) {
+		for (int i = 0, offset = 0; offset < RMM_L2_BLOCK_SIZE;
+		     i++, offset += PAGE_SIZE) {
+			/* Pages can't be returned to NS world so are lost. */
+			get_page(dst_pages[i]);
+		}
+	}
+
+	return -ENXIO;
+}
+
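One subtlety in the data-create helpers above: the pinned destination page itself is what gets delegated, so its contents are first staged into the scratch NS page (the copy_page()/memcpy() into src_page/tmp_block), and the data-create RMI call then copies them back from that NS source into the now-protected destination. A condensed view of the sequence, for orientation only:

	/*
	 * 1. copy user data:  dst (pinned NS page)  ->  tmp (scratch NS page)
	 * 2. delegate dst to the realm world (it is no longer NS-accessible)
	 * 3. DATA_CREATE(rd, dst, ipa, src = tmp): the RMM copies tmp -> dst
	 *    and measures the contents when RMI_MEASURE_CONTENT is set
	 * 4. on failure, undelegate dst (or deliberately leak it if even
	 *    the undelegate fails)
	 */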
+int realm_ccal_populate_region(struct kvm *kvm, phys_addr_t ipa_base,
+			       phys_addr_t ipa_end, phys_addr_t *ipa_top,
+			       u32 flags)
+{
+	struct realm *realm = &kvm->arch.realm;
+	struct kvm_memory_slot *memslot;
+	struct page *tmp_pages = NULL;
+	unsigned long data_flags = 0;
+	gfn_t base_gfn, top_gfn;
+	int nr_pages, nr_pinned;
+	struct page **pages;
+	unsigned int order;
+	unsigned long hva;
+	bool block_map;
+	int idx;
+	int ret;
+
+	if (ipa_base == ipa_end)
+		return 0;
+
+	if (flags & KVM_ARM_RME_POPULATE_FLAGS_MEASURE)
+		data_flags = RMI_MEASURE_CONTENT;
+
+	if (ipa_base == ALIGN_DOWN(ipa_base, RMM_L2_BLOCK_SIZE) &&
+	    ipa_end - ipa_base >= RMM_L2_BLOCK_SIZE) {
+		*ipa_top = ipa_base + RMM_L2_BLOCK_SIZE;
+		block_map = true;
+	} else {
+		*ipa_top = min(ipa_end, ALIGN_DOWN(ipa_base + RMM_L2_BLOCK_SIZE,
+						   RMM_L2_BLOCK_SIZE));
+		block_map = false;
+	}
+
+	base_gfn = gpa_to_gfn(ipa_base);
+	top_gfn = gpa_to_gfn(*ipa_top);
+	nr_pages = top_gfn - base_gfn;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	memslot = gfn_to_memslot(kvm, base_gfn);
+	if (!memslot) {
+		ret = -EFAULT;
+		goto out_srcu;
+	}
+
+	/* We require the region to be contained within a single memslot. */
+	if (memslot->base_gfn + memslot->npages < top_gfn) {
+		ret = -EINVAL;
+		goto out_srcu;
+	}
+
+	hva = gfn_to_hva_memslot(memslot, gpa_to_gfn(ipa_base));
+	if (kvm_is_error_hva(hva)) {
+		ret = -EINVAL;
+		goto out_srcu;
+	}
+
+	pages = kmalloc(CCAL_RTT_ENTRY_NUM * sizeof(*pages), GFP_KERNEL);
+	if (!pages) {
+		ret = -ENOMEM;
+		goto out_srcu;
+	}
+
+	nr_pinned = pin_user_pages_fast(hva, nr_pages, FOLL_WRITE, pages);
+	if (nr_pinned != nr_pages) {
+		ret = -EFAULT;
+		goto out_pin;
+	}
+
+	if (block_map && !IS_ALIGNED(page_to_phys(pages[0]), RMM_L2_BLOCK_SIZE))
+		block_map = false;
+
+	if (block_map && !pages_are_consecutive(pages, nr_pinned))
+		block_map = false;
+
+	if (block_map)
+		order = get_order(RMM_L2_BLOCK_SIZE);
+	else
+		order = get_order(RMM_PAGE_SIZE);
+
+	tmp_pages = alloc_pages(GFP_KERNEL, order);
+	if (!tmp_pages) {
+		ret = -ENOMEM;
+		goto out_pin;
+	}
+
+	if (block_map) {
+		ret = ccal_create_data_block(realm, ipa_base, pages, tmp_pages,
+					     data_flags);
+		if (ALIGN(ipa_base, RMM_L1_BLOCK_SIZE) ==
+		    (ipa_base + RMM_L2_BLOCK_SIZE))
+			ccal_fold_rtt(realm,
+				      ALIGN_DOWN(ipa_base, RMM_L1_BLOCK_SIZE),
+				      RMM_RTT_BLOCK_LEVEL);
+	} else {
+		for (int i = 0; i < nr_pinned; i++) {
+			ret = ccal_create_data_page(realm, ipa_base, pages[i],
+						    tmp_pages, data_flags);
+			if (ret)
+				break;
+			ipa_base += RMM_PAGE_SIZE;
+		}
+	}
+
+	if (ret == 0)
+		goto out_free;
+out_pin:
+	unpin_user_pages(pages, nr_pinned);
+out_free:
+	kfree(pages);
+	if (tmp_pages)
+		__free_pages(tmp_pages, order);
+out_srcu:
+	srcu_read_unlock(&kvm->srcu, idx);
+	return ret;
+}
+
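Both the populate path above and the ccal_map_range() path below fall back to 4 KiB pages unless the IPA range and the pinned backing pages allow a 2 MiB block mapping; the populate path also clamps the region to a single 2 MiB block first. Condensed into one hypothetical predicate (the patch open-codes these checks):

	static bool ccal_can_block_map(unsigned long ipa_base,
				       unsigned long ipa_top,
				       struct page **pages, int nr_pinned)
	{
		return IS_ALIGNED(ipa_base, RMM_L2_BLOCK_SIZE) &&
		       ipa_top - ipa_base == RMM_L2_BLOCK_SIZE &&
		       IS_ALIGNED(page_to_phys(pages[0]), RMM_L2_BLOCK_SIZE) &&
		       pages_are_consecutive(pages, nr_pinned);
	}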
+static int ccal_map_range(struct kvm *kvm, unsigned long ipa_base,
+			  unsigned long ipa_top)
+{
+	struct realm *realm = &kvm->arch.realm;
+	struct kvm_memory_slot *memslot;
+	gfn_t base_gfn, top_gfn;
+	int nr_pages, nr_pinned;
+	struct page **pages;
+	unsigned long hva;
+	bool block_map;
+	int idx;
+	int ret;
+
+	if (IS_ALIGNED(ipa_base, RMM_L2_BLOCK_SIZE) &&
+	    (ipa_top - ipa_base == RMM_L2_BLOCK_SIZE))
+		block_map = true;
+	else
+		block_map = false;
+
+	base_gfn = gpa_to_gfn(ipa_base);
+	top_gfn = gpa_to_gfn(ipa_top);
+
+	nr_pages = top_gfn - base_gfn;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	memslot = gfn_to_memslot(kvm, base_gfn);
+	if (!memslot) {
+		ret = -EFAULT;
+		goto out_srcu;
+	}
+
+	/* We require the region to be contained within a single memslot. */
+	if (memslot->base_gfn + memslot->npages < top_gfn) {
+		ret = -EFAULT;
+		goto out_srcu;
+	}
+
+	hva = gfn_to_hva_memslot(memslot, gpa_to_gfn(ipa_base));
+	pages = kmalloc(CCAL_RTT_ENTRY_NUM * sizeof(*pages), GFP_KERNEL);
+	if (!pages) {
+		ret = -ENOMEM;
+		goto out_srcu;
+	}
+
+	nr_pinned = pin_user_pages_fast(hva, nr_pages, FOLL_WRITE, pages);
+	if (nr_pinned != nr_pages) {
+		ret = -EFAULT;
+		goto out_pin;
+	}
+
+	if (block_map && !IS_ALIGNED(page_to_phys(pages[0]), RMM_L2_BLOCK_SIZE))
+		block_map = false;
+
+	if (block_map && !pages_are_consecutive(pages, nr_pinned))
+		block_map = false;
+
+	if (block_map) {
+		ret = ccal_create_data_block_unknown(realm, pages, ipa_base);
+		if (ALIGN(ipa_base, RMM_L1_BLOCK_SIZE) ==
+		    (ipa_base + RMM_L2_BLOCK_SIZE))
+			ccal_fold_rtt(realm,
+				      ALIGN_DOWN(ipa_base, RMM_L1_BLOCK_SIZE),
+				      RMM_RTT_BLOCK_LEVEL);
+	} else {
+		for (int i = 0; i < nr_pinned; i++) {
+			ret = ccal_create_data_page_unknown(realm, ipa_base,
+							    pages[i]);
+			if (ret)
+				break;
+
+			ipa_base += RMM_PAGE_SIZE;
+		}
+	}
+
+	if (ret == 0)
+		goto out_free;
+out_pin:
+	unpin_user_pages(pages, nr_pinned);
+out_free:
+	kfree(pages);
+out_srcu:
+	srcu_read_unlock(&kvm->srcu, idx);
+	return ret;
+}
+
+int realm_ccal_map_ram(struct kvm *kvm,
+		       struct arm_rme_populate_realm *args)
+{
+	phys_addr_t ipa_base, ipa_end, next_ipa;
+	int ret;
+
+	if (kvm_realm_state(kvm) != REALM_STATE_NEW)
+		return -EINVAL;
+
+	ipa_base = args->base;
+	ipa_end = ipa_base + args->size;
+
+	if (!IS_ALIGNED(ipa_base, PAGE_SIZE) ||
+	    !IS_ALIGNED(ipa_end, PAGE_SIZE) ||
+	    ipa_base > ipa_end)
+		return -EINVAL;
+
+	if (ipa_base == ipa_end)
+		return 0;
+
+	while (ipa_base < ipa_end) {
+		next_ipa = min(ipa_end, ALIGN_DOWN(ipa_base + RMM_L2_BLOCK_SIZE,
+						   RMM_L2_BLOCK_SIZE));
+		ret = ccal_map_range(kvm, ipa_base, next_ipa);
+		if (ret)
+			break;
+
+		ipa_base = next_ipa;
+		cond_resched();
+	}
+
+	return ret;
+}
+
+static int ccal_destroy_data(struct realm *realm, unsigned long ipa,
+			     unsigned long *next_addr)
+{
+	unsigned long pa, size, granule_type, offset;
+	unsigned long rd = virt_to_phys(realm->rd);
+	int ret;
+
+	ret = rmi_ccal_data_destroy(rd, ipa, &pa, &size, &granule_type,
+				    next_addr);
+
+	if (WARN_ON(ret))
+		return -ENXIO;
+
+	if (granule_type == CCAL_GRANULE_NORMAL)
+		ret = rmi_ccal_undelegate_range(pa, size);
+	else
+		goto out_unpin;
+
+	/*
+	 * If the undelegate fails then something has gone seriously
+	 * wrong: take an extra reference to just leak the pages.
+	 */
+	if (WARN_ON(ret)) {
+		for (offset = 0; offset < size; offset += PAGE_SIZE)
+			get_page(phys_to_page(pa + offset));
+	}
+
+out_unpin:
+	for (offset = 0; offset < size; offset += PAGE_SIZE)
+		unpin_user_page(phys_to_page(pa + offset));
+
+	return 0;
+}
+
+void realm_ccal_destroy_data_range(struct kvm *kvm, unsigned long start,
+				   unsigned long end)
+{
+	struct realm *realm = &kvm->arch.realm;
+	unsigned long next_addr, addr;
+	int ret;
+
+	for (addr = start; addr < end; addr = next_addr) {
+		ret = ccal_destroy_data(realm, addr, &next_addr);
+		if (ret)
+			break;
+		cond_resched_rwlock_write(&kvm->mmu_lock);
+	}
+}
\ No newline at end of file
diff --git a/arch/arm64/kvm/rme.c b/arch/arm64/kvm/rme.c
index d60f99899bb8c513dc1c548c6af562d1c695c528..a7f6a2a2ecb9e655788e2900edd55600cdb64537 100644
--- a/arch/arm64/kvm/rme.c
+++ b/arch/arm64/kvm/rme.c
@@ -11,6 +11,7 @@
 
 #include
 #include
+#include
 #include
 
 static unsigned long rmm_feat_reg0;
@@ -565,6 +566,9 @@ static int realm_create_rtt_levels(struct realm *realm,
 				   int max_level,
 				   struct kvm_mmu_memory_cache *mc)
 {
+	if (is_ccal_rvm(realm))
+		return ccal_create_rtt_levels(realm, ipa, level, max_level);
+
 	if (level == max_level)
 		return 0;
 
@@ -721,6 +725,12 @@ void kvm_realm_unmap_range(struct kvm *kvm, unsigned long start,
 	if (realm->state == REALM_STATE_NONE)
 		return;
 
+	if (is_ccal_rvm(realm)) {
+		if (unmap_private)
+			realm_ccal_destroy_data_range(kvm, start, end);
+		return;
+	}
+
 	realm_unmap_shared_range(kvm, find_map_level(realm, start, end),
 				 start, end);
 	if (unmap_private)
@@ -1105,9 +1115,13 @@ static int kvm_populate_realm(struct kvm *kvm,
 	 */
 	while (ipa_base < ipa_end) {
 		phys_addr_t end = min(ipa_end, ipa_base + SZ_2M);
+		int ret;
 
-		int ret = populate_region(kvm, ipa_base, end,
-					  args->flags);
+		if (is_ccal_rvm(&kvm->arch.realm))
+			ret = realm_ccal_populate_region(kvm, ipa_base, ipa_end,
+							 &end, args->flags);
+		else
+			ret = populate_region(kvm, ipa_base, end, args->flags);
 
 		if (ret)
 			return ret;
@@ -1171,6 +1185,9 @@ static int realm_set_ipa_state(struct kvm_vcpu *vcpu,
 
 	*top_ipa = ipa;
 
+	if (is_ccal_rvm(realm))
+		return ret;
+
 	if (ripas == RMI_EMPTY && ipa != start)
 		realm_unmap_private_range(kvm, start, ipa);
 
@@ -1349,6 +1366,9 @@ static int kvm_rme_config_realm(struct kvm *kvm, struct kvm_enable_cap *cap)
 	case ARM_RME_CONFIG_HASH_ALGO:
 		r = config_realm_hash_algo(realm, &cfg);
 		break;
+	case ARM_RME_CFG_CCAL:
+		config_realm_ccal(realm);
+		break;
 	default:
 		r = -EINVAL;
 	}
@@ -1397,6 +1417,18 @@ int _kvm_realm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 	case KVM_CAP_ARM_RME_ACTIVATE_REALM:
 		r = kvm_activate_realm(kvm);
 		break;
+	case KVM_CAP_ARM_RME_MAP_RAM_CCAL: {
+		struct arm_rme_populate_realm args;
+		void __user *argp = u64_to_user_ptr(cap->args[1]);
+
+		if (copy_from_user(&args, argp, sizeof(args))) {
+			r = -EFAULT;
+			break;
+		}
+
+		r = realm_ccal_map_ram(kvm, &args);
+		break;
+	}
 	default:
 		r = -EINVAL;
 		break;
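The kvm_populate_realm() change above relies on realm_ccal_populate_region() reporting back how far it actually got: the helper may stop at the next 2 MiB boundary rather than consuming a full SZ_2M step. Roughly, the surrounding loop behaves as sketched below (illustrative; the advancement line itself sits outside the hunk context shown):

	while (ipa_base < ipa_end) {
		phys_addr_t end = min(ipa_end, ipa_base + SZ_2M);

		/* 'end' is written back with the IPA actually reached so the
		 * next iteration resumes exactly where this one stopped.
		 */
		ret = realm_ccal_populate_region(kvm, ipa_base, ipa_end,
						 &end, args->flags);
		if (ret)
			return ret;

		ipa_base = end;
	}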