From b76324fe12ecb7a6722e82ae353bbad2ec9e3dda Mon Sep 17 00:00:00 2001
From: NiZhiguang
Date: Wed, 11 Jun 2025 06:08:26 +0000
Subject: [PATCH 01/10] crypto: ccp: Fix the update decision logic when
 upgrading Hygon CSV firmware

Upstream: no

Do not update the CSV firmware when the CSV version is below 1667 on
Hygon CPUs.

Hygon-SIG: commit none hygon crypto: ccp: Fix the update decision logic when upgrading Hygon CSV firmware

Signed-off-by: NiZhiguang
Signed-off-by: hanliyang
---
 drivers/crypto/ccp/sev-dev.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 41cd4a3634cd..7059a212ac4d 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -1713,8 +1713,8 @@ static int sev_update_firmware(struct device *dev)
 	struct page *p;
 	u64 data_size;
 
-	if (!sev_version_greater_or_equal(0, 15) &&
-	    !(is_vendor_hygon() && csv_version_greater_or_equal(1667))) {
+	if (!sev_version_greater_or_equal(0, 15) ||
+	    (is_vendor_hygon() && !csv_version_greater_or_equal(1667))) {
 		dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n");
 		return -1;
 	}
-- 
Gitee

From 45a3969252515963fd691840e122e54d52c872eb Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Mon, 30 Dec 2024 17:36:12 +0800
Subject: [PATCH 02/10] x86: mm: Fix objtool warning about bool noinstr
 csv3_active()

Upstream: no

This change fixes the objtool warning:

vmlinux.o: warning: objtool: csv3_active+0x39: call to native_cpuid.constprop.0() leaves .noinstr.text section

Hygon-SIG: commit none hygon x86: mm: Fix objtool warning about bool noinstr csv3_active()

Fixes: ce19f23605e1 ("x86/kernel: Add CSV3 early update(enc/dec)/reset memory helpers")
Signed-off-by: hanliyang
---
 arch/x86/mm/mem_encrypt_hygon.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c
index da42e32f66e0..69fd83765c1b 100644
--- a/arch/x86/mm/mem_encrypt_hygon.c
+++ b/arch/x86/mm/mem_encrypt_hygon.c
@@ -114,7 +114,7 @@ static bool __init __maybe_unused csv3_check_cpu_support(void)
 }
 
 /* csv3_active() indicate whether the guest is protected by CSV3 */
-bool noinstr csv3_active(void)
+bool csv3_active(void)
 {
 	if (vendor_ebx == 0 || vendor_ecx == 0 || vendor_edx == 0) {
 		u32 eax = 0;
-- 
Gitee

From 31efb11ea75e3726fed187d3979098261551860f Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Wed, 24 Sep 2025 17:40:24 +0800
Subject: [PATCH 03/10] crypto: ccp: Define CSV3 LAUNCH_FINISH_EX command id

Upstream: no

In some scenarios, users require the host to supply configuration or
data to the guest so that applications in the guest can consume it.
The configuration or data is not confidential, so the host is allowed
to see it.
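As a rough illustration only (not part of this patch), a caller could
fill the 256-byte command buffer defined below roughly as follows; the
handle and cfg variables here are hypothetical:

	struct csv3_data_launch_finish_ex cmd = { 0 };

	BUILD_BUG_ON(sizeof(cmd) != 256);	/* 4 + 4 + 64 + 184 bytes */
	cmd.handle = handle;			/* handle of the launching VM */
	/* host_data is plaintext; it is visible to the host by design. */
	memcpy(cmd.host_data, cfg, sizeof(cmd.host_data));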
Hygon-SIG: commit none hygon crypto: ccp: Define CSV3 LAUNCH_FINISH_EX command id

Signed-off-by: hanliyang
---
 drivers/crypto/ccp/hygon/csv-dev.c |  2 ++
 include/linux/psp-hygon.h          | 16 ++++++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c
index bcf978004334..151179113f33 100644
--- a/drivers/crypto/ccp/hygon/csv-dev.c
+++ b/drivers/crypto/ccp/hygon/csv-dev.c
@@ -65,7 +65,9 @@ int csv_cmd_buffer_len(int cmd)
 	case CSV3_CMD_RECEIVE_ENCRYPT_DATA:
 		return sizeof(struct csv3_data_receive_encrypt_data);
 	case CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT:
 		return sizeof(struct csv3_data_receive_encrypt_context);
+	case CSV3_CMD_LAUNCH_FINISH_EX:
+		return sizeof(struct csv3_data_launch_finish_ex);
 	default:
 		return 0;
 	}
diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h
index 181c046699f2..1b5be9efb628 100644
--- a/include/linux/psp-hygon.h
+++ b/include/linux/psp-hygon.h
@@ -42,6 +42,7 @@ enum csv3_cmd {
 	CSV3_CMD_SET_GUEST_PRIVATE_MEMORY	= 0x200,
 	CSV3_CMD_LAUNCH_ENCRYPT_DATA		= 0x201,
 	CSV3_CMD_LAUNCH_ENCRYPT_VMCB		= 0x202,
+	CSV3_CMD_LAUNCH_FINISH_EX		= 0x204,
 
 	/* Guest NPT(Nested Page Table) management commands */
 	CSV3_CMD_UPDATE_NPT			= 0x203,
@@ -207,6 +208,21 @@ struct csv3_data_launch_encrypt_vmcb {
 	u32 secure_vmcb_len;			/* Out */
 } __packed;
 
+/**
+ * struct csv3_data_launch_finish_ex - CSV3_CMD_LAUNCH_FINISH_EX command
+ *
+ * @handle: handle assigned to the VM
+ * @reserved0: reserved field, for future use
+ * @host_data: the host supplied data
+ * @reserved1: reserved field, for future use
+ */
+struct csv3_data_launch_finish_ex {
+	u32 handle;				/* In */
+	u32 reserved0;				/* In */
+	u8 host_data[64];			/* In */
+	u8 reserved1[184];			/* In */
+} __packed;
+
 /**
  * struct csv3_data_update_npt - CSV3_CMD_UPDATE_NPT command
  *
-- 
Gitee

From 3aa06509114c5512a30f2d559244a6997c5bebdb Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Wed, 24 Sep 2025 17:49:17 +0800
Subject: [PATCH 04/10] crypto: ccp: Introduce CSV_EXT_CSV3_LFINISH_EX
 extension flag

Upstream: no

Since build 2393 of the CSV firmware, the CSV3_LAUNCH_FINISH_EX
firmware API command has been supported. To enable callers to check the
current firmware's support for this command, an LFINISH_EX flag bit is
introduced.

Hygon-SIG: commit none hygon Introduce CSV_EXT_CSV3_LFINISH_EX extension flag

Signed-off-by: hanliyang
---
 drivers/crypto/ccp/hygon/csv-dev.c | 6 ++++++
 include/linux/psp-hygon.h          | 2 ++
 2 files changed, 8 insertions(+)

diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c
index 151179113f33..92a0100532fc 100644
--- a/drivers/crypto/ccp/hygon/csv-dev.c
+++ b/drivers/crypto/ccp/hygon/csv-dev.c
@@ -674,6 +674,12 @@ int csv_get_extension_info(void *buf, size_t *size)
 		*(uint32_t *)buf |= CSV_EXT_CSV3_INJ_SECRET;
 	}
 
+	/* Since firmware with build id 2393, support:
+	 * c. issue CSV3_LAUNCH_FINISH_EX command
+	 */
+	if (csv_version_greater_or_equal(2393))
+		*(uint32_t *)buf |= CSV_EXT_CSV3_LFINISH_EX;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(csv_get_extension_info);
diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h
index 1b5be9efb628..af3770c320bc 100644
--- a/include/linux/psp-hygon.h
+++ b/include/linux/psp-hygon.h
@@ -24,6 +24,8 @@
 #define CSV_EXT_CSV3_MULT_LUP_DATA	(1 << CSV_EXT_CSV3_MULT_LUP_DATA_BIT)
 #define CSV_EXT_CSV3_INJ_SECRET_BIT	1
 #define CSV_EXT_CSV3_INJ_SECRET	(1 << CSV_EXT_CSV3_INJ_SECRET_BIT)
+#define CSV_EXT_CSV3_LFINISH_EX_BIT	2
+#define CSV_EXT_CSV3_LFINISH_EX	(1 << CSV_EXT_CSV3_LFINISH_EX_BIT)
 
 /**
  * Guest/platform management commands for CSV
-- 
Gitee

From 34ed027a34d7abe06c936c2b114cc93322211e34 Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Wed, 24 Sep 2025 19:10:17 +0800
Subject: [PATCH 05/10] KVM: SVM: CSV: Add KVM_CSV3_LAUNCH_FINISH_EX command

Upstream: no

Newer CSV3 firmware supports the CSV3_LAUNCH_FINISH_EX command. Once
the VMM and KVM have negotiated that the software stack may issue
CSV3_LAUNCH_FINISH_EX, the userspace VMM can decide whether to pass
host-supplied data into the confidential guest's context by issuing
this command.

Hygon-SIG: commit none hygon Add KVM_CSV3_LAUNCH_FINISH_EX command

Signed-off-by: hanliyang
---
 arch/x86/kvm/svm/csv.c   | 34 ++++++++++++++++++++++++++++++++++
 include/uapi/linux/kvm.h | 10 ++++++++++
 2 files changed, 44 insertions(+)

diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c
index 63c15f212c38..9c3eab5b542a 100644
--- a/arch/x86/kvm/svm/csv.c
+++ b/arch/x86/kvm/svm/csv.c
@@ -1632,6 +1632,35 @@ static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return ret;
 }
 
+static int csv3_launch_finish_ex(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct kvm_csv3_launch_finish_ex params;
+	struct csv3_data_launch_finish_ex *finish_ex = NULL;
+	int ret = 0;
+
+	if (!csv3_guest(kvm) ||
+	    !(csv->inuse_ext & KVM_CAP_HYGON_COCO_EXT_CSV3_LFINISH_EX))
+		return -ENOTTY;
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+			   sizeof(params)))
+		return -EFAULT;
+
+	finish_ex = kzalloc(sizeof(*finish_ex), GFP_KERNEL);
+	if (!finish_ex)
+		return -ENOMEM;
+
+	finish_ex->handle = csv->sev->handle;
+	memcpy(finish_ex->host_data, params.host_data, KVM_CSV3_HOST_DATA_SIZE);
+	ret = hygon_kvm_hooks.sev_issue_cmd(kvm, CSV3_CMD_LAUNCH_FINISH_EX,
+					    finish_ex, &argp->error);
+
+	kfree(finish_ex);
+
+	return ret;
+}
+
 /* Userspace wants to query either header or trans length. */
 static int csv3_send_encrypt_data_query_lengths(struct kvm *kvm,
 						struct kvm_sev_cmd *argp,
@@ -2775,6 +2804,9 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
 	case KVM_CSV3_LAUNCH_ENCRYPT_VMCB:
 		r = csv3_launch_encrypt_vmcb(kvm, &sev_cmd);
 		break;
+	case KVM_CSV3_LAUNCH_FINISH_EX:
+		r = csv3_launch_finish_ex(kvm, &sev_cmd);
+		break;
 	case KVM_CSV3_SEND_ENCRYPT_DATA:
 		r = csv3_send_encrypt_data(kvm, &sev_cmd);
 		break;
@@ -3095,6 +3127,8 @@ static int csv_get_hygon_coco_extension(struct kvm *kvm)
 				csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA;
 			if (csv->fw_ext & CSV_EXT_CSV3_INJ_SECRET)
 				csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET;
+			if (csv->fw_ext & CSV_EXT_CSV3_LFINISH_EX)
+				csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_LFINISH_EX;
 		}
 		csv->kvm_ext_valid = true;
 	}
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index e6184731588e..7080b43ac54e 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1245,6 +1245,8 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_HYGON_COCO_EXT_CSV3_MULT_LUP_DATA	(1 << 1)
 /* support request to inject secret to CSV3 guest */
 #define KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET	(1 << 2)
+/* support finishing the launch process via the CSV3_CMD_LAUNCH_FINISH_EX firmware API */
+#define KVM_CAP_HYGON_COCO_EXT_CSV3_LFINISH_EX	(1 << 3)
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -2149,6 +2151,7 @@ enum csv3_cmd_id {
 	KVM_CSV3_HANDLE_MEMORY,
 
 	KVM_CSV3_SET_GUEST_PRIVATE_MEMORY = 0xc8,
+	KVM_CSV3_LAUNCH_FINISH_EX = 0xc9,
 
 	KVM_CSV3_NR_MAX,
 };
@@ -2163,6 +2166,13 @@ struct kvm_csv3_launch_encrypt_data {
 	__u32 len;
 };
 
+#define KVM_CSV3_HOST_DATA_SIZE	64
+
+struct kvm_csv3_launch_finish_ex {
+	__u8 host_data[KVM_CSV3_HOST_DATA_SIZE];
+	__u8 pad[16];
+};
+
 struct kvm_csv3_send_encrypt_data {
 	__u64 hdr_uaddr;
 	__u32 hdr_len;
-- 
Gitee

From ebc0055521116f95384fd35054bd21cf74bec49f Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Thu, 20 Nov 2025 19:52:35 +0800
Subject: [PATCH 06/10] KVM: SVM: CSV: Handle #NPF gracefully for CSV3 VM

Upstream: no

How #NPF is handled directly affects how the guest runs. Printing
appropriate messages for #NPF failures assists in analyzing the VM. If
-ENOMEM occurs during #NPF handling, the system memory is under
pressure; in that case, resume the guest so that the #NPF is triggered
and handled again.
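In csv3_page_fault(), this policy reduces to the following small
pattern (a simplified excerpt of the change below):

	ret = csv3_pin_shared_memory(vcpu, slot, gfn, &pfn);
	if (ret) {
		/* Resume guest to retry #NPF. */
		if (ret == -ENOMEM)
			ret = 0;
		goto exit;
	}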
Hygon-SIG: commit none hygon KVM: SVM: CSV: Handle #NPF gracefully for CSV3 VM

Signed-off-by: hanliyang
---
 arch/x86/kvm/svm/csv.c | 22 +++++++++++++++++-----
 1 file changed, 17 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c
index 9c3eab5b542a..179a944df4ff 100644
--- a/arch/x86/kvm/svm/csv.c
+++ b/arch/x86/kvm/svm/csv.c
@@ -2240,6 +2240,7 @@ static int csv3_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code
 
 	update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL);
 	if (!update_npt) {
+		WARN_ONCE(1, "Failed to allocate npt command\n");
 		r = -ENOMEM;
 		goto exit;
 	}
@@ -2253,8 +2254,10 @@ static int csv3_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code
 
 	r = hygon_kvm_hooks.sev_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT,
 					  update_npt, &psp_ret);
-	if (psp_ret != SEV_RET_SUCCESS)
+	if (psp_ret != SEV_RET_SUCCESS) {
+		WARN_ONCE(1, "Failed to update NPT\n");
 		r = -EFAULT;
+	}
 
 	kfree(update_npt);
exit:
@@ -2319,8 +2322,10 @@ static int csv3_pin_shared_memory(struct kvm_vcpu *vcpu,
 
 	tmp_pfn = __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, write,
 				       NULL, NULL);
-	if (unlikely(is_error_pfn(tmp_pfn)))
-		return -ENOMEM;
+	if (unlikely(is_error_pfn(tmp_pfn))) {
+		WARN_ONCE(1, "Invalid pfn\n");
+		return -EINVAL;
+	}
 
 	if (csv3_is_mmio_pfn(tmp_pfn)) {
 		*pfn = tmp_pfn;
@@ -2347,6 +2352,7 @@ static int csv3_pin_shared_memory(struct kvm_vcpu *vcpu,
 		if (npinned != 1) {
 			mmap_write_unlock(current->mm);
 			kmem_cache_free(csv->sp_slab, sp);
+			pr_err_ratelimited("Failure pin gfn:0x%llx\n", gfn);
 			return -ENOMEM;
 		}
 
@@ -2519,8 +2525,12 @@ static int csv3_page_fault(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 		mutex_lock(&csv->sp_lock);
 		ret = csv3_pin_shared_memory(vcpu, slot, gfn, &pfn);
 		mutex_unlock(&csv->sp_lock);
-		if (ret)
+		if (ret) {
+			/* Resume guest to retry #NPF. */
+			if (ret == -ENOMEM)
+				ret = 0;
 			goto exit;
+		}
 
 		level = csv3_mapping_level(vcpu, gfn, pfn, slot);
 	}
@@ -2528,8 +2538,10 @@ static int csv3_page_fault(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 
 	ret = __csv3_page_fault(vcpu, gfn << PAGE_SHIFT, error_code, slot,
 				&psp_ret, pfn, level);
-	if (psp_ret != SEV_RET_SUCCESS)
+	if (psp_ret != SEV_RET_SUCCESS) {
+		WARN_ONCE(1, "Failed to update NPT\n");
 		ret = -EFAULT;
+	}
exit:
 	return ret;
 }
-- 
Gitee

From a324a39b25d875b80ac74694e000d3c03795c6fb Mon Sep 17 00:00:00 2001
From: hanliyang
Date: Thu, 11 Dec 2025 10:23:02 +0800
Subject: [PATCH 07/10] KVM: SVM: CSV: Enhance shared page manager for CSV3 VM

Upstream: no

Currently, shared memory regions of CSV3 VMs are excluded from CMA.
When a GPA range in a CSV3 guest is marked as shared, KVM allocates and
pins physical pages from outside the CMA region and updates the
corresponding physical addresses in the guest's NPT. If this GPA range
later transitions back from shared to private, the previously pinned
pages must be safely unpinned.

Ideally, all shared GPA ranges in CSV3 VMs would be aligned to 2MB
boundaries, and their backing physical pages would be 2MB THP or
hugetlb pages. In practice, however, shared memory may be backed by a
mix of 4KB base pages and compound (huge) pages. The current unpinning
logic cannot distinguish whether a page to be unpinned is a single 4KB
page or part of a compound page. If a compound page contains multiple
subpages that belong to different shared GPA ranges, prematurely
unpinning the entire compound page could corrupt other still-shared
regions.

To address this issue, we introduce an `order` field and a
per-compound-page bitmap in the shared page manager.
Whenever a subpage belongs to any shared GPA range, its corresponding bit in the bitmap is set. A compound page is unpinned only when **all** of its subpages are no longer associated with any shared GPA range. Hygon-SIG: commit none hygon KVM: SVM: CSV: Enhance shared page manager for CSV3 VM Signed-off-by: hanliyang --- arch/x86/kvm/svm/csv.c | 666 +++++++++++++++++++++++++++++++++++---- arch/x86/kvm/trace.h | 100 ++++++ arch/x86/kvm/x86.c | 4 + include/uapi/linux/kvm.h | 72 ++++- 4 files changed, 767 insertions(+), 75 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 179a944df4ff..8c8a92fd692a 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -23,6 +23,8 @@ #include "csv.h" #include "x86.h" +#include "trace.h" + #undef pr_fmt #define pr_fmt(fmt) "CSV: " fmt @@ -855,18 +857,35 @@ enum csv3_pg_level { /* * Manage shared page in rbtree, the node within the rbtree - * is indexed by gfn. @page points to the page mapped by @gfn + * is indexed by track_hva. @track_page points to the page mapped by @track_hva * in NPT. */ struct shared_page { struct rb_node node; - gfn_t gfn; - struct page *page; + /* The hva corresponds to @track_page. */ + u64 track_hva; + /* Page order of @track_page. */ + unsigned int order; + /* + * This field is NULL when @order equals 0, but must point to a + * valid bitmap when @order > 0 (i.e., for huge pages and THP). + */ + unsigned long *bitmap; + /* + * Pointer to the head page of the page block. + * Always points to the head page, whether @order is 0 (a single 4K + * page) or greater (a huge page or THP). + */ + struct page *track_page; }; struct shared_page_mgr { + /* The root of the manager tree */ struct rb_root root; + /* The count of shared_page entries in the manager tree */ u64 count; + /* The total pages tracked by the manager */ + unsigned long nr_pages; }; struct kvm_csv_info { @@ -900,8 +919,25 @@ struct secure_memory_region { u64 hpa; }; -static bool shared_page_insert(struct shared_page_mgr *mgr, - struct shared_page *sp) +/** + * insert_shared_page_entry_locked - Insert a shared_page into the manager tree + * @mgr: Pointer to the shared page manager (must be valid). + * @new_sp: The shared_page to insert. + * @old_sp: Pointer to store conflicting entry if insertion fails (can be NULL). + * + * Attempts to insert @new_sp into the rbtree of @mgr. If an entry with the + * same key (e.g., track_hva) already exists, the insertion is aborted and + * the existing entry is returned via @old_sp. + * + * The caller must hold the appropriate lock protecting @mgr. + * + * Return: + * %true if @new_sp was successfully inserted; + * %false if a duplicate exists (in which case @old_sp is set if non-NULL). 
+ */ +static bool insert_shared_page_entry_locked(struct shared_page_mgr *mgr, + struct shared_page *new_sp, + struct shared_page **old_sp) { struct shared_page *sp_iter; struct rb_root *root; @@ -916,58 +952,285 @@ static bool shared_page_insert(struct shared_page_mgr *mgr, sp_iter = rb_entry(*new, struct shared_page, node); parent = *new; - if (sp->gfn < sp_iter->gfn) + if (new_sp->track_hva < sp_iter->track_hva) { new = &((*new)->rb_left); - else if (sp->gfn > sp_iter->gfn) + } else if (new_sp->track_hva > sp_iter->track_hva) { new = &((*new)->rb_right); - else + } else { + struct folio *folio = page_folio(sp_iter->track_page); + + trace_kvm_csv3_sp_insert_dup(page_to_pfn(new_sp->track_page), + new_sp->track_hva, + new_sp->order, + page_to_pfn(sp_iter->track_page), + sp_iter->track_hva, + sp_iter->order); + + /* + * If found the same @track_page during insertion, the + * @track_page must be pinned more than once. + */ + if (new_sp->track_page == sp_iter->track_page && + ((folio_test_large(folio) && + atomic_read(&folio->_pincount) < 2) || + (!folio_test_large(folio) && + ((unsigned int)folio_ref_count(folio) + < GUP_PIN_COUNTING_BIAS * 2)))) + pr_err_ratelimited("%s: SP_MGR_ERR: pfn:0x%lx" + " order:%d pincount < 2\n", + __func__, + page_to_pfn(sp_iter->track_page), + sp_iter->order); + + if (old_sp) + *old_sp = sp_iter; return false; + } } + trace_kvm_csv3_sp_insert(page_to_pfn(new_sp->track_page), + new_sp->track_hva, + new_sp->order); + /* Add new node and rebalance tree. */ - rb_link_node(&sp->node, parent, new); - rb_insert_color(&sp->node, root); + rb_link_node(&new_sp->node, parent, new); + rb_insert_color(&new_sp->node, root); + + /* Update shared page statistics */ mgr->count++; + mgr->nr_pages += 1UL << new_sp->order; return true; } -static struct shared_page *shared_page_search(struct shared_page_mgr *mgr, - gfn_t gfn) +/** + * search_shared_page_entry_locked - Search for a shared page covering @hva + * @mgr: Pointer to the shared page manager. + * @hva: Host virtual address to search for. + * + * Searches the rbtree of @mgr for a shared page that maps @hva. The search + * iterates through supported page orders (e.g., 4K, 2M, 1G) in ascending order + * (starting from order 0). + * + * The caller must hold the lock protecting @mgr. + * + * Return: Pointer to matching shared_page, or NULL if not found. + */ +static +struct shared_page *search_shared_page_entry_locked(struct shared_page_mgr *mgr, + u64 hva) { struct shared_page *sp; struct rb_root *root; struct rb_node *node; + u64 track_hva; + unsigned int order = 0; + +again: + track_hva = (hva & ~((1ULL << (order + PAGE_SHIFT)) - 1)); root = &mgr->root; node = root->rb_node; while (node) { sp = rb_entry(node, struct shared_page, node); - if (gfn < sp->gfn) + if (track_hva < sp->track_hva) node = node->rb_left; - else if (gfn > sp->gfn) + else if (track_hva > sp->track_hva) node = node->rb_right; else - return sp; + return (track_hva == hva || sp->order == order) + ? sp : NULL; + } + + if (order == 0) { + order = PMD_SHIFT - PAGE_SHIFT; + goto again; + } else if (order == (PMD_SHIFT - PAGE_SHIFT)) { + order = PUD_SHIFT - PAGE_SHIFT; + goto again; } return NULL; } -static struct shared_page *shared_page_remove(struct shared_page_mgr *mgr, - gfn_t gfn) +/** + * shared_page_entry_set_bit_locked - Set bitmap in a shared page entry if found + * @mgr: Pointer to the shared page manager (caller must hold its lock). + * @hva: Host virtual address identifying the 4K subpage of a compound page. 
+ * @sp: Optional hint pointer to the shared_page entry; if NULL, the entry is
+ *      looked up in @mgr's rbtree using @hva.
+ *
+ * If @sp is NULL, the function searches for the entry in the manager's tree.
+ *
+ * For compound pages (@order > 0), a bitmap tracks which 4K subpages are
+ * shared memory for the CSV3 VM. The bit corresponding to the offset of @hva
+ * within the compound page is set atomically.
+ *
+ * On every successful lookup (including repeated hits), a kvm_csv3_sp_hit
+ * trace event is emitted with the PFN and order.
+ *
+ * Return:
+ * Pointer to the shared_page entry if found, or NULL if no entry covers @hva.
+ */
+static
+struct shared_page *shared_page_entry_set_bit_locked(struct shared_page_mgr *mgr,
+						     u64 hva,
+						     struct shared_page *sp)
+{
+	/* If @sp is NULL, we need to search the manager tree for the entry. */
+	if (!sp)
+		sp = search_shared_page_entry_locked(mgr, hva);
+
+	if (sp) {
+		if (sp->order) {
+			unsigned int pg_off = (hva & ~sp->track_hva) >> PAGE_SHIFT;
+
+			set_bit(pg_off, sp->bitmap);
+		}
+
+		trace_kvm_csv3_sp_hit(page_to_pfn(sp->track_page),
+				      sp->track_hva, hva, sp->order);
+	}
+
+	return sp;
+}
+
+/**
+ * remove_shared_page_entry_locked - Remove a shared_page entry once none of
+ *				     its subpages is shared memory of the CSV3 VM.
+ * @mgr: Pointer to the shared page manager (lock must be held by caller).
+ * @hva: Host virtual address identifying the subpage.
+ *
+ * This function attempts to remove a shared_page entry from the manager when
+ * it no longer contains any of the CSV3 VM's shared memory. The entry is only
+ * removed if:
+ * - All 4K subpages within the compound page are no longer the VM's shared
+ *   memory
+ * - The underlying physical page is not pinned by any other user
+ *
+ * If the page is found to be pinned more than once, the removal is aborted
+ * because the page is used by multiple users.
+ *
+ * Context: Caller must hold the lock protecting @mgr.
+ *
+ * Return:
+ * Pointer to the removed shared_page entry on success, or NULL if the entry
+ * is not found, some bits are still set in @bitmap, or the page is pinned
+ * more than once.
+ */
+static
+struct shared_page *remove_shared_page_entry_locked(struct shared_page_mgr *mgr,
+						    u64 hva)
 {
 	struct shared_page *sp;
+	struct folio *folio;
 
-	sp = shared_page_search(mgr, gfn);
+	sp = search_shared_page_entry_locked(mgr, hva);
 	if (sp) {
+		/*
+		 * The bitmap records the 4K subpages within the compound page
+		 * that are shared memory of the VM. Clear the bit for this
+		 * @hva.
+		 */
+		if (sp->order) {
+			unsigned int pg_off = (hva & ~sp->track_hva) >> PAGE_SHIFT;
+
+			clear_bit(pg_off, sp->bitmap);
+			if (!bitmap_empty(sp->bitmap, 1U << sp->order))
+				return NULL;
+		}
+
+		/*
+		 * The @sp->track_page may be pinned more than once in some
+		 * scenarios, such as device passthrough. We don't remove this
+		 * entry from the tree.
+		 */
+		folio = page_folio(sp->track_page);
+		if ((folio_test_large(folio) &&
+		     atomic_read(&folio->_pincount) >= 2) ||
+		    (!folio_test_large(folio) &&
+		     ((unsigned int)folio_ref_count(folio) >= GUP_PIN_COUNTING_BIAS * 2))) {
+			return NULL;
+		}
+
+		trace_kvm_csv3_sp_remove(page_to_pfn(sp->track_page),
+					 sp->track_hva, sp->order);
+
 		rb_erase(&sp->node, &mgr->root);
 		mgr->count--;
+		mgr->nr_pages -= 1UL << sp->order;
 	}
 
 	return sp;
 }
 
+/**
+ * alloc_shared_page_entry - Allocate and initialize a shared_page entry for a
+ * given page
+ * @csv: Pointer to the KVM CSV info structure containing the slab cache.
+ * @hva: Host virtual address associated with the page (used for alignment and
+ *       tracking).
+ * @page: The physical page being tracked (may be a base or compound/huge page).
+ *
+ * Allocates a new struct shared_page from the slab cache in @csv, initializes
+ * it to represent the memory region covered by @page at virtual address @hva.
+ * If @page is a compound page (order > 0), a bitmap is allocated to track
+ * touched 4K subpages, and the bit corresponding to the 4K offset of @hva
+ * within the compound page is set. For order-0 pages, no bitmap is allocated.
+ * The @track_hva field is aligned to the start of the page block (i.e., the
+ * compound page boundary if applicable).
+ *
+ * Return:
+ * Pointer to the newly allocated and initialized shared_page on success,
+ * or NULL on allocation failure (either slab or bitmap).
+ */
+struct shared_page *alloc_shared_page_entry(struct kvm_csv_info *csv,
+					    u64 hva,
+					    struct page *page)
+{
+	struct shared_page *sp;
+	unsigned int order;
+	unsigned int pg_off;
+
+	sp = kmem_cache_zalloc(csv->sp_slab, GFP_KERNEL);
+	if (!sp)
+		return NULL;
+
+	order = compound_order(compound_head(page));
+	if (order) {
+		unsigned long *bitmap = kvzalloc(BITS_TO_LONGS(1U << order)
+						 * sizeof(unsigned long),
+						 GFP_KERNEL);
+		if (!bitmap) {
+			kmem_cache_free(csv->sp_slab, sp);
+			return NULL;
+		}
+		sp->bitmap = bitmap;
+		pg_off = (hva >> PAGE_SHIFT) & ((1U << order) - 1);
+		set_bit(pg_off, sp->bitmap);
+	}
+
+	sp->track_hva = hva & ~((1ULL << (order + PAGE_SHIFT)) - 1);
+	sp->order = order;
+	sp->track_page = compound_head(page);
+
+	return sp;
+}
+
+/**
+ * free_shared_page_entry - Free a shared_page entry and its associated
+ * resources
+ * @csv: Pointer to the KVM CSV info structure containing the slab cache.
+ * @sp: The shared_page entry to free.
+ *
+ * The caller must ensure that @sp is no longer in use (e.g., already removed
+ * from any rbtree) before calling this function.
+ */
+void free_shared_page_entry(struct kvm_csv_info *csv, struct shared_page *sp)
+{
+	if (!sp)
+		return;
+
+	kvfree(sp->bitmap);
+	kmem_cache_free(csv->sp_slab, sp);
+}
+
 static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm)
 {
 	return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm);
@@ -2307,17 +2570,17 @@ static int __csv3_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	return r;
 }
 
-static int csv3_pin_shared_memory(struct kvm_vcpu *vcpu,
-				  struct kvm_memory_slot *slot, gfn_t gfn,
-				  kvm_pfn_t *pfn)
+static int csv_pin_shared_memory_locked(struct kvm_vcpu *vcpu,
+					struct kvm_memory_slot *slot,
+					gfn_t gfn,
+					kvm_pfn_t *pfn)
 {
-	struct page *page;
 	u64 hva;
-	int npinned;
 	kvm_pfn_t tmp_pfn;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
 	struct shared_page *sp;
+	unsigned int pg_off;
 	bool write = !(slot->flags & KVM_MEM_READONLY);
 
 	tmp_pfn = __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, write,
@@ -2332,69 +2595,78 @@ static int csv3_pin_shared_memory(struct kvm_vcpu *vcpu,
 		return 0;
 	}
 
+	hva = __gfn_to_hva_memslot(slot, gfn);
+
 	if (page_maybe_dma_pinned(pfn_to_page(tmp_pfn))) {
 		kvm_release_pfn_clean(tmp_pfn);
+		/*
+		 * If we have already pinned the page when traversing the
+		 * memslot, and the pinned page is part of a compound page
+		 * that is tracked by a shared page entry, we must set the
+		 * corresponding bit in the @bitmap to reflect that this
+		 * subpage is shared memory for the CSV3 VM.
+		 *
+		 * This ensures that all 4K subpages within a compound page
+		 * can be accurately tracked, preventing premature removal of
+		 * pages that are still shared memory.
+ */ + shared_page_entry_set_bit_locked(&csv->sp_mgr, hva, NULL); *pfn = tmp_pfn; return 0; } kvm_release_pfn_clean(tmp_pfn); - sp = shared_page_search(&csv->sp_mgr, gfn); - if (!sp) { - sp = kmem_cache_zalloc(csv->sp_slab, GFP_KERNEL); - if (!sp) - return -ENOMEM; + /* + * If the shared page manager does not already track @hva: + * - Pin the page using pin_user_pages(); + * - Allocate a new shared_page entry, if the pinned page is part of + * a compound page, allocate a @bitmap and set the bit corresponding + * to this 4K subpage. + * + * If the @hva is already tracked by the manager, set the corresponding + * bit in the @bitmap. + */ + sp = shared_page_entry_set_bit_locked(&csv->sp_mgr, hva, NULL); + if (unlikely(sp)) { + pr_err_ratelimited("%s: not pinned but in the tree\n", __func__); + pg_off = (hva & ~sp->track_hva) >> PAGE_SHIFT; + *pfn = page_to_pfn(sp->track_page) + pg_off; + } else { + struct shared_page *old_sp; + struct page *page; + int npinned; - hva = __gfn_to_hva_memslot(slot, gfn); mmap_write_lock(current->mm); npinned = pin_user_pages(hva, 1, FOLL_WRITE | FOLL_LONGTERM, &page); + mmap_write_unlock(current->mm); if (npinned != 1) { - mmap_write_unlock(current->mm); - kmem_cache_free(csv->sp_slab, sp); pr_err_ratelimited("Failure pin gfn:0x%llx\n", gfn); return -ENOMEM; } - mmap_write_unlock(current->mm); - sp->page = page; - sp->gfn = gfn; - shared_page_insert(&csv->sp_mgr, sp); - } - - *pfn = page_to_pfn(sp->page); - - return 0; -} + sp = alloc_shared_page_entry(csv, hva, page); + if (!sp) { + unpin_user_page(page); + return -ENOMEM; + } -/** - * Return negative error code on fail, - * or return the number of pages unpinned successfully - */ -static int csv3_unpin_shared_memory(struct kvm *kvm, gpa_t gpa, u32 num_pages) -{ - struct kvm_csv_info *csv; - struct shared_page *sp; - gfn_t gfn; - unsigned long i; - int unpin_cnt = 0; + if (insert_shared_page_entry_locked(&csv->sp_mgr, sp, &old_sp)) { + *pfn = page_to_pfn(page); + } else { + pr_err_ratelimited("%s: search fail but insertion found\n", + __func__); + unpin_user_page(page); + free_shared_page_entry(csv, sp); - csv = &to_kvm_svm_csv(kvm)->csv_info; - gfn = gpa_to_gfn(gpa); + shared_page_entry_set_bit_locked(&csv->sp_mgr, hva, old_sp); - mutex_lock(&csv->sp_lock); - for (i = 0; i < num_pages; i++, gfn++) { - sp = shared_page_remove(&csv->sp_mgr, gfn); - if (sp) { - unpin_user_page(sp->page); - kmem_cache_free(csv->sp_slab, sp); - csv->sp_mgr.count--; - unpin_cnt++; + pg_off = (hva & ~old_sp->track_hva) >> PAGE_SHIFT; + *pfn = page_to_pfn(old_sp->track_page) + pg_off; } } - mutex_unlock(&csv->sp_lock); - return unpin_cnt; + return 0; } static int __pfn_mapping_level(struct kvm *kvm, gfn_t gfn, @@ -2523,7 +2795,7 @@ static int csv3_page_fault(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, level = CSV3_PG_LEVEL_4K; else { mutex_lock(&csv->sp_lock); - ret = csv3_pin_shared_memory(vcpu, slot, gfn, &pfn); + ret = csv_pin_shared_memory_locked(vcpu, slot, gfn, &pfn); mutex_unlock(&csv->sp_lock); if (ret) { /* Resume guest to retry #NPF. */ @@ -2546,6 +2818,235 @@ static int csv3_page_fault(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, return ret; } +/** + * csv_release_shared_memory - Release shared pages and notify userspace to + * madvise. + * @params: Pointer to ioctl input/output structure, containing GPA range + * and output fields. 
+ * + * This function processes the KVM_CSV3_RELEASE_SHARED_MEMORY command by: + * - Iterating over each page in the specified GPA range; + * - For each page, attempting to remove it from the shared page manager; + * - If successful, updating the total number of unpinned pages; + * - Setting the start_hva to the first HVA that needs madvise (for + * contiguous range). + * + * The kernel ensures that: + * - Only one thread can modify the shared page manager at a time (via + * sp_lock); + * - The start_hva is set to the lowest HVA among all removed pages; + * - Userspace VMM should call madvise() on [start_hva, start_hva + + * unpinned * PAGE_SIZE). + * + * Return: 0 on success, negative error code on failure (though none are + * currently returned). + */ +static int csv_release_shared_memory(struct kvm *kvm, + struct kvm_csv3_handle_memory *params) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct shared_page *sp; + u64 hva; + gfn_t gfn = gpa_to_gfn(params->gpa); + u32 num_pages = params->num_pages; + int i; + + /* Initialize output fields */ + params->start_hva = 0; + params->unpinned = 0; + params->handled0 = 0; + + /* Protect shared page manager from concurrent access */ + mutex_lock(&csv->sp_lock); + for (i = 0; i < num_pages; i++, gfn++) { + hva = gfn_to_hva(kvm, gfn); + + if (unlikely(!params->start_hva)) + params->start_hva = hva; + + /* + * The madvise requested from user space need provide hva and + * length. This handler should fill back a range of contiguous + * hva. We maintain start_hva as the smallest hva among all + * released pages. + */ + if ((params->start_hva >> PAGE_SHIFT) + + params->unpinned != (hva >> PAGE_SHIFT)) + break; + + /* + * Try to remove the shared_page entry corresponding to this + * hva. If remove_shared_page_entry_locked() found and removed + * the entry from manager tree, it will return the entry, and + * we can release the page tracked by the entry. + */ + sp = remove_shared_page_entry_locked(&csv->sp_mgr, hva); + if (sp) { + /* Update @unpinned only when an sp is removed */ + params->unpinned += 1U << sp->order; + + /* + * If this page is a tail page of a compound page, its + * HVA may be smaller than the head page's HVA. Since + * madvise must cover the entire compound page, we + * update start_hva to the smallest HVA in the range. + */ + if (sp->track_hva < params->start_hva) { + params->handled0 = (1U << sp->order) - + ((params->start_hva - sp->track_hva) >> PAGE_SHIFT); + params->start_hva = sp->track_hva; + } else { + params->handled0 += 1U << sp->order; + } + + if (page_maybe_dma_pinned(sp->track_page)) + unpin_user_page(sp->track_page); + else + pr_err_ratelimited("%s: the track_page was not pinned\n", + __func__); + free_shared_page_entry(csv, sp); + } else { + break; + } + + cond_resched(); + } + mutex_unlock(&csv->sp_lock); + + return 0; +} + +static inline unsigned long get_vma_flags(unsigned long addr) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + unsigned long vm_flags = 0; + + down_read(&mm->mmap_lock); + vma = find_vma(mm, addr); + if (vma && vma->vm_start <= addr) + vm_flags = vma->vm_flags; + up_read(&mm->mmap_lock); + + return vm_flags; +} + +#define RETRY_CSV3_PIN_MAX 5 +#define RETRY_CSV3_ALLOC_SP_MAX 5 + +/** + * csv_get_shared_memory - Pin a number of pages specified in the input. + * @params: Pointer to ioctl input/output structure, containing GPA range and + * output field. 
+ *
+ * This function processes the KVM_CSV3_GET_SHARED_MEMORY command by:
+ * - Iterating over each page in the specified GPA range;
+ * - For each page, checking if it is already tracked in the shared page
+ *   manager;
+ * - If not, attempting to pin it and create a new entry;
+ * - Incrementing @pinned for every page that is successfully pinned;
+ * - Incrementing @handled1 for every GPA that is handled.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int csv_get_shared_memory(struct kvm *kvm,
+				 struct kvm_csv3_handle_memory *params)
+{
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct shared_page *sp, *old_sp;
+	struct page *page;
+	unsigned long vm_flags = 0;
+	u64 hva;
+	gfn_t gfn = gpa_to_gfn(params->gpa);
+	u32 num_pages = params->num_pages;
+	int i, npinned, try_pin, try_alloc_sp;
+	int ret = 0;
+
+	/* Initialize output field */
+	params->pinned = 0;
+	params->handled1 = 0;
+
+	/* Protect shared page manager from concurrent access */
+	mutex_lock(&csv->sp_lock);
+	for (i = 0; i < num_pages; i++, gfn++) {
+		hva = gfn_to_hva(kvm, gfn);
+		if (kvm_is_error_hva(hva)) {
+			/*
+			 * If the HVA is invalid (e.g., not mapped), skip this
+			 * page. This prevents unnecessary pinning attempts and
+			 * continues processing remaining pages.
+			 */
+			params->handled1++;
+			continue;
+		}
+
+		if (unlikely(!vm_flags))
+			vm_flags = get_vma_flags((unsigned long)hva);
+
+		/*
+		 * Reject hugetlbfs-backed pages to prevent severe page
+		 * migration pressure when 2M hugetlb is used with CMA.
+		 */
+		if (!vm_flags || (vm_flags & VM_HUGETLB)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		sp = shared_page_entry_set_bit_locked(&csv->sp_mgr, hva, NULL);
+		if (sp) {
+			params->handled1++;
+			continue;
+		}
+
+		try_pin = 0;
+		try_alloc_sp = 0;
+retry_pin:
+		/*
+		 * The page is not yet tracked. We need to pin it and create a
+		 * new entry. Acquire mmap_write_lock to safely pin the page.
+ */ + mmap_write_lock(current->mm); + npinned = pin_user_pages(hva, 1, FOLL_WRITE | FOLL_LONGTERM, &page); + mmap_write_unlock(current->mm); + if (npinned != 1) { + if (++try_pin <= RETRY_CSV3_PIN_MAX) + goto retry_pin; + pr_err_ratelimited("%s: try pin fail\n", __func__); + break; + } + +retry_alloc_sp: + sp = alloc_shared_page_entry(csv, hva, page); + if (!sp) { + if (++try_alloc_sp <= RETRY_CSV3_ALLOC_SP_MAX) + goto retry_alloc_sp; + pr_err_ratelimited("%s: try alloc sp fail\n", __func__); + unpin_user_page(page); + break; + } + + if (!insert_shared_page_entry_locked(&csv->sp_mgr, sp, &old_sp)) { + pr_err_ratelimited("%s: search fail but insertion found\n", + __func__); + unpin_user_page(page); + free_shared_page_entry(csv, sp); + + shared_page_entry_set_bit_locked(&csv->sp_mgr, hva, old_sp); + } else { + /* Update @pinned only upon insertion of a new sp */ + params->pinned += 1U << sp->order; + } + params->handled1++; + + cond_resched(); + } + +out: + mutex_unlock(&csv->sp_lock); + + return ret; +} + static void csv_vm_destroy(struct kvm *kvm) { struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; @@ -2562,13 +3063,27 @@ static void csv_vm_destroy(struct kvm *kvm) mutex_lock(&csv->sp_lock); while ((node = rb_first(&csv->sp_mgr.root))) { sp = rb_entry(node, struct shared_page, node); + /* Remove shared page entry from the manager tree */ rb_erase(&sp->node, &csv->sp_mgr.root); - unpin_user_page(sp->page); - kmem_cache_free(csv->sp_slab, sp); + /* Update shared page statistics */ csv->sp_mgr.count--; + csv->sp_mgr.nr_pages -= 1UL << sp->order; + /* Putback the tracked page to system */ + if (page_maybe_dma_pinned(sp->track_page)) + unpin_user_page(sp->track_page); + else + pr_err_ratelimited("%s: the track_page was not pinned\n", + __func__); + free_shared_page_entry(csv, sp); + + cond_resched(); } mutex_unlock(&csv->sp_lock); + if (csv->sp_mgr.count || csv->sp_mgr.nr_pages) + pr_err("%s: SP_MGR_ERR: track fault, cnt:%lld nr_pages:0x%lx\n", + __func__, csv->sp_mgr.count, csv->sp_mgr.nr_pages); + kmem_cache_destroy(csv->sp_slab); csv->sp_slab = NULL; @@ -2649,10 +3164,12 @@ static void csv_guest_memory_reclaimed(struct kvm *kvm) static int csv3_handle_memory(struct kvm *kvm, struct kvm_sev_cmd *argp) { + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; struct kvm_csv3_handle_memory params; int r = -EINVAL; - if (!csv3_guest(kvm)) + if (!csv3_guest(kvm) || + !(csv->inuse_ext & KVM_CAP_HYGON_COCO_EXT_CSV3_SP_MGR)) return -ENOTTY; if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, @@ -2661,12 +3178,20 @@ static int csv3_handle_memory(struct kvm *kvm, struct kvm_sev_cmd *argp) switch (params.opcode) { case KVM_CSV3_RELEASE_SHARED_MEMORY: - r = csv3_unpin_shared_memory(kvm, params.gpa, params.num_pages); + r = csv_release_shared_memory(kvm, ¶ms); break; - default: + case KVM_CSV3_GET_SHARED_MEMORY: + r = csv_get_shared_memory(kvm, ¶ms); break; + default: + goto out; } + if (copy_to_user((void __user *)(uintptr_t)argp->data, ¶ms, + sizeof(params))) + return -EFAULT; + +out: return r; }; @@ -3141,6 +3666,7 @@ static int csv_get_hygon_coco_extension(struct kvm *kvm) csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET; if (csv->fw_ext & CSV_EXT_CSV3_LFINISH_EX) csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_LFINISH_EX; + csv->kvm_ext |= KVM_CAP_HYGON_COCO_EXT_CSV3_SP_MGR; } csv->kvm_ext_valid = true; } diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 3531a187d5d9..9a726bc67e97 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h @@ -1865,6 
+1865,106 @@ TRACE_EVENT(kvm_rmp_fault, __entry->error_code, __entry->rmp_level, __entry->psmash_ret) ); +/* + * Tracepoint for the Hygon CSV3 shared page processing + */ +TRACE_EVENT(kvm_csv3_sp_insert_dup, + TP_PROTO(unsigned long n_track_pfn, + u64 n_track_hva, + unsigned int n_order, + unsigned long o_track_pfn, + u64 o_track_hva, + unsigned int o_order), + TP_ARGS(n_track_pfn, n_track_hva, n_order, + o_track_pfn, o_track_hva, o_order), + + TP_STRUCT__entry( + __field(unsigned long, n_track_pfn) + __field(u64, n_track_hva) + __field(unsigned int, n_order) + __field(unsigned long, o_track_pfn) + __field(u64, o_track_hva) + __field(unsigned int, o_order) + ), + + TP_fast_assign( + __entry->n_track_pfn = n_track_pfn; + __entry->n_track_hva = n_track_hva; + __entry->n_order = n_order; + __entry->o_track_pfn = o_track_pfn; + __entry->o_track_hva = o_track_hva; + __entry->o_order = o_order; + ), + + TP_printk("n_sp pfn:0x%lx, hva:0x%llx, order:%u " + "o_sp pfn:0x%lx, hva:0x%llx, order:%u", + __entry->n_track_pfn, __entry->n_track_hva, __entry->n_order, + __entry->o_track_pfn, __entry->o_track_hva, __entry->o_order) +); + +TRACE_EVENT(kvm_csv3_sp_insert, + TP_PROTO(unsigned long n_track_pfn, u64 n_track_hva, unsigned int n_order), + TP_ARGS(n_track_pfn, n_track_hva, n_order), + + TP_STRUCT__entry( + __field(unsigned long, n_track_pfn) + __field(u64, n_track_hva) + __field(unsigned int, n_order) + ), + + TP_fast_assign( + __entry->n_track_pfn = n_track_pfn; + __entry->n_track_hva = n_track_hva; + __entry->n_order = n_order; + ), + + TP_printk("n_sp pfn:0x%lx, hva:0x%llx order:%u", + __entry->n_track_pfn, __entry->n_track_hva, __entry->n_order) +); + +TRACE_EVENT(kvm_csv3_sp_hit, + TP_PROTO(unsigned long track_pfn, u64 track_hva, u64 hva, unsigned int order), + TP_ARGS(track_pfn, track_hva, hva, order), + + TP_STRUCT__entry( + __field(unsigned long, track_pfn) + __field(u64, track_hva) + __field(u64, hva) + __field(unsigned int, order) + ), + + TP_fast_assign( + __entry->track_pfn = track_pfn; + __entry->track_hva = track_hva; + __entry->hva = hva; + __entry->order = order; + ), + + TP_printk("sp pfn:0x%lx, hva:0x%llx, subhva:0x%llx, order:%u", + __entry->track_pfn, __entry->track_hva, + __entry->hva, __entry->order) +); + +TRACE_EVENT(kvm_csv3_sp_remove, + TP_PROTO(unsigned long track_pfn, u64 track_hva, unsigned int order), + TP_ARGS(track_pfn, track_hva, order), + + TP_STRUCT__entry( + __field(unsigned long, track_pfn) + __field(u64, track_hva) + __field(unsigned int, order) + ), + + TP_fast_assign( + __entry->track_pfn = track_pfn; + __entry->track_hva = track_hva; + __entry->order = order; + ), + + TP_printk("sp pfn:0x%lx, hva:0x%llx order:%u", + __entry->track_pfn, __entry->track_hva, __entry->order) +); + #endif /* _TRACE_KVM_H */ #undef TRACE_INCLUDE_PATH diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3e59e1672043..21855275e867 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -13990,6 +13990,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_rmp_fault); +EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_csv3_sp_insert_dup); +EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_csv3_sp_insert); +EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_csv3_sp_hit); +EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_csv3_sp_remove); static int __init kvm_x86_init(void) { diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 7080b43ac54e..2da470d453df 100644 --- 
a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1247,6 +1247,8 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_HYGON_COCO_EXT_CSV3_INJ_SECRET	(1 << 2)
 /* support finishing the launch process via the CSV3_CMD_LAUNCH_FINISH_EX firmware API */
 #define KVM_CAP_HYGON_COCO_EXT_CSV3_LFINISH_EX	(1 << 3)
+/* support userspace requests to manage CSV3 shared pages */
+#define KVM_CAP_HYGON_COCO_EXT_CSV3_SP_MGR	(1 << 4)
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -2205,12 +2207,71 @@ struct kvm_csv3_receive_encrypt_context {
 	__u32 trans_len;
 };
 
-#define KVM_CSV3_RELEASE_SHARED_MEMORY	(0x0001)
-
+/**
+ * struct kvm_csv3_handle_memory - IOCTL data structure for CSV3 memory
+ * operations.
+ *
+ * This union is used to pass input and output parameters between userspace
+ * and kernel for KVM_CSV3 memory management commands.
+ *
+ * It supports two ioctl commands:
+ * - KVM_CSV3_RELEASE_SHARED_MEMORY: Notify userspace VMM to madvise pages
+ *   as unused, releasing shared memory mappings.
+ * - KVM_CSV3_GET_SHARED_MEMORY: Query how many pages are currently pinned
+ *   at a given GPA range.
+ *
+ * The union contains three nested structs:
+ * - Input (for both commands)
+ * - Output (for KVM_CSV3_RELEASE_SHARED_MEMORY)
+ * - Output (for KVM_CSV3_GET_SHARED_MEMORY)
+ */
 struct kvm_csv3_handle_memory {
-	__u64 gpa;
-	__u32 num_pages;
-	__u32 opcode;
+	union {
+		/* Input of the ioctl command. */
+		/**
+		 * @gpa: The start guest physical address (GPA) to be handled.
+		 * @num_pages: The number of consecutive pages starting at
+		 *             @gpa.
+		 * @opcode: The command ID to determine which operation to
+		 *          perform.
+		 */
+		struct {
+			__u64 gpa;
+			__u32 num_pages;
+#define KVM_CSV3_RELEASE_SHARED_MEMORY	0x0001
+#define KVM_CSV3_GET_SHARED_MEMORY	0x0002
+			__u32 opcode;
+		};
+		/* Output of ioctl command KVM_CSV3_RELEASE_SHARED_MEMORY. */
+		/**
+		 * @start_hva: The start host virtual address (HVA) that the
+		 *             userspace VMM should madvise as unused.
+		 * @unpinned: The number of pages that were successfully
+		 *            unpinned and can now be madvised in userspace.
+		 * @handled0: The number of pages, starting from @gpa, that
+		 *            have been handled. The userspace VMM needs this
+		 *            information to make progress.
+		 */
+		struct {
+			__u64 start_hva;
+			__u32 unpinned;
+			__u32 handled0;
+		};
+		/* Output of ioctl command KVM_CSV3_GET_SHARED_MEMORY. */
+		/**
+		 * @unused1: Padding for alignment.
+		 * @pinned: The number of pages currently pinned for the
+		 *          specified GPA range.
+		 * @handled1: The number of pages, starting from @gpa, that
+		 *            have been handled. The userspace VMM needs this
+		 *            information to make progress.
+		 */
+		struct {
+			__u64 unused1;
+			__u32 pinned;
+			__u32 handled1;
+		};
+	};
 };
 
 /* Available with KVM_CAP_ARM_RME, only for VMs with KVM_VM_TYPE_ARM_REALM */
-- 
Gitee

From d88902822ea40a186e69078abca9c663beee89e5 Mon Sep 17 00:00:00 2001
From: Ge Yang
Date: Tue, 2 Sep 2025 09:00:42 +0800
Subject: [PATCH 08/10] x86/csv: Allocate memory cyclically from different
 csv_cma

Upstream: no

Currently, when starting a CSV3 virtual machine, the function
csv_alloc_from_contiguous() is called to allocate memory, starting from
csv_cma[0], then csv_cma[1], and so on in sequence. When multiple CSV3
virtual machines are started concurrently, this easily causes
contention on the same csv_cma, which hurts performance. Adjust the
logic so that each call to csv_alloc_from_contiguous() allocates from
the csv_cma that follows the one used by the previous allocation.
This adjustment can significantly improve performance. The specific
comparison data is as follows (average memory allocation time when
starting 100 4G CSV3 virtual machines concurrently):

  before: 30s
  after:  10s

Hygon-SIG: commit none hygon x86/csv: Allocate memory cyclically from different csv_cma

Signed-off-by: Ge Yang
Signed-off-by: hanliyang
---
 arch/x86/mm/mem_encrypt_hygon.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c
index 69fd83765c1b..e321edbd2dfa 100644
--- a/arch/x86/mm/mem_encrypt_hygon.c
+++ b/arch/x86/mm/mem_encrypt_hygon.c
@@ -198,6 +198,7 @@ struct csv_cma {
 
 struct cma_array {
 	unsigned long count;
+	unsigned int index;
 	struct csv_cma csv_cma[];
 };
 
@@ -269,6 +270,7 @@ static void __init csv_cma_reserve_mem(void)
 		}
 
 		array->count = 0;
+		array->index = 0;
 		csv_contiguous_pernuma_area[node] = array;
 
 		for (i = 0; i < count; i++) {
@@ -378,7 +380,8 @@ phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed,
 
 		count = array->count;
 		while (count) {
-			csv_cma = &array->csv_cma[count - 1];
+			array->index = (array->index + 1) % count;
+			csv_cma = &array->csv_cma[array->index];
 
 			/*
 			 * The value check of csv_cma->fast is lockless, but
-- 
Gitee

From c736686c6da7ab0f9ff52356940184aae08ace08 Mon Sep 17 00:00:00 2001
From: NiZhiguang
Date: Tue, 5 Aug 2025 06:37:40 +0000
Subject: [PATCH 09/10] x86/csv: Add function to get free size of CSV3 CMA

Upstream: no

The private memory of a CSV3 guest is allocated from the CSV3 CMA.
Users need to know the free size of the CSV3 CMA before launching a
CSV3 guest. So, add the sysfs interface /sys/kernel/mm/csv3_cma/mem_info
to show the details of memory used by CSV3:

1. show memory usage of CSV3 shared memory.
2. show memory usage of CSV3 private memory.
3. show memory usage of CSV3 guest NPT page tables.
4. show memory usage of the system management metadata of CSV3.

This commit helps to better observe and analyze memory allocation
behavior for CSV3 guests.
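An illustrative read of the new interface (the numbers below are
examples only; actual values depend on the system):

	$ cat /sys/kernel/mm/csv3_cma/mem_info
	Node0:
	  csv3 shared size:        20 MiB
	  total cma size:       65536 MiB
	  csv3 cma used:         8192 MiB
	All Nodes:
	  csv3 shared size:        20 MiB
	  total cma size:       65536 MiB
	  csv3 cma used:         8192 MiB
	  npt table:              144 MiB
	  csv3 private memory:   8192 MiB
	  meta data:               64 MiB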
Hygon-SIG: commit none hygon x86/csv: Add function to get free size of CSV3 CMA Signed-off-by: yangge Signed-off-by: NiZhiguang Signed-off-by: hanliyang --- arch/x86/include/asm/csv.h | 9 ++ arch/x86/kvm/svm/csv.c | 34 ++++++ arch/x86/mm/mem_encrypt_hygon.c | 171 ++++++++++++++++++++++++++++- drivers/crypto/ccp/hygon/csv-dev.c | 5 + 4 files changed, 218 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h index 18ddf881a6fc..2525c52d42fd 100644 --- a/arch/x86/include/asm/csv.h +++ b/arch/x86/include/asm/csv.h @@ -12,6 +12,8 @@ #ifndef __ASSEMBLY__ +#include + #ifdef CONFIG_HYGON_CSV struct csv_mem { @@ -24,6 +26,13 @@ struct csv_mem { extern struct csv_mem *csv_smr; extern unsigned int csv_smr_num; +#ifdef CONFIG_SYSFS +extern atomic_long_t csv3_npt_size; +extern atomic_long_t csv3_pri_mem; +extern unsigned long csv3_meta; +extern atomic_long_t csv3_shared_mem[MAX_NUMNODES]; +#endif /* CONFIG_SYSFS */ + void __init early_csv_reserve_mem(void); phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 8c8a92fd692a..cfbb6e3608be 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -906,6 +906,11 @@ struct kvm_csv_info { bool kvm_ext_valid; /* if @kvm_ext field is valid */ u32 kvm_ext; /* extensions supported by KVM */ u32 inuse_ext; /* extensions inused by current VM */ + +#ifdef CONFIG_SYSFS + unsigned long npt_size; + unsigned long pri_mem; +#endif }; struct kvm_svm_csv { @@ -919,6 +924,20 @@ struct secure_memory_region { u64 hpa; }; +#ifdef CONFIG_SYSFS +static void update_csv_share_mem(struct page *page, bool add) +{ + int nid = page_to_nid(page); + + if (add) + atomic_long_add(page_size(page), &csv3_shared_mem[nid]); + else + atomic_long_sub(page_size(page), &csv3_shared_mem[nid]); +} +#else +static void update_csv_share_mem(struct page *page, bool add) { } +#endif /* CONFIG_SYSFS */ + /** * insert_shared_page_entry_locked - Insert a shared_page into the manager tree * @mgr: Pointer to the shared page manager (must be valid). 
@@ -999,6 +1018,7 @@ static bool insert_shared_page_entry_locked(struct shared_page_mgr *mgr, /* Update shared page statistics */ mgr->count++; mgr->nr_pages += 1UL << new_sp->order; + update_csv_share_mem(new_sp->track_page, true); return true; } @@ -1155,6 +1175,7 @@ struct shared_page *remove_shared_page_entry_locked(struct shared_page_mgr *mgr, rb_erase(&sp->node, &mgr->root); mgr->count--; mgr->nr_pages -= 1UL << sp->order; + update_csv_share_mem(sp->track_page, false); } return sp; @@ -1441,6 +1462,13 @@ static int csv3_set_guest_private_memory(struct kvm *kvm, struct kvm_sev_cmd *ar list_splice(&tmp_list, &csv->smr_list); +#ifdef CONFIG_SYSFS + csv->npt_size = ALIGN(nr_pages * 9, 1UL << smr_entry_shift); + csv->pri_mem = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift); + atomic_long_add(csv->npt_size, &csv3_npt_size); + atomic_long_add(csv->pri_mem, &csv3_pri_mem); +#endif + goto done; e_free_smr: @@ -3068,6 +3096,7 @@ static void csv_vm_destroy(struct kvm *kvm) /* Update shared page statistics */ csv->sp_mgr.count--; csv->sp_mgr.nr_pages -= 1UL << sp->order; + update_csv_share_mem(sp->track_page, false); /* Putback the tracked page to system */ if (page_maybe_dma_pinned(sp->track_page)) unpin_user_page(sp->track_page); @@ -3110,6 +3139,11 @@ static void csv_vm_destroy(struct kvm *kvm) kfree(smr); } } + +#ifdef CONFIG_SYSFS + atomic_long_sub(csv->npt_size, &csv3_npt_size); + atomic_long_sub(csv->pri_mem, &csv3_pri_mem); +#endif } } diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c index e321edbd2dfa..cfca49bc1795 100644 --- a/arch/x86/mm/mem_encrypt_hygon.c +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -20,6 +20,10 @@ #include #include #include +#include +#include +#include +#include #include #include #include @@ -191,13 +195,40 @@ EXPORT_SYMBOL_GPL(csv_smr); unsigned int csv_smr_num; EXPORT_SYMBOL_GPL(csv_smr_num); +#ifdef CONFIG_SYSFS + +/** + * Global counters exposed via /sys/kernel/mm/csv3_cma/mem_info. Update + * atomically during VM creation/destruction. + * + * csv3_npt_size: total size of NPT tables allocated. + * csv3_pri_mem: total private memory allocated for CSV guests. + * csv3_meta: metadata overhead for CSV memory regions. + * csv3_shared_mem: size of all the CSV3 VMs' shared memory. 
+ */ +atomic_long_t csv3_npt_size = ATOMIC_LONG_INIT(0); +EXPORT_SYMBOL_GPL(csv3_npt_size); + +atomic_long_t csv3_pri_mem = ATOMIC_LONG_INIT(0); +EXPORT_SYMBOL_GPL(csv3_pri_mem); + +unsigned long csv3_meta; +EXPORT_SYMBOL_GPL(csv3_meta); + +atomic_long_t csv3_shared_mem[MAX_NUMNODES]; +EXPORT_SYMBOL_GPL(csv3_shared_mem); + +#endif /* CONFIG_SYSFS */ + struct csv_cma { + int nid; int fast; struct cma *cma; }; struct cma_array { unsigned long count; + atomic64_t csv_used_size; unsigned int index; struct csv_cma csv_cma[]; }; @@ -270,12 +301,14 @@ static void __init csv_cma_reserve_mem(void) } array->count = 0; + atomic64_set(&array->csv_used_size, 0); array->index = 0; csv_contiguous_pernuma_area[node] = array; for (i = 0; i < count; i++) { csv_cma = &array->csv_cma[i]; csv_cma->fast = 1; + csv_cma->nid = node; snprintf(name, sizeof(name), "csv-n%dc%d", node, i); ret = cma_declare_contiguous_nid(0, CSV_CMA_SIZE, 0, 1 << CSV_MR_ALIGN_BITS, PMD_SHIFT - PAGE_SHIFT, @@ -352,6 +385,7 @@ phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, int nid; int nr_nodes; struct page *page = NULL; + struct cma_array *array = NULL; phys_addr_t phys_addr; int count; struct csv_cma *csv_cma; @@ -373,7 +407,7 @@ phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, nid = next_node_in(nid, *nodes_allowed); for (; nr_nodes > 0; nid = next_node_in(nid, *nodes_allowed), nr_nodes--) { - struct cma_array *array = csv_contiguous_pernuma_area[nid]; + array = csv_contiguous_pernuma_area[nid]; if (!array) continue; @@ -415,6 +449,7 @@ phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, } success: + atomic64_add(PAGE_ALIGN(size), &array->csv_used_size); phys_addr = page_to_phys(page); clflush_cache_range(__va(phys_addr), size); @@ -425,6 +460,7 @@ EXPORT_SYMBOL_GPL(csv_alloc_from_contiguous); void csv_release_to_contiguous(phys_addr_t pa, size_t size) { struct csv_cma *csv_cma; + struct cma_array *array = NULL; struct page *page = pfn_to_page(pa >> PAGE_SHIFT); WARN_ON(!page); @@ -435,7 +471,140 @@ void csv_release_to_contiguous(phys_addr_t pa, size_t size) page->private = 0; csv_cma->fast = 1; cma_release(csv_cma->cma, page, PAGE_ALIGN(size) >> PAGE_SHIFT); + array = csv_contiguous_pernuma_area[csv_cma->nid]; + atomic64_sub(PAGE_ALIGN(size), &array->csv_used_size); } } } EXPORT_SYMBOL_GPL(csv_release_to_contiguous); + +#ifdef CONFIG_SYSFS + +/** + * The "mem_info" file where the free size of csv cma is read from. 
+ */ +static ssize_t mem_info_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int node; + int offset = 0; + unsigned long csv_used_size, total_used_size = 0; + unsigned long csv_size, total_csv_size = 0; + unsigned long shared_mem, total_shared_mem = 0; + unsigned long npt_size, pri_mem; + struct cma_array *array = NULL; + unsigned long bytes_per_mib = 1UL << 20; + + for_each_node_state(node, N_ONLINE) { + array = csv_contiguous_pernuma_area[node]; + if (array == NULL) { + csv_size = 0; + csv_used_size = 0; + shared_mem = 0; + + offset += snprintf(buf + offset, PAGE_SIZE - offset, "Node%d:\n", node); + offset += snprintf(buf + offset, PAGE_SIZE - offset, + " csv3 shared size:%10lu MiB\n", shared_mem); + offset += snprintf(buf + offset, PAGE_SIZE - offset, + " total cma size:%12lu MiB\n", csv_size); + offset += snprintf(buf + offset, PAGE_SIZE - offset, + " csv3 cma used:%13lu MiB\n", csv_used_size); + continue; + } + + shared_mem = DIV_ROUND_UP(atomic_long_read(&csv3_shared_mem[node]), + bytes_per_mib); + csv_size = DIV_ROUND_UP(array->count * CSV_CMA_SIZE, + bytes_per_mib); + csv_used_size = DIV_ROUND_UP(atomic64_read(&array->csv_used_size), + bytes_per_mib); + + total_shared_mem += shared_mem; + total_csv_size += csv_size; + total_used_size += csv_used_size; + + offset += snprintf(buf + offset, PAGE_SIZE - offset, "Node%d:\n", node); + offset += snprintf(buf + offset, PAGE_SIZE - offset, + " csv3 shared size:%10lu MiB\n", shared_mem); + offset += snprintf(buf + offset, PAGE_SIZE - offset, + " total cma size:%12lu MiB\n", csv_size); + offset += snprintf(buf + offset, PAGE_SIZE - offset, + " csv3 cma used:%13lu MiB\n", csv_used_size); + } + + npt_size = DIV_ROUND_UP(atomic_long_read(&csv3_npt_size), bytes_per_mib); + pri_mem = DIV_ROUND_UP(atomic_long_read(&csv3_pri_mem), bytes_per_mib); + + offset += snprintf(buf + offset, PAGE_SIZE - offset, "All Nodes:\n"); + offset += snprintf(buf + offset, PAGE_SIZE - offset, + " csv3 shared size:%10lu MiB\n", total_shared_mem); + offset += snprintf(buf + offset, PAGE_SIZE - offset, + " total cma size:%12lu MiB\n", total_csv_size); + offset += snprintf(buf + offset, PAGE_SIZE - offset, + " csv3 cma used:%13lu MiB\n", total_used_size); + offset += snprintf(buf + offset, PAGE_SIZE - offset, + " npt table:%16lu MiB\n", npt_size); + offset += snprintf(buf + offset, PAGE_SIZE - offset, + " csv3 private memory:%6lu MiB\n", pri_mem); + offset += snprintf(buf + offset, PAGE_SIZE - offset, + " meta data:%16lu MiB\n", + DIV_ROUND_UP(csv3_meta, bytes_per_mib)); + + return offset; +} + +static struct kobj_attribute csv_cma_attr = __ATTR(mem_info, 0444, mem_info_show, NULL); + +/* + * Create a group of attributes so that we can create and destroy them all + * at once. 
+ */ +static struct attribute *csv_cma_attrs[] = { + &csv_cma_attr.attr, + NULL, /* need to NULL terminate the list of attributes */ +}; + +static const struct attribute_group csv_cma_attr_group = { + .attrs = csv_cma_attrs, +}; + +static struct kobject *csv_cma_kobj_root; + +static int __init csv_cma_sysfs_init(void) +{ + int err, i; + + if (!is_x86_vendor_hygon() || !boot_cpu_has(X86_FEATURE_CSV3)) + return 0; + + csv_cma_kobj_root = kobject_create_and_add("csv3_cma", mm_kobj); + if (!csv_cma_kobj_root) + return -ENOMEM; + + err = sysfs_create_group(csv_cma_kobj_root, &csv_cma_attr_group); + if (err) + goto out; + + for (i = 0; i < MAX_NUMNODES; i++) + atomic_long_set(&csv3_shared_mem[i], 0); + + return 0; + +out: + kobject_put(csv_cma_kobj_root); + return err; +} + +static void __exit csv_cma_sysfs_exit(void) +{ + if (!is_x86_vendor_hygon() || !boot_cpu_has(X86_FEATURE_CSV3)) + return; + + if (csv_cma_kobj_root != NULL) + kobject_put(csv_cma_kobj_root); +} + +module_init(csv_cma_sysfs_init); +module_exit(csv_cma_sysfs_exit); + +#endif /* CONFIG_SYSFS */ diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 92a0100532fc..5d88ab87f6f5 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -754,8 +754,13 @@ int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error csv_release_to_contiguous(cmd_set_smcr->base_address, 1UL << CSV_MR_ALIGN_BITS); + goto e_free_cmd_set_smcr; } +#ifdef CONFIG_SYSFS + csv3_meta = cmd_set_smcr->size; +#endif + e_free_cmd_set_smcr: kfree((void *)cmd_set_smcr); e_free_smr_area: -- Gitee From c1c5491d527e769ebbd45bb6b28e68f464ef0f5f Mon Sep 17 00:00:00 2001 From: Zhiguang Ni Date: Tue, 21 Oct 2025 17:38:14 +0800 Subject: [PATCH 10/10] x86/mm: csv: Increase the maximum value of `csv_mem_percentage` from 80 to 95 Upstream: no Increase the maximum value of `csv_mem_percentage` from 80 to 95. Please note that on systems with limited memory, this parameter should be configured with caution. Make sure to reserve sufficient memory for the host OS to operate properly. Hygon-SIG: commit none hygon x86/mm: csv: Increase the maximum value of `csv_mem_percentage` from 80 to 95 Signed-off-by: Zhiguang Ni Signed-off-by: hanliyang --- .../arch/x86/hygon-secure-virtualization.rst | 2 +- arch/x86/mm/mem_encrypt_hygon.c | 19 ++++++++++++------- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/Documentation/arch/x86/hygon-secure-virtualization.rst b/Documentation/arch/x86/hygon-secure-virtualization.rst index 3e709af93758..dace34d7e75f 100644 --- a/Documentation/arch/x86/hygon-secure-virtualization.rst +++ b/Documentation/arch/x86/hygon-secure-virtualization.rst @@ -89,7 +89,7 @@ the linux kernel command line with csv_mem_size or csv_mem_percentage:: from these CMAs. For instance, csv_mem_percentage=60, means 60% system memory is reserved for CSV3. - The maximum percentage is 80. And the default percentage is 0. + The maximum percentage is 95. And the default percentage is 0. 
 
 Limitations
  The reserved CSV3 memory within CMA cannot be used by kernel or any application that

diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c
index cfca49bc1795..c3cc7b00d943 100644
--- a/arch/x86/mm/mem_encrypt_hygon.c
+++ b/arch/x86/mm/mem_encrypt_hygon.c
@@ -140,6 +140,8 @@ EXPORT_SYMBOL_GPL(csv3_active);
 /**************************** CSV3 CMA interfaces *****************************/
 /******************************************************************************/
 
+#define CSV_MEM_PCT_MAX		(95U)
+
 /* 0 percent of total memory by default*/
 static unsigned char csv_mem_percentage;
 static unsigned long csv_mem_size;
@@ -170,13 +172,15 @@ static int __init cmdline_parse_csv_mem_percentage(char *str)
 
 	ret = kstrtou8(str, 10, &percentage);
 	if (!ret) {
-		csv_mem_percentage = min_t(unsigned char, percentage, 80);
+		csv_mem_percentage = min_t(unsigned char, percentage, CSV_MEM_PCT_MAX);
 		if (csv_mem_percentage != percentage)
-			pr_warn("csv_mem_percentage is limited to 80.\n");
+			pr_warn("csv_mem_percentage is limited to %u.\n",
+				CSV_MEM_PCT_MAX);
 	} else {
 		/* Disable CSV CMA. */
 		csv_mem_percentage = 0;
-		pr_err("csv_mem_percentage is invalid. (0 - 80) is expected.\n");
+		pr_err("csv_mem_percentage is invalid. (0 - %u) is expected.\n",
+		       CSV_MEM_PCT_MAX);
 	}
 
 	return ret;
@@ -364,11 +368,12 @@ void __init early_csv_reserve_mem(void)
 	total_pages = PHYS_PFN(memblock_phys_mem_size());
 	if (csv_mem_size) {
 		if (csv_mem_size < (total_pages << PAGE_SHIFT)) {
-			csv_mem_percentage = csv_mem_size * 100 / (total_pages << PAGE_SHIFT);
-			if (csv_mem_percentage > 80)
-				csv_mem_percentage = 80; /* Maximum percentage */
+			csv_mem_percentage = div64_u64((u64)csv_mem_size * 100,
+						       (u64)total_pages << PAGE_SHIFT);
+			if (csv_mem_percentage > CSV_MEM_PCT_MAX)
+				csv_mem_percentage = CSV_MEM_PCT_MAX; /* Maximum percentage */
 		} else
-			csv_mem_percentage = 80; /* Maximum percentage */
+			csv_mem_percentage = CSV_MEM_PCT_MAX; /* Maximum percentage */
 	}
 
 	if (!csv_mem_percentage) {
-- 
Gitee