diff --git a/0002-Provide-pre-support-for-RAS.patch b/0002-Provide-pre-support-for-RAS.patch new file mode 100644 index 0000000000000000000000000000000000000000..2a323bbb910c806765e077daf0b7cdf616b19386 --- /dev/null +++ b/0002-Provide-pre-support-for-RAS.patch @@ -0,0 +1,6142 @@ +From 7c2e85dc609da105e114eac9e81004dae99103a3 Mon Sep 17 00:00:00 2001 +From: zenghuangyuan +Date: Tue, 2 Dec 2025 11:03:37 +0800 +Subject: [PATCH 02/16] Provide pre-support for RAS + +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + firmware/fw_base.S | 16 + + include/sbi/riscv_encoding.h | 2 + + include/sbi/sbi_bitmap.h | 5 + + include/sbi/sbi_byteorder.h | 80 ++ + include/sbi/sbi_domain.h | 3 + + include/sbi/sbi_domain_data.h | 93 ++ + include/sbi/sbi_ecall.h | 23 +- + include/sbi/sbi_ecall_interface.h | 124 ++ + include/sbi/sbi_error.h | 3 + + include/sbi/sbi_heap.h | 101 ++ + include/sbi/sbi_mpxy.h | 185 +++ + include/sbi/sbi_platform.h | 57 +- + include/sbi/sbi_scratch.h | 79 +- + include/sbi/sbi_slist.h | 33 + + include/sbi/sbi_sse.h | 95 ++ + include/sbi/sbi_trap.h | 5 + + include/sbi/sbi_types.h | 7 + + include/sbi_utils/fdt/fdt_driver.h | 63 + + include/sbi_utils/mailbox/fdt_mailbox.h | 35 + + include/sbi_utils/mailbox/mailbox.h | 180 +++ + include/sbi_utils/mailbox/rpmi_mailbox.h | 33 + + include/sbi_utils/mailbox/rpmi_msgprot.h | 706 ++++++++++ + include/sbi_utils/mpxy/fdt_mpxy.h | 26 + + include/sbi_utils/mpxy/fdt_mpxy_rpmi_mbox.h | 85 ++ + lib/sbi/Kconfig | 9 + + lib/sbi/objects.mk | 10 + + lib/sbi/sbi_domain_data.c | 142 ++ + lib/sbi/sbi_ecall.c | 61 +- + lib/sbi/sbi_ecall_base.c | 27 +- + lib/sbi/sbi_ecall_hsm.c | 7 +- + lib/sbi/sbi_ecall_ipi.c | 5 +- + lib/sbi/sbi_ecall_legacy.c | 42 +- + lib/sbi/sbi_ecall_mpxy.c | 72 + + lib/sbi/sbi_ecall_penglai.c | 18 +- + lib/sbi/sbi_ecall_pmu.c | 15 +- + lib/sbi/sbi_ecall_rfence.c | 5 +- + lib/sbi/sbi_ecall_srst.c | 5 +- + lib/sbi/sbi_ecall_sse.c | 64 + + lib/sbi/sbi_ecall_time.c | 5 +- + lib/sbi/sbi_ecall_vendor.c | 8 
+- + lib/sbi/sbi_heap.c | 280 ++++ + lib/sbi/sbi_init.c | 32 + + lib/sbi/sbi_mpxy.c | 752 ++++++++++ + lib/sbi/sbi_scratch.c | 22 +- + lib/sbi/sbi_sse.c | 1296 ++++++++++++++++++ + lib/sbi/sbi_trap.c | 13 +- + lib/utils/fdt/fdt_driver.c | 93 ++ + lib/utils/fdt/fdt_early_drivers.carray | 3 + + lib/utils/fdt/fdt_fixup.c | 6 +- + lib/utils/fdt/objects.mk | 3 + + platform/fpga/ariane/platform.c | 1 + + platform/fpga/openpiton/platform.c | 2 + + platform/generic/include/platform_override.h | 8 +- + platform/generic/platform.c | 28 +- + platform/kendryte/k210/platform.c | 2 + + platform/nuclei/ux600/platform.c | 2 + + platform/template/platform.c | 1 + + 57 files changed, 4916 insertions(+), 162 deletions(-) + create mode 100644 include/sbi/sbi_byteorder.h + create mode 100644 include/sbi/sbi_domain_data.h + create mode 100644 include/sbi/sbi_heap.h + create mode 100644 include/sbi/sbi_mpxy.h + create mode 100644 include/sbi/sbi_slist.h + create mode 100644 include/sbi/sbi_sse.h + create mode 100644 include/sbi_utils/fdt/fdt_driver.h + create mode 100644 include/sbi_utils/mailbox/fdt_mailbox.h + create mode 100644 include/sbi_utils/mailbox/mailbox.h + create mode 100644 include/sbi_utils/mailbox/rpmi_mailbox.h + create mode 100644 include/sbi_utils/mailbox/rpmi_msgprot.h + create mode 100644 include/sbi_utils/mpxy/fdt_mpxy.h + create mode 100644 include/sbi_utils/mpxy/fdt_mpxy_rpmi_mbox.h + create mode 100644 lib/sbi/sbi_domain_data.c + create mode 100644 lib/sbi/sbi_ecall_mpxy.c + create mode 100644 lib/sbi/sbi_ecall_sse.c + create mode 100644 lib/sbi/sbi_heap.c + create mode 100644 lib/sbi/sbi_mpxy.c + create mode 100644 lib/sbi/sbi_sse.c + create mode 100644 lib/utils/fdt/fdt_driver.c + create mode 100644 lib/utils/fdt/fdt_early_drivers.carray + +diff --git a/firmware/fw_base.S b/firmware/fw_base.S +index 3f622b3b..8e03fe2c 100644 +--- a/firmware/fw_base.S ++++ b/firmware/fw_base.S +@@ -257,20 +257,28 @@ _bss_zero: + /* Preload HART details + * s7 -> HART Count + * s8 
-> HART Stack Size ++ * s9 -> Heap Size ++ * s10 -> Heap Offset + */ + lla a4, platform + #if __riscv_xlen > 32 + lwu s7, SBI_PLATFORM_HART_COUNT_OFFSET(a4) + lwu s8, SBI_PLATFORM_HART_STACK_SIZE_OFFSET(a4) ++ lwu s9, SBI_PLATFORM_HEAP_SIZE_OFFSET(a4) + #else + lw s7, SBI_PLATFORM_HART_COUNT_OFFSET(a4) + lw s8, SBI_PLATFORM_HART_STACK_SIZE_OFFSET(a4) ++ lw s9, SBI_PLATFORM_HEAP_SIZE_OFFSET(a4) + #endif + + /* Setup scratch space for all the HARTs*/ + lla tp, _fw_end + mul a5, s7, s8 + add tp, tp, a5 ++ /* Setup heap base address */ ++ lla s10, _fw_start ++ sub s10, tp, s10 ++ add tp, tp, s9 + /* Keep a copy of tp */ + add t3, tp, zero + /* Counter */ +@@ -285,8 +293,11 @@ _scratch_init: + * t3 -> the firmware end address + * s7 -> HART count + * s8 -> HART stack size ++ * s9 -> Heap Size ++ * s10 -> Heap Offset + */ + add tp, t3, zero ++ sub tp, tp, s9 + mul a5, s8, t1 + sub tp, tp, a5 + li a5, SBI_SCRATCH_SIZE +@@ -298,6 +309,11 @@ _scratch_init: + sub a5, t3, a4 + REG_S a4, SBI_SCRATCH_FW_START_OFFSET(tp) + REG_S a5, SBI_SCRATCH_FW_SIZE_OFFSET(tp) ++ ++ /* Store fw_heap_offset and fw_heap_size in scratch space */ ++ REG_S s10, SBI_SCRATCH_FW_HEAP_OFFSET(tp) ++ REG_S s9, SBI_SCRATCH_FW_HEAP_SIZE_OFFSET(tp) ++ + /* Store next arg1 in scratch space */ + MOV_3R s0, a0, s1, a1, s2, a2 + call fw_next_arg1 +diff --git a/include/sbi/riscv_encoding.h b/include/sbi/riscv_encoding.h +index 4d9cdb52..2a4e363c 100644 +--- a/include/sbi/riscv_encoding.h ++++ b/include/sbi/riscv_encoding.h +@@ -32,6 +32,8 @@ + #define MSTATUS_TVM _UL(0x00100000) + #define MSTATUS_TW _UL(0x00200000) + #define MSTATUS_TSR _UL(0x00400000) ++#define MSTATUS_SPELP _UL(0x00800000) ++#define MSTATUS_SDT _UL(0x01000000) + #define MSTATUS32_SD _UL(0x80000000) + #if __riscv_xlen == 64 + #define MSTATUS_UXL _ULL(0x0000000300000000) +diff --git a/include/sbi/sbi_bitmap.h b/include/sbi/sbi_bitmap.h +index 4f0ebb63..354476c9 100644 +--- a/include/sbi/sbi_bitmap.h ++++ b/include/sbi/sbi_bitmap.h +@@ -62,6 
+62,11 @@ static inline void bitmap_zero(unsigned long *dst, int nbits) + } + } + ++static inline int bitmap_test(unsigned long *bmap, int bit) ++{ ++ return __test_bit(bit, bmap); ++} ++ + static inline void bitmap_zero_except(unsigned long *dst, + int exception, int nbits) + { +diff --git a/include/sbi/sbi_byteorder.h b/include/sbi/sbi_byteorder.h +new file mode 100644 +index 00000000..ed7cad1e +--- /dev/null ++++ b/include/sbi/sbi_byteorder.h +@@ -0,0 +1,80 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2023 Ventana Micro Systems Inc. ++ */ ++ ++#ifndef __SBI_BYTEORDER_H__ ++#define __SBI_BYTEORDER_H__ ++ ++#ifdef __ASSEMBLER__ ++# define _conv_cast(type, val) (val) ++#else ++# include ++# define _conv_cast(type, val) ((type)(val)) ++#endif ++ ++#define __BSWAP16(x) ((((x) & 0x00ff) << 8) | \ ++ (((x) & 0xff00) >> 8)) ++#define __BSWAP32(x) ((((x) & 0x000000ff) << 24) | \ ++ (((x) & 0x0000ff00) << 8) | \ ++ (((x) & 0x00ff0000) >> 8) | \ ++ (((x) & 0xff000000) >> 24)) ++#define __BSWAP64(x) ((((x) & 0x00000000000000ffULL) << 56) | \ ++ (((x) & 0x000000000000ff00ULL) << 40) | \ ++ (((x) & 0x0000000000ff0000ULL) << 24) | \ ++ (((x) & 0x00000000ff000000ULL) << 8) | \ ++ (((x) & 0x000000ff00000000ULL) >> 8) | \ ++ (((x) & 0x0000ff0000000000ULL) >> 24) | \ ++ (((x) & 0x00ff000000000000ULL) >> 40) | \ ++ (((x) & 0xff00000000000000ULL) >> 56)) ++ ++#define BSWAP64(x) ({ uint64_t _sv = (x); __BSWAP64(_sv); }) ++#define BSWAP32(x) ({ uint32_t _sv = (x); __BSWAP32(_sv); }) ++#define BSWAP16(x) ({ uint16_t _sv = (x); __BSWAP16(_sv); }) ++ ++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ /* CPU(little-endian) */ ++#define cpu_to_be16(x) _conv_cast(uint16_t, BSWAP16(x)) ++#define cpu_to_be32(x) _conv_cast(uint32_t, BSWAP32(x)) ++#define cpu_to_be64(x) _conv_cast(uint64_t, BSWAP64(x)) ++ ++#define be16_to_cpu(x) _conv_cast(uint16_t, BSWAP16(x)) ++#define be32_to_cpu(x) _conv_cast(uint32_t, BSWAP32(x)) ++#define be64_to_cpu(x) _conv_cast(uint64_t, 
BSWAP64(x)) ++ ++#define cpu_to_le16(x) _conv_cast(uint16_t, (x)) ++#define cpu_to_le32(x) _conv_cast(uint32_t, (x)) ++#define cpu_to_le64(x) _conv_cast(uint64_t, (x)) ++ ++#define le16_to_cpu(x) _conv_cast(uint16_t, (x)) ++#define le32_to_cpu(x) _conv_cast(uint32_t, (x)) ++#define le64_to_cpu(x) _conv_cast(uint64_t, (x)) ++#else /* CPU(big-endian) */ ++#define cpu_to_be16(x) _conv_cast(uint16_t, (x)) ++#define cpu_to_be32(x) _conv_cast(uint32_t, (x)) ++#define cpu_to_be64(x) _conv_cast(uint64_t, (x)) ++ ++#define be16_to_cpu(x) _conv_cast(uint16_t, (x)) ++#define be32_to_cpu(x) _conv_cast(uint32_t, (x)) ++#define be64_to_cpu(x) _conv_cast(uint64_t, (x)) ++ ++#define cpu_to_le16(x) _conv_cast(uint16_t, BSWAP16(x)) ++#define cpu_to_le32(x) _conv_cast(uint32_t, BSWAP32(x)) ++#define cpu_to_le64(x) _conv_cast(uint64_t, BSWAP64(x)) ++ ++#define le16_to_cpu(x) _conv_cast(uint16_t, BSWAP16(x)) ++#define le32_to_cpu(x) _conv_cast(uint32_t, BSWAP32(x)) ++#define le64_to_cpu(x) _conv_cast(uint64_t, BSWAP64(x)) ++#endif ++ ++#if __riscv_xlen == 64 ++#define cpu_to_lle cpu_to_le64 ++#define lle_to_cpu le64_to_cpu ++#elif __riscv_xlen == 32 ++#define cpu_to_lle cpu_to_le32 ++#define lle_to_cpu le32_to_cpu ++#else ++#error "Unknown __riscv_xlen" ++#endif ++ ++#endif /* __SBI_BYTEORDER_H__ */ +diff --git a/include/sbi/sbi_domain.h b/include/sbi/sbi_domain.h +index 5553d214..a91c4936 100644 +--- a/include/sbi/sbi_domain.h ++++ b/include/sbi/sbi_domain.h +@@ -12,6 +12,7 @@ + + #include + #include ++#include + + struct sbi_scratch; + +@@ -51,6 +52,8 @@ struct sbi_domain_memregion { + + /** Representation of OpenSBI domain */ + struct sbi_domain { ++ /** Internal state of per-domain data */ ++ struct sbi_domain_data_priv data_priv; + /** + * Logical index of this domain + * Note: This set by sbi_domain_finalize() in the coldboot path +diff --git a/include/sbi/sbi_domain_data.h b/include/sbi/sbi_domain_data.h +new file mode 100644 +index 00000000..7eeafdce +--- /dev/null ++++ 
b/include/sbi/sbi_domain_data.h +@@ -0,0 +1,93 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems Inc. ++ */ ++ ++#ifndef __SBI_DOMAIN_DATA_H__ ++#define __SBI_DOMAIN_DATA_H__ ++ ++#include ++#include ++ ++struct sbi_domain; ++ ++/** Maximum domain data per-domain */ ++#define SBI_DOMAIN_MAX_DATA_PTRS 32 ++ ++/** Representation of per-domain data */ ++struct sbi_domain_data_priv { ++ /** Array of domain data pointers indexed by domain data identifier */ ++ void *idx_to_data_ptr[SBI_DOMAIN_MAX_DATA_PTRS]; ++}; ++ ++/** Representation of a domain data */ ++struct sbi_domain_data { ++ /** ++ * Head is used for maintaining data list ++ * ++ * Note: initialized by domain framework ++ */ ++ struct sbi_dlist head; ++ /** ++ * Identifier which used to locate per-domain data ++ * ++ * Note: initialized by domain framework ++ */ ++ unsigned long data_idx; ++ /** Size of per-domain data */ ++ unsigned long data_size; ++ /** Optional callback to setup domain data */ ++ int (*data_setup)(struct sbi_domain *dom, ++ struct sbi_domain_data *data, void *data_ptr); ++ /** Optional callback to cleanup domain data */ ++ void (*data_cleanup)(struct sbi_domain *dom, ++ struct sbi_domain_data *data, void *data_ptr); ++}; ++ ++/** ++ * Get per-domain data pointer for a given domain ++ * @param dom pointer to domain ++ * @param data pointer to domain data ++ * ++ * @return per-domain data pointer ++ */ ++void *sbi_domain_data_ptr(struct sbi_domain *dom, struct sbi_domain_data *data); ++ ++/** ++ * Setup all domain data for a domain ++ * @param dom pointer to domain ++ * ++ * @return 0 on success and negative error code on failure ++ * ++ * Note: This function is used internally within domain framework. ++ */ ++int sbi_domain_setup_data(struct sbi_domain *dom); ++ ++/** ++ * Cleanup all domain data for a domain ++ * @param dom pointer to domain ++ * ++ * Note: This function is used internally within domain framework. 
++ */ ++void sbi_domain_cleanup_data(struct sbi_domain *dom); ++ ++/** ++ * Register a domain data ++ * @param hndl pointer to domain data ++ * ++ * @return 0 on success and negative error code on failure ++ * ++ * Note: This function must be used only in cold boot path. ++ */ ++int sbi_domain_register_data(struct sbi_domain_data *data); ++ ++/** ++ * Unregister a domain data ++ * @param hndl pointer to domain data ++ * ++ * Note: This function must be used only in cold boot path. ++ */ ++void sbi_domain_unregister_data(struct sbi_domain_data *data); ++ ++#endif +diff --git a/include/sbi/sbi_ecall.h b/include/sbi/sbi_ecall.h +index 46927f06..1ec2e064 100644 +--- a/include/sbi/sbi_ecall.h ++++ b/include/sbi/sbi_ecall.h +@@ -20,26 +20,25 @@ + struct sbi_trap_regs; + struct sbi_trap_info; + ++struct sbi_ecall_return { ++ /* Return flag to skip register update */ ++ bool skip_regs_update; ++ /* Return value */ ++ unsigned long value; ++}; ++ + struct sbi_ecall_extension { + struct sbi_dlist head; ++ char name[8]; + unsigned long extid_start; + unsigned long extid_end; ++ int (* register_extensions)(void); + int (* probe)(unsigned long extid, unsigned long *out_val); + int (* handle)(unsigned long extid, unsigned long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_val, +- struct sbi_trap_info *out_trap); ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out); + }; + +-extern struct sbi_ecall_extension ecall_base; +-extern struct sbi_ecall_extension ecall_legacy; +-extern struct sbi_ecall_extension ecall_time; +-extern struct sbi_ecall_extension ecall_rfence; +-extern struct sbi_ecall_extension ecall_ipi; +-extern struct sbi_ecall_extension ecall_vendor; +-extern struct sbi_ecall_extension ecall_hsm; +-extern struct sbi_ecall_extension ecall_srst; +-extern struct sbi_ecall_extension ecall_pmu; + extern struct sbi_ecall_extension ecall_penglai_host; + extern struct sbi_ecall_extension ecall_penglai_enclave; + +diff --git 
a/include/sbi/sbi_ecall_interface.h b/include/sbi/sbi_ecall_interface.h +index 9c2bba2d..dc08f81c 100644 +--- a/include/sbi/sbi_ecall_interface.h ++++ b/include/sbi/sbi_ecall_interface.h +@@ -29,6 +29,8 @@ + #define SBI_EXT_HSM 0x48534D + #define SBI_EXT_SRST 0x53525354 + #define SBI_EXT_PMU 0x504D55 ++#define SBI_EXT_SSE 0x535345 ++#define SBI_EXT_MPXY 0x4D505859 + + //Penglai + #define SBI_EXT_PENGLAI_HOST 0x100100 +@@ -243,6 +245,122 @@ enum sbi_pmu_ctr_type { + #define SBI_EXT_FIRMWARE_START 0x0A000000 + #define SBI_EXT_FIRMWARE_END 0x0AFFFFFF + ++ ++/* SBI Function IDs for SSE extension */ ++#define SBI_EXT_SSE_READ_ATTR 0x00000000 ++#define SBI_EXT_SSE_WRITE_ATTR 0x00000001 ++#define SBI_EXT_SSE_REGISTER 0x00000002 ++#define SBI_EXT_SSE_UNREGISTER 0x00000003 ++#define SBI_EXT_SSE_ENABLE 0x00000004 ++#define SBI_EXT_SSE_DISABLE 0x00000005 ++#define SBI_EXT_SSE_COMPLETE 0x00000006 ++#define SBI_EXT_SSE_INJECT 0x00000007 ++#define SBI_EXT_SSE_HART_UNMASK 0x00000008 ++#define SBI_EXT_SSE_HART_MASK 0x00000009 ++ ++/* SBI SSE Event Attributes. 
*/ ++enum sbi_sse_attr_id { ++ SBI_SSE_ATTR_STATUS = 0x00000000, ++ SBI_SSE_ATTR_PRIO = 0x00000001, ++ SBI_SSE_ATTR_CONFIG = 0x00000002, ++ SBI_SSE_ATTR_PREFERRED_HART = 0x00000003, ++ SBI_SSE_ATTR_ENTRY_PC = 0x00000004, ++ SBI_SSE_ATTR_ENTRY_ARG = 0x00000005, ++ SBI_SSE_ATTR_INTERRUPTED_SEPC = 0x00000006, ++ SBI_SSE_ATTR_INTERRUPTED_FLAGS = 0x00000007, ++ SBI_SSE_ATTR_INTERRUPTED_A6 = 0x00000008, ++ SBI_SSE_ATTR_INTERRUPTED_A7 = 0x00000009, ++ ++ SBI_SSE_ATTR_MAX = 0x0000000A ++}; ++ ++#define SBI_SSE_ATTR_STATUS_STATE_OFFSET 0 ++#define SBI_SSE_ATTR_STATUS_STATE_MASK 0x3 ++#define SBI_SSE_ATTR_STATUS_PENDING_OFFSET 2 ++#define SBI_SSE_ATTR_STATUS_INJECT_OFFSET 3 ++ ++#define SBI_SSE_ATTR_CONFIG_ONESHOT (1 << 0) ++ ++#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP BIT(0) ++#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE BIT(1) ++#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPV BIT(2) ++#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPVP BIT(3) ++#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPELP BIT(4) ++#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT BIT(5) ++ ++enum sbi_sse_state { ++ SBI_SSE_STATE_UNUSED = 0, ++ SBI_SSE_STATE_REGISTERED = 1, ++ SBI_SSE_STATE_ENABLED = 2, ++ SBI_SSE_STATE_RUNNING = 3, ++}; ++ ++/* SBI SSE Event IDs. 
*/ ++/* Range 0x00000000 - 0x0000ffff */ ++#define SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS 0x00000000 ++#define SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP 0x00000001 ++#define SBI_SSE_EVENT_LOCAL_RESERVED_0_START 0x00000002 ++#define SBI_SSE_EVENT_LOCAL_RESERVED_0_END 0x00003fff ++#define SBI_SSE_EVENT_LOCAL_PLAT_0_START 0x00004000 ++#define SBI_SSE_EVENT_LOCAL_PLAT_0_END 0x00007fff ++ ++#define SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS 0x00008000 ++#define SBI_SSE_EVENT_GLOBAL_RESERVED_0_START 0x00008001 ++#define SBI_SSE_EVENT_GLOBAL_RESERVED_0_END 0x0000bfff ++#define SBI_SSE_EVENT_GLOBAL_PLAT_0_START 0x0000c000 ++#define SBI_SSE_EVENT_GLOBAL_PLAT_0_END 0x0000ffff ++ ++/* Range 0x00010000 - 0x0001ffff */ ++#define SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW 0x00010000 ++#define SBI_SSE_EVENT_LOCAL_RESERVED_1_START 0x00010001 ++#define SBI_SSE_EVENT_LOCAL_RESERVED_1_END 0x00013fff ++#define SBI_SSE_EVENT_LOCAL_PLAT_1_START 0x00014000 ++#define SBI_SSE_EVENT_LOCAL_PLAT_1_END 0x00017fff ++ ++#define SBI_SSE_EVENT_GLOBAL_RESERVED_1_START 0x00018000 ++#define SBI_SSE_EVENT_GLOBAL_RESERVED_1_END 0x0001bfff ++#define SBI_SSE_EVENT_GLOBAL_PLAT_1_START 0x0001c000 ++#define SBI_SSE_EVENT_GLOBAL_PLAT_1_END 0x0001ffff ++ ++/* Range 0x00100000 - 0x0010ffff */ ++#define SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS 0x00100000 ++#define SBI_SSE_EVENT_LOCAL_RESERVED_2_START 0x00100001 ++#define SBI_SSE_EVENT_LOCAL_RESERVED_2_END 0x00103fff ++#define SBI_SSE_EVENT_LOCAL_PLAT_2_START 0x00104000 ++#define SBI_SSE_EVENT_LOCAL_PLAT_2_END 0x00107fff ++ ++#define SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS 0x00108000 ++#define SBI_SSE_EVENT_GLOBAL_RESERVED_2_START 0x00108001 ++#define SBI_SSE_EVENT_GLOBAL_RESERVED_2_END 0x0010bfff ++#define SBI_SSE_EVENT_GLOBAL_PLAT_2_START 0x0010c000 ++#define SBI_SSE_EVENT_GLOBAL_PLAT_2_END 0x0010ffff ++ ++/* Range 0xffff0000 - 0xffffffff */ ++#define SBI_SSE_EVENT_LOCAL_SOFTWARE 0xffff0000 ++#define SBI_SSE_EVENT_LOCAL_RESERVED_3_START 0xffff0001 ++#define SBI_SSE_EVENT_LOCAL_RESERVED_3_END 0xffff3fff 
++#define SBI_SSE_EVENT_LOCAL_PLAT_3_START 0xffff4000 ++#define SBI_SSE_EVENT_LOCAL_PLAT_3_END 0xffff7fff ++ ++#define SBI_SSE_EVENT_GLOBAL_SOFTWARE 0xffff8000 ++#define SBI_SSE_EVENT_GLOBAL_RESERVED_3_START 0xffff8001 ++#define SBI_SSE_EVENT_GLOBAL_RESERVED_3_END 0xffffbfff ++#define SBI_SSE_EVENT_GLOBAL_PLAT_3_START 0xffffc000 ++#define SBI_SSE_EVENT_GLOBAL_PLAT_3_END 0xffffffff ++ ++#define SBI_SSE_EVENT_GLOBAL_BIT BIT(15) ++#define SBI_SSE_EVENT_PLATFORM_BIT BIT(14) ++ ++/* SBI function IDs for MPXY extension */ ++#define SBI_EXT_MPXY_GET_SHMEM_SIZE 0x0 ++#define SBI_EXT_MPXY_SET_SHMEM 0x1 ++#define SBI_EXT_MPXY_GET_CHANNEL_IDS 0x2 ++#define SBI_EXT_MPXY_READ_ATTRS 0x3 ++#define SBI_EXT_MPXY_WRITE_ATTRS 0x4 ++#define SBI_EXT_MPXY_SEND_MSG_WITH_RESP 0x5 ++#define SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP 0x6 ++#define SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS 0x7 ++ + /* SBI return error codes */ + #define SBI_SUCCESS 0 + #define SBI_ERR_FAILED -1 +@@ -253,6 +371,12 @@ enum sbi_pmu_ctr_type { + #define SBI_ERR_ALREADY_AVAILABLE -6 + #define SBI_ERR_ALREADY_STARTED -7 + #define SBI_ERR_ALREADY_STOPPED -8 ++#define SBI_ERR_NO_SHMEM -9 ++#define SBI_ERR_INVALID_STATE -10 ++#define SBI_ERR_BAD_RANGE -11 ++#define SBI_ERR_TIMEOUT -12 ++#define SBI_ERR_IO -13 ++#define SBI_ERR_DENIED_LOCKED -14 + + #define SBI_LAST_ERR SBI_ERR_ALREADY_STOPPED + +diff --git a/include/sbi/sbi_error.h b/include/sbi/sbi_error.h +index dd65e14b..dc3e180c 100644 +--- a/include/sbi/sbi_error.h ++++ b/include/sbi/sbi_error.h +@@ -23,6 +23,9 @@ + #define SBI_EALREADY SBI_ERR_ALREADY_AVAILABLE + #define SBI_EALREADY_STARTED SBI_ERR_ALREADY_STARTED + #define SBI_EALREADY_STOPPED SBI_ERR_ALREADY_STOPPED ++#define SBI_ENO_SHMEM SBI_ERR_NO_SHMEM ++#define SBI_EINVALID_STATE SBI_ERR_INVALID_STATE ++#define SBI_EBAD_RANGE SBI_ERR_BAD_RANGE + + #define SBI_ENODEV -1000 + #define SBI_ENOSYS -1001 +diff --git a/include/sbi/sbi_heap.h b/include/sbi/sbi_heap.h +new file mode 100644 +index 00000000..a4b3f0c6 +--- 
/dev/null ++++ b/include/sbi/sbi_heap.h +@@ -0,0 +1,101 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2023 Ventana Micro Systems Inc. ++ * ++ * Authors: ++ * Anup Patel ++ */ ++ ++#ifndef __SBI_HEAP_H__ ++#define __SBI_HEAP_H__ ++ ++#include ++ ++/* Opaque declaration of heap control struct */ ++struct sbi_heap_control; ++ ++/* Global heap control structure */ ++extern struct sbi_heap_control global_hpctrl; ++ ++/* Alignment of heap base address and size */ ++#define HEAP_BASE_ALIGN 1024 ++ ++struct sbi_scratch; ++ ++/** Allocate from heap area */ ++void *sbi_malloc_from(struct sbi_heap_control *hpctrl, size_t size); ++ ++static inline void *sbi_malloc(size_t size) ++{ ++ return sbi_malloc_from(&global_hpctrl, size); ++} ++ ++/** Allocate aligned from heap area */ ++void *sbi_aligned_alloc_from(struct sbi_heap_control *hpctrl, ++ size_t alignment,size_t size); ++ ++static inline void *sbi_aligned_alloc(size_t alignment, size_t size) ++{ ++ return sbi_aligned_alloc_from(&global_hpctrl, alignment, size); ++} ++ ++/** Zero allocate from heap area */ ++void *sbi_zalloc_from(struct sbi_heap_control *hpctrl, size_t size); ++ ++static inline void *sbi_zalloc(size_t size) ++{ ++ return sbi_zalloc_from(&global_hpctrl, size); ++} ++ ++/** Allocate array from heap area */ ++static inline void *sbi_calloc(size_t nitems, size_t size) ++{ ++ return sbi_zalloc(nitems * size); ++} ++ ++static inline void *sbi_calloc_from(struct sbi_heap_control *hpctrl, ++ size_t nitems, size_t size) ++{ ++ return sbi_zalloc_from(hpctrl, nitems * size); ++} ++ ++/** Free-up to heap area */ ++void sbi_free_from(struct sbi_heap_control *hpctrl, void *ptr); ++ ++static inline void sbi_free(void *ptr) ++{ ++ return sbi_free_from(&global_hpctrl, ptr); ++} ++ ++/** Amount (in bytes) of free space in the heap area */ ++unsigned long sbi_heap_free_space_from(struct sbi_heap_control *hpctrl); ++ ++static inline unsigned long sbi_heap_free_space(void) ++{ ++ return 
sbi_heap_free_space_from(&global_hpctrl); ++} ++ ++/** Amount (in bytes) of used space in the heap area */ ++unsigned long sbi_heap_used_space_from(struct sbi_heap_control *hpctrl); ++ ++static inline unsigned long sbi_heap_used_space(void) ++{ ++ return sbi_heap_used_space_from(&global_hpctrl); ++} ++ ++/** Amount (in bytes) of reserved space in the heap area */ ++unsigned long sbi_heap_reserved_space_from(struct sbi_heap_control *hpctrl); ++ ++static inline unsigned long sbi_heap_reserved_space(void) ++{ ++ return sbi_heap_reserved_space_from(&global_hpctrl); ++} ++ ++/** Initialize heap area */ ++int sbi_heap_init(struct sbi_scratch *scratch); ++int sbi_heap_init_new(struct sbi_heap_control *hpctrl, unsigned long base, ++ unsigned long size); ++int sbi_heap_alloc_new(struct sbi_heap_control **hpctrl); ++ ++#endif +diff --git a/include/sbi/sbi_mpxy.h b/include/sbi/sbi_mpxy.h +new file mode 100644 +index 00000000..9da2791e +--- /dev/null ++++ b/include/sbi/sbi_mpxy.h +@@ -0,0 +1,185 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems Inc. 
++ * ++ * Authors: ++ * Rahul Pathak ++ */ ++ ++#ifndef __SBI_MPXY_H__ ++#define __SBI_MPXY_H__ ++ ++#include ++ ++struct sbi_scratch; ++ ++#define SBI_MPXY_MSGPROTO_VERSION(Major, Minor) ((Major << 16) | Minor) ++ ++enum sbi_mpxy_attr_id { ++ /* Standard channel attributes managed by MPXY framework */ ++ SBI_MPXY_ATTR_MSG_PROT_ID = 0x00000000, ++ SBI_MPXY_ATTR_MSG_PROT_VER = 0x00000001, ++ SBI_MPXY_ATTR_MSG_MAX_LEN = 0x00000002, ++ SBI_MPXY_ATTR_MSG_SEND_TIMEOUT = 0x00000003, ++ SBI_MPXY_ATTR_MSG_COMPLETION_TIMEOUT = 0x00000004, ++ SBI_MPXY_ATTR_CHANNEL_CAPABILITY = 0x00000005, ++ SBI_MPXY_ATTR_SSE_EVENT_ID = 0x00000006, ++ SBI_MPXY_ATTR_MSI_CONTROL = 0x00000007, ++ SBI_MPXY_ATTR_MSI_ADDR_LO = 0x00000008, ++ SBI_MPXY_ATTR_MSI_ADDR_HI = 0x00000009, ++ SBI_MPXY_ATTR_MSI_DATA = 0x0000000A, ++ SBI_MPXY_ATTR_EVENTS_STATE_CONTROL = 0x0000000B, ++ SBI_MPXY_ATTR_STD_ATTR_MAX_IDX, ++ /* Message protocol specific attributes, managed by ++ * message protocol driver */ ++ SBI_MPXY_ATTR_MSGPROTO_ATTR_START = 0x80000000, ++ SBI_MPXY_ATTR_MSGPROTO_ATTR_END = 0xffffffff ++}; ++ ++/** ++ * SBI MPXY Message Protocol IDs ++ */ ++enum sbi_mpxy_msgproto_id { ++ SBI_MPXY_MSGPROTO_RPMI_ID = 0x00000000, ++ SBI_MPXY_MSGPROTO_MAX_IDX, ++ /** Vendor specific message protocol IDs */ ++ SBI_MPXY_MSGPROTO_VENDOR_START = 0x80000000, ++ SBI_MPXY_MSGPROTO_VENDOR_END = 0xffffffff ++}; ++ ++enum SBI_EXT_MPXY_SHMEM_FLAGS { ++ SBI_EXT_MPXY_SHMEM_FLAG_OVERWRITE = 0b00, ++ SBI_EXT_MPXY_SHMEM_FLAG_OVERWRITE_RETURN = 0b01, ++ SBI_EXT_MPXY_SHMEM_FLAG_MAX_IDX ++}; ++ ++struct sbi_mpxy_msi_info { ++ /* MSI target address low 32-bit */ ++ u32 msi_addr_lo; ++ /* MSI target address high 32-bit */ ++ u32 msi_addr_hi; ++ /* MSI data */ ++ u32 msi_data; ++}; ++ ++/** ++ * Channel attributes. ++ * NOTE: The sequence of attribute fields are as per the ++ * defined sequence in the attribute table in spec(or as ++ * per the enum sbi_mpxy_attr_id). 
++ */ ++struct sbi_mpxy_channel_attrs { ++ /* Message protocol ID */ ++ u32 msg_proto_id; ++ /* Message protocol Version */ ++ u32 msg_proto_version; ++ /* Message protocol maximum message data length(bytes) */ ++ u32 msg_data_maxlen; ++ /* Message protocol message send timeout ++ * in microseconds */ ++ u32 msg_send_timeout; ++ /* Message protocol message response timeout in ++ * microseconds. Its the aggregate of msg_send_timeout ++ * and the timeout in receiving the response */ ++ u32 msg_completion_timeout; ++ /* Bit array for channel capabilities */ ++ u32 capability; ++ u32 sse_event_id; ++ u32 msi_control; ++ struct sbi_mpxy_msi_info msi_info; ++ /* Events State Control */ ++ u32 eventsstate_ctrl; ++}; ++ ++/** A Message proxy channel accessible through SBI interface */ ++struct sbi_mpxy_channel { ++ /** List head to a set of channels */ ++ struct sbi_dlist head; ++ u32 channel_id; ++ struct sbi_mpxy_channel_attrs attrs; ++ ++ /** ++ * Read message protocol attributes ++ * NOTE: inmem requires little-endian byte-ordering ++ */ ++ int (*read_attributes)(struct sbi_mpxy_channel *channel, ++ u32 *outmem, ++ u32 base_attr_id, ++ u32 attr_count); ++ ++ /** ++ * Write message protocol attributes ++ * NOTE: outmem requires little-endian byte-ordering ++ */ ++ int (*write_attributes)(struct sbi_mpxy_channel *channel, ++ u32 *inmem, ++ u32 base_attr_id, ++ u32 attr_count); ++ /** ++ * Send a message and wait for response ++ * NOTE: msgbuf requires little-endian byte-ordering ++ */ ++ int (*send_message_with_response)(struct sbi_mpxy_channel *channel, ++ u32 msg_id, void *msgbuf, u32 msg_len, ++ void *respbuf, u32 resp_max_len, ++ unsigned long *resp_len); ++ ++ /** Send message without response */ ++ int (*send_message_without_response)(struct sbi_mpxy_channel *channel, ++ u32 msg_id, void *msgbuf, u32 msg_len); ++ ++ /** ++ * Get notifications events if supported on a channel ++ * NOTE: eventsbuf requires little-endian byte-ordering ++ */ ++ int 
(*get_notification_events)(struct sbi_mpxy_channel *channel, ++ void *eventsbuf, u32 bufsize, ++ unsigned long *events_len); ++ ++ /** ++ * Callback to enable the events state reporting ++ * in the message protocol implementation ++ */ ++ void (*switch_eventsstate)(u32 enable); ++}; ++ ++/** Register a Message proxy channel */ ++int sbi_mpxy_register_channel(struct sbi_mpxy_channel *channel); ++ ++/** Initialize Message proxy subsystem */ ++int sbi_mpxy_init(struct sbi_scratch *scratch); ++ ++/** Check if some Message proxy channel is available */ ++bool sbi_mpxy_channel_available(void); ++ ++/** Get message proxy shared memory size */ ++unsigned long sbi_mpxy_get_shmem_size(void); ++ ++/** Set message proxy shared memory on the calling HART */ ++int sbi_mpxy_set_shmem(unsigned long shmem_phys_lo, ++ unsigned long shmem_phys_hi, ++ unsigned long flags); ++ ++/** Get channel IDs list */ ++int sbi_mpxy_get_channel_ids(u32 start_index); ++ ++/** Read MPXY channel attributes */ ++int sbi_mpxy_read_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count); ++ ++/** Write MPXY channel attributes */ ++int sbi_mpxy_write_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count); ++ ++/** ++ * Send a message over a MPXY channel. ++ * In case if response is not expected, resp_data_len will be NULL. 
++ */ ++int sbi_mpxy_send_message(u32 channel_id, u8 msg_id, ++ unsigned long msg_data_len, ++ unsigned long *resp_data_len); ++ ++/** Get Message proxy notification events */ ++int sbi_mpxy_get_notification_events(u32 channel_id, ++ unsigned long *events_len); ++ ++#endif +diff --git a/include/sbi/sbi_platform.h b/include/sbi/sbi_platform.h +index 8f959539..af743768 100644 +--- a/include/sbi/sbi_platform.h ++++ b/include/sbi/sbi_platform.h +@@ -29,12 +29,16 @@ + #define SBI_PLATFORM_HART_COUNT_OFFSET (0x50) + /** Offset of hart_stack_size in struct sbi_platform */ + #define SBI_PLATFORM_HART_STACK_SIZE_OFFSET (0x54) ++/** Offset of heap_size in struct sbi_platform */ ++#define SBI_PLATFORM_HEAP_SIZE_OFFSET (0x58) ++/** Offset of reserved in struct sbi_platform */ ++#define SBI_PLATFORM_RESERVED_OFFSET (0x5c) + /** Offset of platform_ops_addr in struct sbi_platform */ +-#define SBI_PLATFORM_OPS_OFFSET (0x58) ++#define SBI_PLATFORM_OPS_OFFSET (0x60) + /** Offset of firmware_context in struct sbi_platform */ +-#define SBI_PLATFORM_FIRMWARE_CONTEXT_OFFSET (0x58 + __SIZEOF_POINTER__) ++#define SBI_PLATFORM_FIRMWARE_CONTEXT_OFFSET (0x60 + __SIZEOF_POINTER__) + /** Offset of hart_index2id in struct sbi_platform */ +-#define SBI_PLATFORM_HART_INDEX2ID_OFFSET (0x58 + (__SIZEOF_POINTER__ * 2)) ++#define SBI_PLATFORM_HART_INDEX2ID_OFFSET (0x60 + (__SIZEOF_POINTER__ * 2)) + + #define SBI_PLATFORM_TLB_RANGE_FLUSH_LIMIT_DEFAULT (1UL << 12) + +@@ -46,7 +50,7 @@ + #include + + struct sbi_domain_memregion; +-struct sbi_trap_info; ++struct sbi_ecall_return; + struct sbi_trap_regs; + struct sbi_hart_features; + +@@ -123,18 +127,24 @@ struct sbi_platform_operations { + /** Exit platform timer for current HART */ + void (*timer_exit)(void); + ++ /** Initialize the platform Message Proxy(MPXY) driver */ ++ int (*mpxy_init)(void); ++ + /** platform specific SBI extension implementation probe function */ + int (*vendor_ext_check)(long extid); + /** platform specific SBI extension 
implementation provider */ + int (*vendor_ext_provider)(long extid, long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_value, +- struct sbi_trap_info *out_trap); ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out); + }; + + /** Platform default per-HART stack size for exception/interrupt handling */ + #define SBI_PLATFORM_DEFAULT_HART_STACK_SIZE 8192*8 + ++/** Platform default heap size */ ++#define SBI_PLATFORM_DEFAULT_HEAP_SIZE(__num_hart) \ ++ (0x8000 + 0x1000 * (__num_hart)) ++ + /** Representation of a platform */ + struct sbi_platform { + /** +@@ -157,6 +167,10 @@ struct sbi_platform { + u32 hart_count; + /** Per-HART stack size for exception/interrupt handling */ + u32 hart_stack_size; ++ /** Size of heap shared by all HARTs */ ++ u32 heap_size; ++ /** Reserved for future use */ ++ u32 reserved; + /** Pointer to sbi platform operations */ + unsigned long platform_ops_addr; + /** Pointer to system firmware specific context */ +@@ -615,6 +629,21 @@ static inline void sbi_platform_timer_exit(const struct sbi_platform *plat) + sbi_platform_ops(plat)->timer_exit(); + } + ++ ++/** ++ * Initialize the platform Message Proxy drivers ++ * ++ * @param plat pointer to struct sbi_platform ++ * ++ * @return 0 on success and negative error code on failure ++ */ ++static inline int sbi_platform_mpxy_init(const struct sbi_platform *plat) ++{ ++ if (plat && sbi_platform_ops(plat)->mpxy_init) ++ return sbi_platform_ops(plat)->mpxy_init(); ++ return 0; ++} ++ + /** + * Check if a vendor extension is implemented or not. 
+ * +@@ -647,16 +676,12 @@ static inline int sbi_platform_vendor_ext_check(const struct sbi_platform *plat, + static inline int sbi_platform_vendor_ext_provider( + const struct sbi_platform *plat, + long extid, long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_value, +- struct sbi_trap_info *out_trap) ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { +- if (plat && sbi_platform_ops(plat)->vendor_ext_provider) { +- return sbi_platform_ops(plat)->vendor_ext_provider(extid, +- funcid, regs, +- out_value, +- out_trap); +- } ++ if (plat && sbi_platform_ops(plat)->vendor_ext_provider) ++ return sbi_platform_ops(plat)->vendor_ext_provider( ++ extid, funcid, regs, out); + + return SBI_ENOTSUPP; + } +diff --git a/include/sbi/sbi_scratch.h b/include/sbi/sbi_scratch.h +index 40a3bc93..b01f8ef6 100644 +--- a/include/sbi/sbi_scratch.h ++++ b/include/sbi/sbi_scratch.h +@@ -18,26 +18,30 @@ + #define SBI_SCRATCH_FW_START_OFFSET (0 * __SIZEOF_POINTER__) + /** Offset of fw_size member in sbi_scratch */ + #define SBI_SCRATCH_FW_SIZE_OFFSET (1 * __SIZEOF_POINTER__) ++/** Offset of fw_heap_offset member in sbi_scratch */ ++#define SBI_SCRATCH_FW_HEAP_OFFSET (2 * __SIZEOF_POINTER__) ++/** Offset of fw_heap_size_offset member in sbi_scratch */ ++#define SBI_SCRATCH_FW_HEAP_SIZE_OFFSET (3 * __SIZEOF_POINTER__) + /** Offset of next_arg1 member in sbi_scratch */ +-#define SBI_SCRATCH_NEXT_ARG1_OFFSET (2 * __SIZEOF_POINTER__) ++#define SBI_SCRATCH_NEXT_ARG1_OFFSET (4 * __SIZEOF_POINTER__) + /** Offset of next_addr member in sbi_scratch */ +-#define SBI_SCRATCH_NEXT_ADDR_OFFSET (3 * __SIZEOF_POINTER__) ++#define SBI_SCRATCH_NEXT_ADDR_OFFSET (5 * __SIZEOF_POINTER__) + /** Offset of next_mode member in sbi_scratch */ +-#define SBI_SCRATCH_NEXT_MODE_OFFSET (4 * __SIZEOF_POINTER__) ++#define SBI_SCRATCH_NEXT_MODE_OFFSET (6 * __SIZEOF_POINTER__) + /** Offset of warmboot_addr member in sbi_scratch */ +-#define SBI_SCRATCH_WARMBOOT_ADDR_OFFSET (5 * 
__SIZEOF_POINTER__) ++#define SBI_SCRATCH_WARMBOOT_ADDR_OFFSET (7 * __SIZEOF_POINTER__) + /** Offset of platform_addr member in sbi_scratch */ +-#define SBI_SCRATCH_PLATFORM_ADDR_OFFSET (6 * __SIZEOF_POINTER__) ++#define SBI_SCRATCH_PLATFORM_ADDR_OFFSET (8 * __SIZEOF_POINTER__) + /** Offset of hartid_to_scratch member in sbi_scratch */ +-#define SBI_SCRATCH_HARTID_TO_SCRATCH_OFFSET (7 * __SIZEOF_POINTER__) ++#define SBI_SCRATCH_HARTID_TO_SCRATCH_OFFSET (9 * __SIZEOF_POINTER__) + /** Offset of trap_exit member in sbi_scratch */ +-#define SBI_SCRATCH_TRAP_EXIT_OFFSET (8 * __SIZEOF_POINTER__) ++#define SBI_SCRATCH_TRAP_EXIT_OFFSET (10 * __SIZEOF_POINTER__) + /** Offset of tmp0 member in sbi_scratch */ +-#define SBI_SCRATCH_TMP0_OFFSET (9 * __SIZEOF_POINTER__) ++#define SBI_SCRATCH_TMP0_OFFSET (11 * __SIZEOF_POINTER__) + /** Offset of options member in sbi_scratch */ +-#define SBI_SCRATCH_OPTIONS_OFFSET (10 * __SIZEOF_POINTER__) ++#define SBI_SCRATCH_OPTIONS_OFFSET (12 * __SIZEOF_POINTER__) + /** Offset of extra space in sbi_scratch */ +-#define SBI_SCRATCH_EXTRA_SPACE_OFFSET (11 * __SIZEOF_POINTER__) ++#define SBI_SCRATCH_EXTRA_SPACE_OFFSET (13 * __SIZEOF_POINTER__) + /** Maximum size of sbi_scratch (4KB) */ + #define SBI_SCRATCH_SIZE (0x1000) + +@@ -53,6 +57,10 @@ struct sbi_scratch { + unsigned long fw_start; + /** Size (in bytes) of firmware linked to OpenSBI library */ + unsigned long fw_size; ++ /** Offset (in bytes) of the heap area */ ++ unsigned long fw_heap_offset; ++ /** Size (in bytes) of the heap area */ ++ unsigned long fw_heap_size; + /** Arg1 (or 'a1' register) of next booting stage for this HART */ + unsigned long next_arg1; + /** Address of next booting stage for this HART */ +@@ -87,6 +95,16 @@ _Static_assert( + == SBI_SCRATCH_FW_SIZE_OFFSET, + "struct sbi_scratch definition has changed, please redefine " + "SBI_SCRATCH_FW_SIZE_OFFSET"); ++_Static_assert( ++ offsetof(struct sbi_scratch, fw_heap_offset) ++ == SBI_SCRATCH_FW_HEAP_OFFSET, ++ "struct 
sbi_scratch definition has changed, please redefine " ++ "SBI_SCRATCH_FW_HEAP_OFFSET"); ++_Static_assert( ++ offsetof(struct sbi_scratch, fw_heap_size) ++ == SBI_SCRATCH_FW_HEAP_SIZE_OFFSET, ++ "struct sbi_scratch definition has changed, please redefine " ++ "SBI_SCRATCH_FW_HEAP_SIZE_OFFSET"); + _Static_assert( + offsetof(struct sbi_scratch, next_arg1) + == SBI_SCRATCH_NEXT_ARG1_OFFSET, +@@ -170,6 +188,47 @@ void sbi_scratch_free_offset(unsigned long offset); + #define sbi_scratch_thishart_offset_ptr(offset) \ + (void *)((char *)sbi_scratch_thishart_ptr() + (offset)) + ++ ++/** Read a data type from sbi_scratch at given offset */ ++#define sbi_scratch_read_type(__scratch, __type, __offset) \ ++({ \ ++ *((__type *)sbi_scratch_offset_ptr((__scratch), (__offset))); \ ++}) ++ ++/** Write a data type to sbi_scratch at given offset */ ++#define sbi_scratch_write_type(__scratch, __type, __offset, __ptr) \ ++do { \ ++ *((__type *)sbi_scratch_offset_ptr((__scratch), (__offset))) \ ++ = (__type)(__ptr); \ ++} while (0) ++ ++/** Number of harts managed by this OpenSBI instance */ ++extern u32 sbi_scratch_hart_count; ++ ++/** Get the number of harts managed by this OpenSBI instance */ ++#define sbi_hart_count() sbi_scratch_hart_count ++ ++/** Check whether a particular HART index is valid or not */ ++#define sbi_hartindex_valid(__hartindex) ((__hartindex) < sbi_hart_count()) ++ ++/** HART index to HART id table */ ++extern u32 hartindex_to_hartid_table[]; ++ ++/** Get sbi_scratch from HART index */ ++#define sbi_hartindex_to_hartid(__hartindex) \ ++({ \ ++ ((__hartindex) < SBI_HARTMASK_MAX_BITS) ? \ ++ hartindex_to_hartid_table[__hartindex] : -1U; \ ++}) ++ ++/** ++ * Get logical index for given HART id ++ * @param hartid physical HART id ++ * @returns value between 0 to SBI_HARTMASK_MAX_BITS upon success and ++ * SBI_HARTMASK_MAX_BITS upon failure. 
++ */ ++u32 sbi_hartid_to_hartindex(u32 hartid); ++ + /** HART id to scratch table */ + extern struct sbi_scratch *hartid_to_scratch_table[]; + +diff --git a/include/sbi/sbi_slist.h b/include/sbi/sbi_slist.h +new file mode 100644 +index 00000000..e4b83cfd +--- /dev/null ++++ b/include/sbi/sbi_slist.h +@@ -0,0 +1,33 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Simple simply-linked list library. ++ * ++ * Copyright (c) 2025 Rivos Inc. ++ * ++ * Authors: ++ * Clément Léger ++ */ ++ ++#ifndef __SBI_SLIST_H__ ++#define __SBI_SLIST_H__ ++ ++#include ++ ++#define SBI_SLIST_HEAD_INIT(_ptr) (_ptr) ++#define SBI_SLIST_HEAD(_lname, _stype) struct _stype *_lname ++#define SBI_SLIST_NODE(_stype) SBI_SLIST_HEAD(next, _stype) ++#define SBI_SLIST_NODE_INIT(_ptr) .next = _ptr ++ ++#define SBI_INIT_SLIST_HEAD(_head) (_head) = NULL ++ ++#define SBI_SLIST_ADD(_ptr, _head) \ ++do { \ ++ (_ptr)->next = _head; \ ++ (_head) = _ptr; \ ++} while (0) ++ ++#define SBI_SLIST_FOR_EACH_ENTRY(_ptr, _head) \ ++ for (_ptr = _head; _ptr; _ptr = _ptr->next) ++ ++#endif +diff --git a/include/sbi/sbi_sse.h b/include/sbi/sbi_sse.h +new file mode 100644 +index 00000000..fb796545 +--- /dev/null ++++ b/include/sbi/sbi_sse.h +@@ -0,0 +1,95 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2023 Rivos Systems. ++ */ ++ ++#ifndef __SBI_SSE_H__ ++#define __SBI_SSE_H__ ++ ++#include ++#include ++#include ++ ++struct sbi_scratch; ++struct sbi_trap_regs; ++struct sbi_ecall_return; ++ ++#define EXC_MODE_PP_SHIFT 0 ++#define EXC_MODE_PP BIT(EXC_MODE_PP_SHIFT) ++#define EXC_MODE_PV_SHIFT 1 ++#define EXC_MODE_PV BIT(EXC_MODE_PV_SHIFT) ++#define EXC_MODE_SSTATUS_SPIE_SHIFT 2 ++#define EXC_MODE_SSTATUS_SPIE BIT(EXC_MODE_SSTATUS_SPIE_SHIFT) ++ ++struct sbi_sse_cb_ops { ++ /** ++ * Called when hart_id is changed on the event. ++ */ ++ void (*set_hartid_cb)(uint32_t event_id, unsigned long hart_id); ++ ++ /** ++ * Called when the SBI_EXT_SSE_COMPLETE is invoked on the event. 
++ */ ++ void (*complete_cb)(uint32_t event_id); ++ ++ /** ++ * Called when the SBI_EXT_SSE_REGISTER is invoked on the event. ++ */ ++ void (*register_cb)(uint32_t event_id); ++ ++ /** ++ * Called when the SBI_EXT_SSE_UNREGISTER is invoked on the event. ++ */ ++ void (*unregister_cb)(uint32_t event_id); ++ ++ /** ++ * Called when the SBI_EXT_SSE_ENABLE is invoked on the event. ++ */ ++ void (*enable_cb)(uint32_t event_id); ++ ++ /** ++ * Called when the SBI_EXT_SSE_DISABLE is invoked on the event. ++ */ ++ void (*disable_cb)(uint32_t event_id); ++}; ++ ++/* Add a supported event with associated callback operations ++ * @param event_id Event identifier (SBI_SSE_EVENT_* or a custom platform one) ++ * @param cb_ops Callback operations (Can be NULL if any) ++ * @return 0 on success, error otherwise ++ */ ++int sbi_sse_add_event(uint32_t event_id, const struct sbi_sse_cb_ops *cb_ops); ++ ++/* Inject an event to the current hard ++ * @param event_id Event identifier (SBI_SSE_EVENT_*) ++ * @param regs Registers that were used on SBI entry ++ * @return 0 on success, error otherwise ++ */ ++int sbi_sse_inject_event(uint32_t event_id); ++ ++void sbi_sse_process_pending_events(struct sbi_trap_regs *regs); ++ ++ ++int sbi_sse_init(struct sbi_scratch *scratch, bool cold_boot); ++void sbi_sse_exit(struct sbi_scratch *scratch); ++ ++/* Interface called from sbi_ecall_sse.c */ ++int sbi_sse_register(uint32_t event_id, unsigned long handler_entry_pc, ++ unsigned long handler_entry_arg); ++int sbi_sse_unregister(uint32_t event_id); ++int sbi_sse_hart_mask(void); ++int sbi_sse_hart_unmask(void); ++int sbi_sse_enable(uint32_t event_id); ++int sbi_sse_disable(uint32_t event_id); ++int sbi_sse_complete(struct sbi_trap_regs *regs, struct sbi_ecall_return *out); ++int sbi_sse_inject_from_ecall(uint32_t event_id, unsigned long hart_id, ++ struct sbi_ecall_return *out); ++int sbi_sse_read_attrs(uint32_t event_id, uint32_t base_attr_id, ++ uint32_t attr_count, unsigned long output_phys_lo, 
++ unsigned long output_phys_hi); ++int sbi_sse_write_attrs(uint32_t event_id, uint32_t base_attr_id, ++ uint32_t attr_count, unsigned long input_phys_lo, ++ unsigned long input_phys_hi); ++ ++#endif +diff --git a/include/sbi/sbi_trap.h b/include/sbi/sbi_trap.h +index a562b95e..e2b6459f 100644 +--- a/include/sbi/sbi_trap.h ++++ b/include/sbi/sbi_trap.h +@@ -224,6 +224,11 @@ static inline unsigned long sbi_regs_gva(const struct sbi_trap_regs *regs) + #endif + } + ++static inline int sbi_mstatus_prev_mode(unsigned long mstatus) ++{ ++ return (mstatus & MSTATUS_MPP) >> MSTATUS_MPP_SHIFT; ++} ++ + int sbi_trap_redirect(struct sbi_trap_regs *regs, + struct sbi_trap_info *trap); + +diff --git a/include/sbi/sbi_types.h b/include/sbi/sbi_types.h +index 7fb1af7b..ec624162 100644 +--- a/include/sbi/sbi_types.h ++++ b/include/sbi/sbi_types.h +@@ -54,6 +54,13 @@ typedef unsigned long virtual_size_t; + typedef unsigned long physical_addr_t; + typedef unsigned long physical_size_t; + ++typedef uint16_t le16_t; ++typedef uint16_t be16_t; ++typedef uint32_t le32_t; ++typedef uint32_t be32_t; ++typedef uint64_t le64_t; ++typedef uint64_t be64_t; ++ + #define TRUE 1 + #define FALSE 0 + #define true TRUE +diff --git a/include/sbi_utils/fdt/fdt_driver.h b/include/sbi_utils/fdt/fdt_driver.h +new file mode 100644 +index 00000000..4102679e +--- /dev/null ++++ b/include/sbi_utils/fdt/fdt_driver.h +@@ -0,0 +1,63 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * fdt_driver.h - Generic support for initializing drivers from DT nodes. 
++ * ++ * Copyright (c) 2024 SiFive ++ */ ++ ++#ifndef __FDT_DRIVER_H__ ++#define __FDT_DRIVER_H__ ++ ++#include ++ ++struct fdt_driver { ++ const struct fdt_match *match_table; ++ int (*init)(const void *fdt, int nodeoff, ++ const struct fdt_match *match); ++ bool experimental; ++}; ++ ++/* List of early FDT drivers generated at compile time */ ++extern const struct fdt_driver *fdt_early_drivers[]; ++ ++/** ++ * Initialize a driver instance for a specific DT node ++ * ++ * @param fdt devicetree blob ++ * @param nodeoff offset of a node in the devicetree blob ++ * @param drivers NULL-terminated array of drivers to match against this node ++ * ++ * @return 0 if a driver was matched and successfully initialized or a negative ++ * error code on failure ++ */ ++int fdt_driver_init_by_offset(const void *fdt, int nodeoff, ++ const struct fdt_driver *const *drivers); ++ ++/** ++ * Initialize a driver instance for each DT node that matches any of the ++ * provided drivers ++ * ++ * @param fdt devicetree blob ++ * @param drivers NULL-terminated array of drivers to match against each node ++ * ++ * @return 0 if drivers for all matches (if any) were successfully initialized ++ * or a negative error code on failure ++ */ ++int fdt_driver_init_all(const void *fdt, ++ const struct fdt_driver *const *drivers); ++ ++/** ++ * Initialize a driver instance for the first DT node that matches any of the ++ * provided drivers ++ * ++ * @param fdt devicetree blob ++ * @param drivers NULL-terminated array of drivers to match against each node ++ * ++ * @return 0 if a driver was matched and successfully initialized or a negative ++ * error code on failure ++ */ ++int fdt_driver_init_one(const void *fdt, ++ const struct fdt_driver *const *drivers); ++ ++#endif /* __FDT_DRIVER_H__ */ +diff --git a/include/sbi_utils/mailbox/fdt_mailbox.h b/include/sbi_utils/mailbox/fdt_mailbox.h +new file mode 100644 +index 00000000..0d5df340 +--- /dev/null ++++ b/include/sbi_utils/mailbox/fdt_mailbox.h +@@ 
-0,0 +1,35 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems Inc. ++ * ++ * Authors: ++ * Anup Patel ++ */ ++ ++#ifndef __FDT_MAILBOX_H__ ++#define __FDT_MAILBOX_H__ ++ ++#include ++#include ++ ++struct fdt_phandle_args; ++ ++/** FDT based mailbox driver */ ++struct fdt_mailbox { ++ struct fdt_driver driver; ++ int (*xlate)(struct mbox_controller *mbox, ++ const struct fdt_phandle_args *pargs, ++ u32 *out_chan_args); ++}; ++ ++/** Request a mailbox channel using "mboxes" DT property of client DT node */ ++int fdt_mailbox_request_chan(const void *fdt, int nodeoff, int index, ++ struct mbox_chan **out_chan); ++ ++/** Simple xlate function to convert one mailbox FDT cell into channel args */ ++int fdt_mailbox_simple_xlate(struct mbox_controller *mbox, ++ const struct fdt_phandle_args *pargs, ++ u32 *out_chan_args); ++ ++#endif +diff --git a/include/sbi_utils/mailbox/mailbox.h b/include/sbi_utils/mailbox/mailbox.h +new file mode 100644 +index 00000000..46fd8770 +--- /dev/null ++++ b/include/sbi_utils/mailbox/mailbox.h +@@ -0,0 +1,180 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems Inc. ++ * ++ * Authors: ++ * Anup Patel ++ */ ++ ++#ifndef __MAILBOX_H__ ++#define __MAILBOX_H__ ++ ++#include ++#include ++#include ++ ++/** Representation of a mailbox channel */ ++struct mbox_chan { ++ /** List head */ ++ struct sbi_dlist node; ++ /** Pointer to the mailbox controller */ ++ struct mbox_controller *mbox; ++ /** ++ * Arguments (or parameters) to identify a mailbox channel ++ * within a mailbox controller. ++ */ ++#define MBOX_CHAN_MAX_ARGS 2 ++ u32 chan_args[MBOX_CHAN_MAX_ARGS]; ++}; ++ ++#define to_mbox_chan(__node) \ ++ container_of((__node), struct mbox_chan, node) ++ ++/** ++ * Representation of a mailbox data transfer ++ * ++ * NOTE: If both "tx" and "rx" are non-NULL then Tx is done before Rx. 
++ */ ++struct mbox_xfer { ++#define MBOX_XFER_SEQ (1UL << 0) ++ /** Transfer flags */ ++ unsigned long flags; ++ /** Transfer arguments (or parameters) */ ++ void *args; ++ /** ++ * Sequence number ++ * ++ * If MBOX_XFER_SEQ is not set in flags then mbox_chan_xfer() ++ * will generate a unique sequence number and update this field ++ * else mbox_chan_xfer() will blindly use the sequence number ++ * specified by this field. ++ */ ++ long seq; ++ /** Send data pointer */ ++ void *tx; ++ /** Send data length (valid only if tx != NULL) */ ++ unsigned long tx_len; ++ /** ++ * Send timeout milliseconds (valid only if tx != NULL) ++ * ++ * If this field is non-zero along with tx != NULL then the ++ * mailbox controller driver will wait specified milliseconds ++ * for send data transfer to complete else the mailbox controller ++ * driver will not wait. ++ */ ++ unsigned long tx_timeout; ++ /** Receive data pointer */ ++ void *rx; ++ /** Receive data length (valid only if rx != NULL) */ ++ unsigned long rx_len; ++ /** ++ * Receive timeout milliseconds (valid only if rx != NULL) ++ * ++ * If this field is non-zero along with rx != NULL then the ++ * mailbox controller driver will wait specified milliseconds ++ * for receive data transfer to complete else the mailbox ++ * controller driver will not wait. 
++ */ ++ unsigned long rx_timeout; ++}; ++ ++#define mbox_xfer_init_tx(__p, __a, __t, __t_len, __t_tim) \ ++do { \ ++ (__p)->flags = 0; \ ++ (__p)->args = (__a); \ ++ (__p)->tx = (__t); \ ++ (__p)->tx_len = (__t_len); \ ++ (__p)->tx_timeout = (__t_tim); \ ++ (__p)->rx = NULL; \ ++ (__p)->rx_len = 0; \ ++ (__p)->rx_timeout = 0; \ ++} while (0) ++ ++#define mbox_xfer_init_rx(__p, __a, __r, __r_len, __r_tim) \ ++do { \ ++ (__p)->flags = 0; \ ++ (__p)->args = (__a); \ ++ (__p)->tx = NULL; \ ++ (__p)->tx_len = 0; \ ++ (__p)->tx_timeout = 0; \ ++ (__p)->rx = (__r); \ ++ (__p)->rx_len = (__r_len); \ ++ (__p)->rx_timeout = (__r_tim); \ ++} while (0) ++ ++#define mbox_xfer_init_txrx(__p, __a, __t, __t_len, __t_tim, __r, __r_len, __r_tim)\ ++do { \ ++ (__p)->flags = 0; \ ++ (__p)->args = (__a); \ ++ (__p)->tx = (__t); \ ++ (__p)->tx_len = (__t_len); \ ++ (__p)->tx_timeout = (__t_tim); \ ++ (__p)->rx = (__r); \ ++ (__p)->rx_len = (__r_len); \ ++ (__p)->rx_timeout = (__r_tim); \ ++} while (0) ++ ++#define mbox_xfer_set_sequence(__p, __seq) \ ++do { \ ++ (__p)->flags |= MBOX_XFER_SEQ; \ ++ (__p)->seq = (__seq); \ ++} while (0) ++ ++/** Representation of a mailbox controller */ ++struct mbox_controller { ++ /** List head */ ++ struct sbi_dlist node; ++ /** Next sequence atomic counter */ ++ atomic_t xfer_next_seq; ++ /* List of mailbox channels */ ++ struct sbi_dlist chan_list; ++ /** Unique ID of the mailbox controller assigned by the driver */ ++ unsigned int id; ++ /** Maximum length of transfer supported by the mailbox controller */ ++ unsigned int max_xfer_len; ++ /** Pointer to mailbox driver owning this mailbox controller */ ++ void *driver; ++ /** Request a mailbox channel from the mailbox controller */ ++ struct mbox_chan *(*request_chan)(struct mbox_controller *mbox, ++ u32 *chan_args); ++ /** Free a mailbox channel from the mailbox controller */ ++ void (*free_chan)(struct mbox_controller *mbox, ++ struct mbox_chan *chan); ++ /** Transfer data over mailbox channel */ 
++ int (*xfer)(struct mbox_chan *chan, struct mbox_xfer *xfer); ++ /** Get an attribute of mailbox channel */ ++ int (*get_attribute)(struct mbox_chan *chan, int attr_id, void *out_value); ++ /** Set an attribute of mailbox channel */ ++ int (*set_attribute)(struct mbox_chan *chan, int attr_id, void *new_value); ++}; ++ ++#define to_mbox_controller(__node) \ ++ container_of((__node), struct mbox_controller, node) ++ ++/** Find a registered mailbox controller */ ++struct mbox_controller *mbox_controller_find(unsigned int id); ++ ++/** Register mailbox controller */ ++int mbox_controller_add(struct mbox_controller *mbox); ++ ++/** Un-register mailbox controller */ ++void mbox_controller_remove(struct mbox_controller *mbox); ++ ++/** Request a mailbox channel */ ++struct mbox_chan *mbox_controller_request_chan(struct mbox_controller *mbox, ++ u32 *chan_args); ++ ++/** Free a mailbox channel */ ++void mbox_controller_free_chan(struct mbox_chan *chan); ++ ++/** Data transfer over mailbox channel */ ++int mbox_chan_xfer(struct mbox_chan *chan, struct mbox_xfer *xfer); ++ ++/** Get an attribute of mailbox channel */ ++int mbox_chan_get_attribute(struct mbox_chan *chan, int attr_id, void *out_value); ++ ++/** Set an attribute of mailbox channel */ ++int mbox_chan_set_attribute(struct mbox_chan *chan, int attr_id, void *new_value); ++ ++#endif +diff --git a/include/sbi_utils/mailbox/rpmi_mailbox.h b/include/sbi_utils/mailbox/rpmi_mailbox.h +new file mode 100644 +index 00000000..a23ad8ca +--- /dev/null ++++ b/include/sbi_utils/mailbox/rpmi_mailbox.h +@@ -0,0 +1,33 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2023 Ventana Micro Systems Inc. 
++ * ++ * Authors: ++ * Anup Patel ++ */ ++ ++#ifndef __RPMI_MAILBOX_H__ ++#define __RPMI_MAILBOX_H__ ++ ++#include ++#include ++#include ++ ++#define rpmi_u32_count(__var) (sizeof(__var) / sizeof(u32)) ++ ++/** Convert RPMI error to SBI error */ ++int rpmi_xlate_error(enum rpmi_error error); ++ ++/** Typical RPMI normal request with at least status code in response */ ++int rpmi_normal_request_with_status( ++ struct mbox_chan *chan, u32 service_id, ++ void *req, u32 req_words, u32 req_endian_words, ++ void *resp, u32 resp_words, u32 resp_endian_words); ++ ++/* RPMI posted request which is without any response*/ ++int rpmi_posted_request( ++ struct mbox_chan *chan, u32 service_id, ++ void *req, u32 req_words, u32 req_endian_words); ++ ++#endif /* !__RPMI_MAILBOX_H__ */ +diff --git a/include/sbi_utils/mailbox/rpmi_msgprot.h b/include/sbi_utils/mailbox/rpmi_msgprot.h +new file mode 100644 +index 00000000..a761b560 +--- /dev/null ++++ b/include/sbi_utils/mailbox/rpmi_msgprot.h +@@ -0,0 +1,706 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2023 Ventana Micro Systems Inc. 
++ * ++ * Authors: ++ * Rahul Pathak ++ * Subrahmanya Lingappa ++ */ ++ ++#ifndef __RPMI_MSGPROT_H__ ++#define __RPMI_MSGPROT_H__ ++ ++#include ++#include ++ ++/* ++ * 31 0 ++ * +---------------------+-----------------------+ ++ * | FLAGS | SERVICE_ID | SERVICEGROUP_ID | ++ * +---------------------+-----------------------+ ++ * | TOKEN | DATA LENGTH | ++ * +---------------------+-----------------------+ ++ * | DATA/PAYLOAD | ++ * +---------------------------------------------+ ++ */ ++ ++/** Message Header byte offset */ ++#define RPMI_MSG_HDR_OFFSET (0x0) ++/** Message Header Size in bytes */ ++#define RPMI_MSG_HDR_SIZE (8) ++ ++/** ServiceGroup ID field byte offset */ ++#define RPMI_MSG_SERVICEGROUP_ID_OFFSET (0x0) ++/** ServiceGroup ID field size in bytes */ ++#define RPMI_MSG_SERVICEGROUP_ID_SIZE (2) ++ ++/** Service ID field byte offset */ ++#define RPMI_MSG_SERVICE_ID_OFFSET (0x2) ++/** Service ID field size in bytes */ ++#define RPMI_MSG_SERVICE_ID_SIZE (1) ++ ++/** Flags field byte offset */ ++#define RPMI_MSG_FLAGS_OFFSET (0x3) ++/** Flags field size in bytes */ ++#define RPMI_MSG_FLAGS_SIZE (1) ++ ++#define RPMI_MSG_FLAGS_TYPE_POS (0U) ++#define RPMI_MSG_FLAGS_TYPE_MASK 0x7 ++#define RPMI_MSG_FLAGS_TYPE \ ++ ((0x7) << RPMI_MSG_FLAGS_TYPE_POS) ++ ++#define RPMI_MSG_FLAGS_DOORBELL_POS (3U) ++#define RPMI_MSG_FLAGS_DOORBELL_MASK 0x1 ++#define RPMI_MSG_FLAGS_DOORBELL \ ++ ((0x1) << RPMI_MSG_FLAGS_DOORBELL_POS) ++ ++/** Data length field byte offset */ ++#define RPMI_MSG_DATALEN_OFFSET (0x4) ++/** Data length field size in bytes */ ++#define RPMI_MSG_DATALEN_SIZE (2) ++ ++/** Token field byte offset */ ++#define RPMI_MSG_TOKEN_OFFSET (0x6) ++/** Token field size in bytes */ ++#define RPMI_MSG_TOKEN_SIZE (2) ++/** Token field mask */ ++#define RPMI_MSG_TOKEN_MASK (0xffffU) ++ ++/** Data field byte offset */ ++#define RPMI_MSG_DATA_OFFSET (RPMI_MSG_HDR_SIZE) ++/** Data field size in bytes */ ++#define RPMI_MSG_DATA_SIZE(__slot_size) ((__slot_size) - 
RPMI_MSG_HDR_SIZE) ++ ++/** Minimum slot size in bytes */ ++#define RPMI_SLOT_SIZE_MIN (64) ++ ++/** Name length of 16 characters */ ++#define RPMI_NAME_CHARS_MAX (16) ++ ++/** Queue layout */ ++#define RPMI_QUEUE_HEAD_SLOT 0 ++#define RPMI_QUEUE_TAIL_SLOT 1 ++#define RPMI_QUEUE_HEADER_SLOTS 2 ++ ++/** Default timeout values */ ++#define RPMI_DEF_TX_TIMEOUT 20 ++#define RPMI_DEF_RX_TIMEOUT 20 ++ ++/** ++ * Common macro to generate composite version from major ++ * and minor version numbers. ++ * ++ * RPMI has Specification version, Implementation version ++ * Service group versions which follow the same versioning ++ * encoding as below. ++ */ ++#define RPMI_VERSION(__major, __minor) (((__major) << 16) | (__minor)) ++ ++/** RPMI Message Header */ ++struct rpmi_message_header { ++ le16_t servicegroup_id; ++ uint8_t service_id; ++ uint8_t flags; ++ le16_t datalen; ++ le16_t token; ++} __packed; ++ ++/** RPMI Message */ ++struct rpmi_message { ++ struct rpmi_message_header header; ++ u8 data[0]; ++} __packed; ++ ++/** RPMI Messages Types */ ++enum rpmi_message_type { ++ /* Normal request backed with ack */ ++ RPMI_MSG_NORMAL_REQUEST = 0x0, ++ /* Request without any ack */ ++ RPMI_MSG_POSTED_REQUEST = 0x1, ++ /* Acknowledgment for normal request message */ ++ RPMI_MSG_ACKNOWLDGEMENT = 0x2, ++ /* Notification message */ ++ RPMI_MSG_NOTIFICATION = 0x3, ++}; ++ ++/** RPMI Error Types */ ++enum rpmi_error { ++ /* Success */ ++ RPMI_SUCCESS = 0, ++ /* General failure */ ++ RPMI_ERR_FAILED = -1, ++ /* Service or feature not supported */ ++ RPMI_ERR_NOTSUPP = -2, ++ /* Invalid Parameter */ ++ RPMI_ERR_INVALID_PARAM = -3, ++ /* ++ * Denied to insufficient permissions ++ * or due to unmet prerequisite ++ */ ++ RPMI_ERR_DENIED = -4, ++ /* Invalid address or offset */ ++ RPMI_ERR_INVALID_ADDR = -5, ++ /* ++ * Operation failed as it was already in ++ * progress or the state has changed already ++ * for which the operation was carried out. 
++ */ ++ RPMI_ERR_ALREADY = -6, ++ /* ++ * Error in implementation which violates ++ * the specification version ++ */ ++ RPMI_ERR_EXTENSION = -7, ++ /* Operation failed due to hardware issues */ ++ RPMI_ERR_HW_FAULT = -8, ++ /* System, device or resource is busy */ ++ RPMI_ERR_BUSY = -9, ++ /* System or device or resource in invalid state */ ++ RPMI_ERR_INVALID_STATE = -10, ++ /* Index, offset or address is out of range */ ++ RPMI_ERR_BAD_RANGE = -11, ++ /* Operation timed out */ ++ RPMI_ERR_TIMEOUT = -12, ++ /* ++ * Error in input or output or ++ * error in sending or receiving data ++ * through communication medium ++ */ ++ RPMI_ERR_IO = -13, ++ /* No data available */ ++ RPMI_ERR_NO_DATA = -14, ++ RPMI_ERR_RESERVED_START = -15, ++ RPMI_ERR_RESERVED_END = -127, ++ RPMI_ERR_VENDOR_START = -128, ++}; ++ ++/** RPMI Mailbox Message Arguments */ ++struct rpmi_message_args { ++ u32 flags; ++#define RPMI_MSG_FLAGS_NO_TX (1U << 0) ++#define RPMI_MSG_FLAGS_NO_RX (1U << 1) ++#define RPMI_MSG_FLAGS_NO_RX_TOKEN (1U << 2) ++ enum rpmi_message_type type; ++ u8 service_id; ++ u32 tx_endian_words; ++ u32 rx_endian_words; ++ u16 rx_token; ++ u32 rx_data_len; ++}; ++ ++/** RPMI Mailbox Channel Attribute IDs */ ++enum rpmi_channel_attribute_id { ++ RPMI_CHANNEL_ATTR_PROTOCOL_VERSION = 0, ++ RPMI_CHANNEL_ATTR_MAX_DATA_LEN, ++ RPMI_CHANNEL_ATTR_P2A_DOORBELL_SYSMSI_INDEX, ++ RPMI_CHANNEL_ATTR_TX_TIMEOUT, ++ RPMI_CHANNEL_ATTR_RX_TIMEOUT, ++ RPMI_CHANNEL_ATTR_SERVICEGROUP_ID, ++ RPMI_CHANNEL_ATTR_SERVICEGROUP_VERSION, ++ RPMI_CHANNEL_ATTR_IMPL_ID, ++ RPMI_CHANNEL_ATTR_IMPL_VERSION, ++ RPMI_CHANNEL_ATTR_MAX, ++}; ++ ++/* ++ * RPMI SERVICEGROUPS AND SERVICES ++ */ ++ ++/** RPMI ServiceGroups IDs */ ++enum rpmi_servicegroup_id { ++ RPMI_SRVGRP_ID_MIN = 0, ++ RPMI_SRVGRP_BASE = 0x0001, ++ RPMI_SRVGRP_SYSTEM_MSI = 0x0002, ++ RPMI_SRVGRP_SYSTEM_RESET = 0x0003, ++ RPMI_SRVGRP_SYSTEM_SUSPEND = 0x0004, ++ RPMI_SRVGRP_HSM = 0x0005, ++ RPMI_SRVGRP_CPPC = 0x0006, ++ RPMI_SRVGRP_CLOCK = 0x0008, ++ 
RPMI_SRVGRP_ID_MAX_COUNT, ++ ++ /* Reserved range for service groups */ ++ RPMI_SRVGRP_RESERVE_START = RPMI_SRVGRP_ID_MAX_COUNT, ++ RPMI_SRVGRP_RESERVE_END = 0x7FFF, ++ ++ /* Vendor/Implementation-specific service groups range */ ++ RPMI_SRVGRP_VENDOR_START = 0x8000, ++ RPMI_SRVGRP_VENDOR_END = 0xFFFF, ++}; ++ ++/** RPMI enable notification request */ ++struct rpmi_enable_notification_req { ++ u32 eventid; ++}; ++ ++/** RPMI enable notification response */ ++struct rpmi_enable_notification_resp { ++ s32 status; ++}; ++ ++/** RPMI Base ServiceGroup Service IDs */ ++enum rpmi_base_service_id { ++ RPMI_BASE_SRV_ENABLE_NOTIFICATION = 0x01, ++ RPMI_BASE_SRV_GET_IMPLEMENTATION_VERSION = 0x02, ++ RPMI_BASE_SRV_GET_IMPLEMENTATION_IDN = 0x03, ++ RPMI_BASE_SRV_GET_SPEC_VERSION = 0x04, ++ RPMI_BASE_SRV_GET_PLATFORM_INFO = 0x05, ++ RPMI_BASE_SRV_PROBE_SERVICE_GROUP = 0x06, ++ RPMI_BASE_SRV_GET_ATTRIBUTES = 0x07, ++}; ++ ++#define RPMI_BASE_FLAGS_F0_PRIVILEGE (1U << 1) ++#define RPMI_BASE_FLAGS_F0_EV_NOTIFY (1U << 0) ++ ++enum rpmi_base_context_priv_level { ++ RPMI_BASE_CONTEXT_PRIV_S_MODE, ++ RPMI_BASE_CONTEXT_PRIV_M_MODE, ++}; ++ ++struct rpmi_base_get_attributes_resp { ++ s32 status_code; ++ u32 f0; ++ u32 f1; ++ u32 f2; ++ u32 f3; ++}; ++ ++struct rpmi_base_get_platform_info_resp { ++ s32 status; ++ u32 plat_info_len; ++ char plat_info[]; ++}; ++ ++/** RPMI System MSI ServiceGroup Service IDs */ ++enum rpmi_sysmsi_service_id { ++ RPMI_SYSMSI_SRV_ENABLE_NOTIFICATION = 0x01, ++ RPMI_SYSMSI_SRV_GET_ATTRIBUTES = 0x2, ++ RPMI_SYSMSI_SRV_GET_MSI_ATTRIBUTES = 0x3, ++ RPMI_SYSMSI_SRV_SET_MSI_STATE = 0x4, ++ RPMI_SYSMSI_SRV_GET_MSI_STATE = 0x5, ++ RPMI_SYSMSI_SRV_SET_MSI_TARGET = 0x6, ++ RPMI_SYSMSI_SRV_GET_MSI_TARGET = 0x7, ++ RPMI_SYSMSI_SRV_ID_MAX_COUNT, ++}; ++ ++/** Response for system MSI service group attributes */ ++struct rpmi_sysmsi_get_attributes_resp { ++ s32 status; ++ u32 sys_num_msi; ++ u32 flag0; ++ u32 flag1; ++}; ++ ++/** Request for system MSI attributes */ 
++struct rpmi_sysmsi_get_msi_attributes_req { ++ u32 sys_msi_index; ++}; ++ ++/** Response for system MSI attributes */ ++struct rpmi_sysmsi_get_msi_attributes_resp { ++ s32 status; ++ u32 flag0; ++ u32 flag1; ++ u8 name[16]; ++}; ++ ++#define RPMI_SYSMSI_MSI_ATTRIBUTES_FLAG0_PREF_PRIV (1U << 0) ++ ++/** Request for system MSI set state */ ++struct rpmi_sysmsi_set_msi_state_req { ++ u32 sys_msi_index; ++ u32 sys_msi_state; ++}; ++ ++#define RPMI_SYSMSI_MSI_STATE_ENABLE (1U << 0) ++#define RPMI_SYSMSI_MSI_STATE_PENDING (1U << 1) ++ ++/** Response for system MSI set state */ ++struct rpmi_sysmsi_set_msi_state_resp { ++ s32 status; ++}; ++ ++/** Request for system MSI get state */ ++struct rpmi_sysmsi_get_msi_state_req { ++ u32 sys_msi_index; ++}; ++ ++/** Response for system MSI get state */ ++struct rpmi_sysmsi_get_msi_state_resp { ++ s32 status; ++ u32 sys_msi_state; ++}; ++ ++/** Request for system MSI set target */ ++struct rpmi_sysmsi_set_msi_target_req { ++ u32 sys_msi_index; ++ u32 sys_msi_address_low; ++ u32 sys_msi_address_high; ++ u32 sys_msi_data; ++}; ++ ++/** Response for system MSI set target */ ++struct rpmi_sysmsi_set_msi_target_resp { ++ s32 status; ++}; ++ ++/** Request for system MSI get target */ ++struct rpmi_sysmsi_get_msi_target_req { ++ u32 sys_msi_index; ++}; ++ ++/** Response for system MSI get target */ ++struct rpmi_sysmsi_get_msi_target_resp { ++ s32 status; ++ u32 sys_msi_address_low; ++ u32 sys_msi_address_high; ++ u32 sys_msi_data; ++}; ++ ++/** RPMI System Reset ServiceGroup Service IDs */ ++enum rpmi_system_reset_service_id { ++ RPMI_SYSRST_SRV_ENABLE_NOTIFICATION = 0x01, ++ RPMI_SYSRST_SRV_GET_ATTRIBUTES = 0x02, ++ RPMI_SYSRST_SRV_SYSTEM_RESET = 0x03, ++ RPMI_SYSRST_SRV_ID_MAX_COUNT, ++}; ++ ++/** RPMI System Reset types */ ++enum rpmi_sysrst_reset_type { ++ RPMI_SYSRST_TYPE_SHUTDOWN = 0x0, ++ RPMI_SYSRST_TYPE_COLD_REBOOT = 0x1, ++ RPMI_SYSRST_TYPE_WARM_REBOOT = 0x2, ++ RPMI_SYSRST_TYPE_MAX, ++}; ++ ++#define 
RPMI_SYSRST_ATTRS_FLAGS_RESETTYPE_POS (1) ++#define RPMI_SYSRST_ATTRS_FLAGS_RESETTYPE_MASK \ ++ (1U << RPMI_SYSRST_ATTRS_FLAGS_RESETTYPE_POS) ++ ++/** Response for system reset attributes */ ++struct rpmi_sysrst_get_reset_attributes_resp { ++ s32 status; ++ u32 flags; ++}; ++ ++/** RPMI System Suspend ServiceGroup Service IDs */ ++enum rpmi_system_suspend_service_id { ++ RPMI_SYSSUSP_SRV_ENABLE_NOTIFICATION = 0x01, ++ RPMI_SYSSUSP_SRV_GET_ATTRIBUTES = 0x02, ++ RPMI_SYSSUSP_SRV_SYSTEM_SUSPEND = 0x03, ++ RPMI_SYSSUSP_SRV_ID_MAX_COUNT, ++}; ++ ++/** Request for system suspend attributes */ ++struct rpmi_syssusp_get_attr_req { ++ u32 susp_type; ++}; ++ ++#define RPMI_SYSSUSP_ATTRS_FLAGS_RESUMEADDR (1U << 1) ++#define RPMI_SYSSUSP_ATTRS_FLAGS_SUSPENDTYPE 1U ++ ++/** Response for system suspend attributes */ ++struct rpmi_syssusp_get_attr_resp { ++ s32 status; ++ u32 flags; ++}; ++ ++struct rpmi_syssusp_suspend_req { ++ u32 hartid; ++ u32 suspend_type; ++ u32 resume_addr_lo; ++ u32 resume_addr_hi; ++}; ++ ++struct rpmi_syssusp_suspend_resp { ++ s32 status; ++}; ++ ++/** RPMI HSM State Management ServiceGroup Service IDs */ ++enum rpmi_hsm_service_id { ++ RPMI_HSM_SRV_ENABLE_NOTIFICATION = 0x01, ++ RPMI_HSM_SRV_GET_HART_STATUS = 0x02, ++ RPMI_HSM_SRV_GET_HART_LIST = 0x03, ++ RPMI_HSM_SRV_GET_SUSPEND_TYPES = 0x04, ++ RPMI_HSM_SRV_GET_SUSPEND_INFO = 0x05, ++ RPMI_HSM_SRV_HART_START = 0x06, ++ RPMI_HSM_SRV_HART_STOP = 0x07, ++ RPMI_HSM_SRV_HART_SUSPEND = 0x08, ++ RPMI_HSM_SRV_ID_MAX = 0x09, ++}; ++ ++/* HSM service group request and response structs */ ++struct rpmi_hsm_hart_start_req { ++ u32 hartid; ++ u32 start_addr_lo; ++ u32 start_addr_hi; ++}; ++ ++struct rpmi_hsm_hart_start_resp { ++ s32 status; ++}; ++ ++struct rpmi_hsm_hart_stop_req { ++ u32 hartid; ++}; ++ ++struct rpmi_hsm_hart_stop_resp { ++ s32 status; ++}; ++ ++struct rpmi_hsm_hart_susp_req { ++ u32 hartid; ++ u32 suspend_type; ++ u32 resume_addr_lo; ++ u32 resume_addr_hi; ++}; ++ ++struct 
rpmi_hsm_hart_susp_resp { ++ s32 status; ++}; ++ ++struct rpmi_hsm_get_hart_status_req { ++ u32 hartid; ++}; ++ ++struct rpmi_hsm_get_hart_status_resp { ++ s32 status; ++ u32 hart_status; ++}; ++ ++struct rpmi_hsm_get_hart_list_req { ++ u32 start_index; ++}; ++ ++struct rpmi_hsm_get_hart_list_resp { ++ s32 status; ++ u32 remaining; ++ u32 returned; ++ /* remaining space need to be adjusted for the above 3 u32's */ ++ u32 hartid[(RPMI_MSG_DATA_SIZE(RPMI_SLOT_SIZE_MIN) - (sizeof(u32) * 3)) / sizeof(u32)]; ++}; ++ ++struct rpmi_hsm_get_susp_types_req { ++ u32 start_index; ++}; ++ ++struct rpmi_hsm_get_susp_types_resp { ++ s32 status; ++ u32 remaining; ++ u32 returned; ++ /* remaining space need to be adjusted for the above 3 u32's */ ++ u32 types[(RPMI_MSG_DATA_SIZE(RPMI_SLOT_SIZE_MIN) - (sizeof(u32) * 3)) / sizeof(u32)]; ++}; ++ ++struct rpmi_hsm_get_susp_info_req { ++ u32 suspend_type; ++}; ++ ++#define RPMI_HSM_SUSPEND_INFO_FLAGS_TIMER_STOP 1U ++ ++struct rpmi_hsm_get_susp_info_resp { ++ s32 status; ++ u32 flags; ++ u32 entry_latency_us; ++ u32 exit_latency_us; ++ u32 wakeup_latency_us; ++ u32 min_residency_us; ++}; ++ ++/** RPMI CPPC ServiceGroup Service IDs */ ++enum rpmi_cppc_service_id { ++ RPMI_CPPC_SRV_ENABLE_NOTIFICATION = 0x01, ++ RPMI_CPPC_SRV_PROBE_REG = 0x02, ++ RPMI_CPPC_SRV_READ_REG = 0x03, ++ RPMI_CPPC_SRV_WRITE_REG = 0x04, ++ RPMI_CPPC_SRV_GET_FAST_CHANNEL_REGION = 0x05, ++ RPMI_CPPC_SRV_GET_FAST_CHANNEL_OFFSET = 0x06, ++ RPMI_CPPC_SRV_GET_HART_LIST = 0x07, ++ RPMI_CPPC_SRV_MAX_COUNT, ++}; ++ ++struct rpmi_cppc_probe_req { ++ u32 hart_id; ++ u32 reg_id; ++}; ++ ++struct rpmi_cppc_probe_resp { ++ s32 status; ++ u32 reg_len; ++}; ++ ++struct rpmi_cppc_read_reg_req { ++ u32 hart_id; ++ u32 reg_id; ++}; ++ ++struct rpmi_cppc_read_reg_resp { ++ s32 status; ++ u32 data_lo; ++ u32 data_hi; ++}; ++ ++struct rpmi_cppc_write_reg_req { ++ u32 hart_id; ++ u32 reg_id; ++ u32 data_lo; ++ u32 data_hi; ++}; ++ ++struct rpmi_cppc_write_reg_resp { ++ s32 status; ++}; 
++ ++struct rpmi_cppc_get_fastchan_offset_req { ++ u32 hart_id; ++}; ++ ++struct rpmi_cppc_get_fastchan_offset_resp { ++ s32 status; ++ u32 fc_perf_request_offset_lo; ++ u32 fc_perf_request_offset_hi; ++ u32 fc_perf_feedback_offset_lo; ++ u32 fc_perf_feedback_offset_hi; ++}; ++ ++#define RPMI_CPPC_FAST_CHANNEL_CPPC_MODE_POS 3 ++#define RPMI_CPPC_FAST_CHANNEL_CPPC_MODE_MASK \ ++ (3U << RPMI_CPPC_FAST_CHANNEL_CPPC_MODE_POS) ++#define RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_WIDTH_POS 1 ++#define RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_WIDTH_MASK \ ++ (3U << RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_WIDTH_POS) ++#define RPMI_CPPC_FAST_CHANNEL_FLAGS_DB_SUPPORTED (1U << 0) ++ ++struct rpmi_cppc_get_fastchan_region_resp { ++ s32 status; ++ u32 flags; ++ u32 region_addr_lo; ++ u32 region_addr_hi; ++ u32 region_size_lo; ++ u32 region_size_hi; ++ u32 db_addr_lo; ++ u32 db_addr_hi; ++ u32 db_setmask_lo; ++ u32 db_setmask_hi; ++ u32 db_preservemask_lo; ++ u32 db_preservemask_hi; ++}; ++ ++enum rpmi_cppc_fast_channel_db_width { ++ RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_8 = 0x0, ++ RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_16 = 0x1, ++ RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_32 = 0x2, ++ RPMI_CPPC_FAST_CHANNEL_DB_WIDTH_64 = 0x3, ++}; ++ ++enum rpmi_cppc_fast_channel_cppc_mode { ++ RPMI_CPPC_FAST_CHANNEL_CPPC_MODE_PASSIVE = 0x0, ++ RPMI_CPPC_FAST_CHANNEL_CPPC_MODE_ACTIVE = 0x1, ++ RPMI_CPPC_FAST_CHANNEL_CPPC_MODE_MAX_IDX, ++}; ++ ++struct rpmi_cppc_hart_list_req { ++ u32 start_index; ++}; ++ ++struct rpmi_cppc_hart_list_resp { ++ s32 status; ++ u32 remaining; ++ u32 returned; ++ /* remaining space need to be adjusted for the above 3 u32's */ ++ u32 hartid[(RPMI_MSG_DATA_SIZE(RPMI_SLOT_SIZE_MIN) - (sizeof(u32) * 3)) / sizeof(u32)]; ++}; ++ ++/** RPMI Clock ServiceGroup Service IDs */ ++enum rpmi_clock_service_id { ++ RPMI_CLOCK_SRV_ENABLE_NOTIFICATION = 0x01, ++ RPMI_CLOCK_SRV_GET_NUM_CLOCKS = 0x02, ++ RPMI_CLOCK_SRV_GET_ATTRIBUTES = 0x03, ++ RPMI_CLOCK_SRV_GET_SUPPORTED_RATES = 0x04, ++ RPMI_CLOCK_SRV_SET_CONFIG = 0x05, ++ 
RPMI_CLOCK_SRV_GET_CONFIG = 0x06, ++ RPMI_CLOCK_SRV_SET_RATE = 0x07, ++ RPMI_CLOCK_SRV_GET_RATE = 0x08, ++ RPMI_CLOCK_SRV_MAX_COUNT, ++}; ++ ++struct rpmi_clock_get_num_clocks_resp { ++ s32 status; ++ u32 num_clocks; ++}; ++ ++struct rpmi_clock_get_attributes_req { ++ u32 clock_id; ++}; ++ ++struct rpmi_clock_get_attributes_resp { ++ s32 status; ++#define RPMI_CLOCK_FLAGS_FORMAT_POS 30 ++#define RPMI_CLOCK_FLAGS_FORMAT_MASK \ ++ (3U << RPMI_CLOCK_FLAGS_CLOCK_FORMAT_POS) ++#define RPMI_CLOCK_FLAGS_FORMAT_DISCRETE 0 ++#define RPMI_CLOCK_FLAGS_FORMAT_LINEAR 1 ++ u32 flags; ++ u32 num_rates; ++ u32 transition_latency; ++ u8 name[16]; ++}; ++ ++struct rpmi_clock_get_supported_rates_req { ++ u32 clock_id; ++ u32 clock_rate_index; ++}; ++ ++struct rpmi_clock_get_supported_rates_resp { ++ s32 status; ++ u32 flags; ++ u32 remaining; ++ u32 returned; ++ u32 clock_rate[0]; ++}; ++ ++struct rpmi_clock_set_config_req { ++ u32 clock_id; ++#define RPMI_CLOCK_CONFIG_ENABLE (1U << 0) ++ u32 config; ++}; ++ ++struct rpmi_clock_set_config_resp { ++ s32 status; ++}; ++ ++struct rpmi_clock_get_config_req { ++ u32 clock_id; ++}; ++ ++struct rpmi_clock_get_config_resp { ++ s32 status; ++ u32 config; ++}; ++ ++struct rpmi_clock_set_rate_req { ++ u32 clock_id; ++#define RPMI_CLOCK_SET_RATE_FLAGS_MASK (3U << 0) ++#define RPMI_CLOCK_SET_RATE_FLAGS_ROUND_DOWN 0 ++#define RPMI_CLOCK_SET_RATE_FLAGS_ROUND_UP 1 ++#define RPMI_CLOCK_SET_RATE_FLAGS_ROUND_PLAT 2 ++ u32 flags; ++ u32 clock_rate_low; ++ u32 clock_rate_high; ++}; ++ ++struct rpmi_clock_set_rate_resp { ++ s32 status; ++}; ++ ++struct rpmi_clock_get_rate_req { ++ u32 clock_id; ++}; ++ ++struct rpmi_clock_get_rate_resp { ++ s32 status; ++ u32 clock_rate_low; ++ u32 clock_rate_high; ++}; ++ ++#endif /* !__RPMI_MSGPROT_H__ */ +diff --git a/include/sbi_utils/mpxy/fdt_mpxy.h b/include/sbi_utils/mpxy/fdt_mpxy.h +new file mode 100644 +index 00000000..57e0b242 +--- /dev/null ++++ b/include/sbi_utils/mpxy/fdt_mpxy.h +@@ -0,0 +1,26 @@ ++/* ++ * 
SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems Inc. ++ * ++ * Authors: ++ * Anup Patel ++ */ ++ ++#ifndef __FDT_MPXY_H__ ++#define __FDT_MPXY_H__ ++ ++#include ++#include ++ ++#ifdef CONFIG_FDT_MPXY ++ ++int fdt_mpxy_init(const void *fdt); ++ ++#else ++ ++static inline int fdt_mpxy_init(const void *fdt) { return 0; } ++ ++#endif ++ ++#endif +diff --git a/include/sbi_utils/mpxy/fdt_mpxy_rpmi_mbox.h b/include/sbi_utils/mpxy/fdt_mpxy_rpmi_mbox.h +new file mode 100644 +index 00000000..3a1c1177 +--- /dev/null ++++ b/include/sbi_utils/mpxy/fdt_mpxy_rpmi_mbox.h +@@ -0,0 +1,85 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems Inc. ++ * ++ * Authors: ++ * Anup Patel ++ */ ++ ++#ifndef __FDT_MPXY_RPMI_MBOX_H__ ++#define __FDT_MPXY_RPMI_MBOX_H__ ++ ++#include ++#include ++#include ++#include ++#include ++ ++/** Convert the mpxy attribute ID to attribute array index */ ++#define attr_id2index(attr_id) (attr_id - SBI_MPXY_ATTR_MSGPROTO_ATTR_START) ++ ++enum mpxy_msgprot_rpmi_attr_id { ++ MPXY_MSGPROT_RPMI_ATTR_SERVICEGROUP_ID = SBI_MPXY_ATTR_MSGPROTO_ATTR_START, ++ MPXY_MSGPROT_RPMI_ATTR_SERVICEGROUP_VERSION, ++ MPXY_MSGPROT_RPMI_ATTR_IMPL_ID, ++ MPXY_MSGPROT_RPMI_ATTR_IMPL_VERSION, ++ MPXY_MSGPROT_RPMI_ATTR_MAX_ID ++}; ++ ++/** ++ * MPXY message protocol attributes for RPMI ++ * Order of attribute fields must follow the ++ * attribute IDs in `enum mpxy_msgprot_rpmi_attr_id` ++ */ ++struct mpxy_rpmi_channel_attrs { ++ u32 servicegrp_id; ++ u32 servicegrp_ver; ++ u32 impl_id; ++ u32 impl_ver; ++}; ++ ++/** Make sure all attributes are packed for direct memcpy */ ++#define assert_field_offset(field, attr_offset) \ ++ _Static_assert( \ ++ ((offsetof(struct mpxy_rpmi_channel_attrs, field)) / \ ++ sizeof(u32)) == (attr_offset - SBI_MPXY_ATTR_MSGPROTO_ATTR_START),\ ++ "field " #field \ ++ " from struct mpxy_rpmi_channel_attrs invalid offset, expected " #attr_offset) ++ 
++assert_field_offset(servicegrp_id, MPXY_MSGPROT_RPMI_ATTR_SERVICEGROUP_ID); ++assert_field_offset(servicegrp_ver, MPXY_MSGPROT_RPMI_ATTR_SERVICEGROUP_VERSION); ++assert_field_offset(impl_id, MPXY_MSGPROT_RPMI_ATTR_IMPL_ID); ++assert_field_offset(impl_ver, MPXY_MSGPROT_RPMI_ATTR_IMPL_VERSION); ++ ++/** MPXY RPMI service data for each service group */ ++struct mpxy_rpmi_service_data { ++ u8 id; ++ u32 min_tx_len; ++ u32 max_tx_len; ++ u32 min_rx_len; ++ u32 max_rx_len; ++}; ++ ++/** MPXY RPMI mbox data for each service group */ ++struct mpxy_rpmi_mbox_data { ++ u32 servicegrp_id; ++ u32 num_services; ++ struct mpxy_rpmi_service_data *service_data; ++ ++ /** Transfer RPMI service group message */ ++ int (*xfer_group)(void *context, struct mbox_chan *chan, ++ struct mbox_xfer *xfer); ++ ++ /** Setup RPMI service group context for MPXY */ ++ int (*setup_group)(void **context, struct mbox_chan *chan, ++ const struct mpxy_rpmi_mbox_data *data); ++ ++ /** Cleanup RPMI service group context for MPXY */ ++ void (*cleanup_group)(void *context); ++}; ++ ++/** Common probe function for MPXY RPMI drivers */ ++int mpxy_rpmi_mbox_init(const void *fdt, int nodeoff, const struct fdt_match *match); ++ ++#endif +diff --git a/lib/sbi/Kconfig b/lib/sbi/Kconfig +index df74bba3..21df580a 100644 +--- a/lib/sbi/Kconfig ++++ b/lib/sbi/Kconfig +@@ -34,4 +34,13 @@ config SBI_ECALL_VENDOR + bool "Platform-defined vendor extensions" + default y + ++config SBI_ECALL_SSE ++ bool "SSE extension" ++ default y ++ ++config SBI_ECALL_MPXY ++ bool "MPXY extension" ++ default y ++ ++ + endmenu +diff --git a/lib/sbi/objects.mk b/lib/sbi/objects.mk +index 10bd9eba..e0c57631 100644 +--- a/lib/sbi/objects.mk ++++ b/lib/sbi/objects.mk +@@ -43,13 +43,21 @@ libsbi-objs-$(CONFIG_SBI_ECALL_LEGACY) += sbi_ecall_legacy.o + carray-sbi_ecall_exts-$(CONFIG_SBI_ECALL_VENDOR) += ecall_vendor + libsbi-objs-$(CONFIG_SBI_ECALL_VENDOR) += sbi_ecall_vendor.o + ++carray-sbi_ecall_exts-$(CONFIG_SBI_ECALL_SSE) += ecall_sse 
++libsbi-objs-$(CONFIG_SBI_ECALL_SSE) += sbi_ecall_sse.o ++ ++carray-sbi_ecall_exts-$(CONFIG_SBI_ECALL_MPXY) += ecall_mpxy ++libsbi-objs-$(CONFIG_SBI_ECALL_MPXY) += sbi_ecall_mpxy.o ++ + libsbi-objs-y += sbi_bitmap.o + libsbi-objs-y += sbi_bitops.o + libsbi-objs-y += sbi_console.o + libsbi-objs-y += sbi_domain.o ++libsbi-objs-y += sbi_domain_data.o + libsbi-objs-y += sbi_emulate_csr.o + libsbi-objs-y += sbi_fifo.o + libsbi-objs-y += sbi_hart.o ++libsbi-objs-y += sbi_heap.o + libsbi-objs-y += sbi_math.o + libsbi-objs-y += sbi_hfence.o + libsbi-objs-y += sbi_hsm.o +@@ -60,7 +68,9 @@ libsbi-objs-y += sbi_irqchip.o + libsbi-objs-y += sbi_misaligned_ldst.o + libsbi-objs-y += sbi_platform.o + libsbi-objs-y += sbi_pmu.o ++libsbi-objs-y += sbi_mpxy.o + libsbi-objs-y += sbi_scratch.o ++libsbi-objs-y += sbi_sse.o + libsbi-objs-y += sbi_string.o + libsbi-objs-y += sbi_system.o + libsbi-objs-y += sbi_timer.o +diff --git a/lib/sbi/sbi_domain_data.c b/lib/sbi/sbi_domain_data.c +new file mode 100644 +index 00000000..5c2ea79d +--- /dev/null ++++ b/lib/sbi/sbi_domain_data.c +@@ -0,0 +1,142 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems Inc. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static SBI_LIST_HEAD(data_list); ++static DECLARE_BITMAP(data_idx_bmap, SBI_DOMAIN_MAX_DATA_PTRS); ++ ++void *sbi_domain_data_ptr(struct sbi_domain *dom, struct sbi_domain_data *data) ++{ ++ if (dom && data && data->data_idx < SBI_DOMAIN_MAX_DATA_PTRS) ++ return dom->data_priv.idx_to_data_ptr[data->data_idx]; ++ ++ return NULL; ++} ++ ++static int domain_setup_data_one(struct sbi_domain *dom, ++ struct sbi_domain_data *data) ++{ ++ struct sbi_domain_data_priv *priv = &dom->data_priv; ++ void *data_ptr; ++ int rc; ++ ++ if (priv->idx_to_data_ptr[data->data_idx]) ++ return SBI_EALREADY; ++ ++ data_ptr = sbi_zalloc(data->data_size); ++ if (!data_ptr) { ++ sbi_domain_cleanup_data(dom); ++ return SBI_ENOMEM; ++ } ++ ++ if (data->data_setup) { ++ rc = data->data_setup(dom, data, data_ptr); ++ if (rc) { ++ sbi_free(data_ptr); ++ return rc; ++ } ++ } ++ ++ priv->idx_to_data_ptr[data->data_idx] = data_ptr; ++ return 0; ++} ++ ++static void domain_cleanup_data_one(struct sbi_domain *dom, ++ struct sbi_domain_data *data) ++{ ++ struct sbi_domain_data_priv *priv = &dom->data_priv; ++ void *data_ptr; ++ ++ data_ptr = priv->idx_to_data_ptr[data->data_idx]; ++ if (!data_ptr) ++ return; ++ ++ if (data->data_cleanup) ++ data->data_cleanup(dom, data, data_ptr); ++ ++ sbi_free(data_ptr); ++ priv->idx_to_data_ptr[data->data_idx] = NULL; ++} ++ ++int sbi_domain_setup_data(struct sbi_domain *dom) ++{ ++ struct sbi_domain_data *data; ++ int rc; ++ ++ if (!dom) ++ return SBI_EINVAL; ++ ++ sbi_list_for_each_entry(data, &data_list, head) { ++ rc = domain_setup_data_one(dom, data); ++ if (rc) { ++ sbi_domain_cleanup_data(dom); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++void sbi_domain_cleanup_data(struct sbi_domain *dom) ++{ ++ struct sbi_domain_data *data; ++ ++ if (!dom) ++ return; ++ ++ sbi_list_for_each_entry(data, &data_list, head) ++ domain_cleanup_data_one(dom, data); ++} ++ ++int 
sbi_domain_register_data(struct sbi_domain_data *data) ++{ ++ struct sbi_domain *dom; ++ u32 data_idx; ++ int rc; ++ u32 i; ++ ++ if (!data || !data->data_size) ++ return SBI_EINVAL; ++ ++ for (data_idx = 0; data_idx < SBI_DOMAIN_MAX_DATA_PTRS; data_idx++) { ++ if (!bitmap_test(data_idx_bmap, data_idx)) ++ break; ++ } ++ if (SBI_DOMAIN_MAX_DATA_PTRS <= data_idx) ++ return SBI_ENOSPC; ++ bitmap_set(data_idx_bmap, data_idx, 1); ++ ++ data->data_idx = data_idx; ++ sbi_list_add_tail(&data->head, &data_list); ++ ++ sbi_domain_for_each(i, dom) { ++ rc = domain_setup_data_one(dom, data); ++ if (rc) { ++ sbi_domain_unregister_data(data); ++ return rc; ++ } ++ } ++ ++ return 0; ++} ++ ++void sbi_domain_unregister_data(struct sbi_domain_data *data) ++{ ++ u32 i; ++ struct sbi_domain *dom; ++ ++ sbi_domain_for_each(i, dom) ++ domain_cleanup_data_one(dom, data); ++ ++ sbi_list_del(&data->head); ++ bitmap_clear(data_idx_bmap, data->data_idx, 1); ++} +diff --git a/lib/sbi/sbi_ecall.c b/lib/sbi/sbi_ecall.c +index 6caadea6..d6f4782b 100644 +--- a/lib/sbi/sbi_ecall.c ++++ b/lib/sbi/sbi_ecall.c +@@ -102,14 +102,12 @@ int sbi_ecall_handler(struct sbi_trap_regs *regs) + struct sbi_ecall_extension *ext; + unsigned long extension_id = regs->a7; + unsigned long func_id = regs->a6; +- struct sbi_trap_info trap = {0}; +- unsigned long out_val = 0; ++ struct sbi_ecall_return out = {0}; + bool is_0_1_spec = 0; + + ext = sbi_ecall_find_extension(extension_id); + if (ext && ext->handle) { +- ret = ext->handle(extension_id, func_id, +- regs, &out_val, &trap); ++ ret = ext->handle(extension_id, func_id, regs, &out); + if (extension_id >= SBI_EXT_0_1_SET_TIMER && + extension_id <= SBI_EXT_0_1_SHUTDOWN) + is_0_1_spec = 1; +@@ -117,21 +115,20 @@ int sbi_ecall_handler(struct sbi_trap_regs *regs) + ret = SBI_ENOTSUPP; + } + +- if (ret == SBI_ETRAP) { +- trap.epc = regs->mepc; +- sbi_trap_redirect(regs, &trap); +- } else if (extension_id == SBI_EXT_PENGLAI_HOST || +- extension_id == 
SBI_EXT_PENGLAI_ENCLAVE) { ++ if (extension_id == SBI_EXT_PENGLAI_HOST || ++ extension_id == SBI_EXT_PENGLAI_ENCLAVE) { + //FIXME: update the return value assignment when we update enclave side SBI routines +- regs->a0 = out_val; ++ regs->a0 = out.value; + if (!is_0_1_spec){ + if(check_in_enclave_world() == -1){ + regs->a0 = ret; +- regs->a1 = out_val; ++ regs->a1 = out.value; + } + } +- } else { +- if (ret < SBI_LAST_ERR) { ++ } else if (!out.skip_regs_update) { ++ if (ret < SBI_LAST_ERR || ++ (extension_id != SBI_EXT_0_1_CONSOLE_GETCHAR && ++ SBI_SUCCESS < ret)) { + sbi_printf("%s: Invalid error %d for ext=0x%lx " + "func=0x%lx\n", __func__, ret, + extension_id, func_id); +@@ -149,7 +146,7 @@ int sbi_ecall_handler(struct sbi_trap_regs *regs) + regs->mepc += 4; + regs->a0 = ret; + if (!is_0_1_spec) +- regs->a1 = out_val; ++ regs->a1 = out.value; + } + + return 0; +@@ -158,42 +155,16 @@ int sbi_ecall_handler(struct sbi_trap_regs *regs) + int sbi_ecall_init(void) + { + int ret; +- // struct sbi_ecall_extension *ext; +- // unsigned long i; ++ struct sbi_ecall_extension *ext; ++ unsigned long i; + +- /* for (i = 0; i < sbi_ecall_exts_size; i++) { ++ for (i = 0; i < sbi_ecall_exts_size; i++) { + ext = sbi_ecall_exts[i]; + ret = sbi_ecall_register_extension(ext); + if (ret) + return ret; +- } */ +- ret = sbi_ecall_register_extension(&ecall_time); +- if (ret) +- return ret; +- ret = sbi_ecall_register_extension(&ecall_rfence); +- if (ret) +- return ret; +- ret = sbi_ecall_register_extension(&ecall_ipi); +- if (ret) +- return ret; +- ret = sbi_ecall_register_extension(&ecall_base); +- if (ret) +- return ret; +- ret = sbi_ecall_register_extension(&ecall_hsm); +- if (ret) +- return ret; +- ret = sbi_ecall_register_extension(&ecall_srst); +- if (ret) +- return ret; +- ret = sbi_ecall_register_extension(&ecall_pmu); +- if (ret) +- return ret; +- ret = sbi_ecall_register_extension(&ecall_legacy); +- if (ret) +- return ret; +- ret = sbi_ecall_register_extension(&ecall_vendor); 
+- if (ret) +- return ret; ++ } ++ + ret = sbi_ecall_register_extension(&ecall_penglai_host); + if (ret) + return ret; +diff --git a/lib/sbi/sbi_ecall_base.c b/lib/sbi/sbi_ecall_base.c +index 786d2ac6..66bef22d 100644 +--- a/lib/sbi/sbi_ecall_base.c ++++ b/lib/sbi/sbi_ecall_base.c +@@ -33,37 +33,36 @@ static int sbi_ecall_base_probe(unsigned long extid, unsigned long *out_val) + } + + static int sbi_ecall_base_handler(unsigned long extid, unsigned long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_val, +- struct sbi_trap_info *out_trap) ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { + int ret = 0; + + switch (funcid) { + case SBI_EXT_BASE_GET_SPEC_VERSION: +- *out_val = (SBI_ECALL_VERSION_MAJOR << +- SBI_SPEC_VERSION_MAJOR_OFFSET) & +- (SBI_SPEC_VERSION_MAJOR_MASK << +- SBI_SPEC_VERSION_MAJOR_OFFSET); +- *out_val = *out_val | SBI_ECALL_VERSION_MINOR; ++ out->value = (SBI_ECALL_VERSION_MAJOR << ++ SBI_SPEC_VERSION_MAJOR_OFFSET) & ++ (SBI_SPEC_VERSION_MAJOR_MASK << ++ SBI_SPEC_VERSION_MAJOR_OFFSET); ++ out->value = out->value | SBI_ECALL_VERSION_MINOR; + break; + case SBI_EXT_BASE_GET_IMP_ID: +- *out_val = sbi_ecall_get_impid(); ++ out->value = sbi_ecall_get_impid(); + break; + case SBI_EXT_BASE_GET_IMP_VERSION: +- *out_val = OPENSBI_VERSION; ++ out->value = OPENSBI_VERSION; + break; + case SBI_EXT_BASE_GET_MVENDORID: +- *out_val = csr_read(CSR_MVENDORID); ++ out->value = csr_read(CSR_MVENDORID); + break; + case SBI_EXT_BASE_GET_MARCHID: +- *out_val = csr_read(CSR_MARCHID); ++ out->value = csr_read(CSR_MARCHID); + break; + case SBI_EXT_BASE_GET_MIMPID: +- *out_val = csr_read(CSR_MIMPID); ++ out->value = csr_read(CSR_MIMPID); + break; + case SBI_EXT_BASE_PROBE_EXT: +- ret = sbi_ecall_base_probe(regs->a0, out_val); ++ ret = sbi_ecall_base_probe(regs->a0, &out->value); + break; + default: + ret = SBI_ENOTSUPP; +diff --git a/lib/sbi/sbi_ecall_hsm.c b/lib/sbi/sbi_ecall_hsm.c +index a339abf0..db8079a9 100644 +--- 
a/lib/sbi/sbi_ecall_hsm.c ++++ b/lib/sbi/sbi_ecall_hsm.c +@@ -18,9 +18,8 @@ + #include + + static int sbi_ecall_hsm_handler(unsigned long extid, unsigned long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_val, +- struct sbi_trap_info *out_trap) ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { + int ret = 0; + struct sbi_scratch *scratch = sbi_scratch_thishart_ptr(); +@@ -47,7 +46,7 @@ static int sbi_ecall_hsm_handler(unsigned long extid, unsigned long funcid, + ret = SBI_ENOTSUPP; + }; + if (ret >= 0) { +- *out_val = ret; ++ out->value = ret; + ret = 0; + } + +diff --git a/lib/sbi/sbi_ecall_ipi.c b/lib/sbi/sbi_ecall_ipi.c +index f4797e11..20ea85da 100644 +--- a/lib/sbi/sbi_ecall_ipi.c ++++ b/lib/sbi/sbi_ecall_ipi.c +@@ -15,9 +15,8 @@ + #include + + static int sbi_ecall_ipi_handler(unsigned long extid, unsigned long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_val, +- struct sbi_trap_info *out_trap) ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { + int ret = 0; + +diff --git a/lib/sbi/sbi_ecall_legacy.c b/lib/sbi/sbi_ecall_legacy.c +index e20de766..5785059b 100644 +--- a/lib/sbi/sbi_ecall_legacy.c ++++ b/lib/sbi/sbi_ecall_legacy.c +@@ -43,13 +43,13 @@ static int sbi_load_hart_mask_unpriv(ulong *pmask, ulong *hmask, + } + + static int sbi_ecall_legacy_handler(unsigned long extid, unsigned long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_val, +- struct sbi_trap_info *out_trap) ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { + int ret = 0; + struct sbi_tlb_info tlb_info; + u32 source_hart = current_hartid(); ++ struct sbi_trap_info trap = {0}; + ulong hmask = 0; + + switch (extid) { +@@ -71,39 +71,59 @@ static int sbi_ecall_legacy_handler(unsigned long extid, unsigned long funcid, + break; + case SBI_EXT_0_1_SEND_IPI: + ret = sbi_load_hart_mask_unpriv((ulong *)regs->a0, +- &hmask, out_trap); +- if (ret != SBI_ETRAP) ++ &hmask, &trap); ++ if (ret 
!= SBI_ETRAP) { + ret = sbi_ipi_send_smode(hmask, 0); ++ } else { ++ ret = 0; ++ trap.epc = regs->mepc; ++ sbi_trap_redirect(regs, &trap); ++ out->skip_regs_update = true; ++ } + break; + case SBI_EXT_0_1_REMOTE_FENCE_I: + ret = sbi_load_hart_mask_unpriv((ulong *)regs->a0, +- &hmask, out_trap); ++ &hmask, &trap); + if (ret != SBI_ETRAP) { + SBI_TLB_INFO_INIT(&tlb_info, 0, 0, 0, 0, + sbi_tlb_local_fence_i, + source_hart); + ret = sbi_tlb_request(hmask, 0, &tlb_info); ++ } else { ++ ret = 0; ++ trap.epc = regs->mepc; ++ sbi_trap_redirect(regs, &trap); ++ out->skip_regs_update = true; + } + break; + case SBI_EXT_0_1_REMOTE_SFENCE_VMA: + ret = sbi_load_hart_mask_unpriv((ulong *)regs->a0, +- &hmask, out_trap); ++ &hmask, &trap); + if (ret != SBI_ETRAP) { + SBI_TLB_INFO_INIT(&tlb_info, regs->a1, regs->a2, 0, 0, +- sbi_tlb_local_sfence_vma, +- source_hart); ++ sbi_tlb_local_sfence_vma, source_hart); + ret = sbi_tlb_request(hmask, 0, &tlb_info); ++ } else { ++ ret = 0; ++ trap.epc = regs->mepc; ++ sbi_trap_redirect(regs, &trap); ++ out->skip_regs_update = true; + } + break; + case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID: + ret = sbi_load_hart_mask_unpriv((ulong *)regs->a0, +- &hmask, out_trap); ++ &hmask, &trap); + if (ret != SBI_ETRAP) { + SBI_TLB_INFO_INIT(&tlb_info, regs->a1, + regs->a2, regs->a3, 0, + sbi_tlb_local_sfence_vma_asid, + source_hart); + ret = sbi_tlb_request(hmask, 0, &tlb_info); ++ } else { ++ ret = 0; ++ trap.epc = regs->mepc; ++ sbi_trap_redirect(regs, &trap); ++ out->skip_regs_update = true; + } + break; + case SBI_EXT_0_1_SHUTDOWN: +@@ -112,7 +132,7 @@ static int sbi_ecall_legacy_handler(unsigned long extid, unsigned long funcid, + break; + default: + ret = SBI_ENOTSUPP; +- }; ++ } + + return ret; + } +diff --git a/lib/sbi/sbi_ecall_mpxy.c b/lib/sbi/sbi_ecall_mpxy.c +new file mode 100644 +index 00000000..0a5e3525 +--- /dev/null ++++ b/lib/sbi/sbi_ecall_mpxy.c +@@ -0,0 +1,72 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 
Ventana Micro Systems Inc. ++ * ++ * Authors: ++ * Anup Patel ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++static int sbi_ecall_mpxy_handler(unsigned long extid, unsigned long funcid, ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) ++{ ++ int ret = 0; ++ ++ switch (funcid) { ++ case SBI_EXT_MPXY_GET_SHMEM_SIZE: ++ out->value = sbi_mpxy_get_shmem_size(); ++ break; ++ case SBI_EXT_MPXY_SET_SHMEM: ++ ret = sbi_mpxy_set_shmem(regs->a0, regs->a1, regs->a2); ++ break; ++ case SBI_EXT_MPXY_GET_CHANNEL_IDS: ++ ret = sbi_mpxy_get_channel_ids(regs->a0); ++ break; ++ case SBI_EXT_MPXY_READ_ATTRS: ++ ret = sbi_mpxy_read_attrs(regs->a0, regs->a1, regs->a2); ++ break; ++ case SBI_EXT_MPXY_WRITE_ATTRS: ++ ret = sbi_mpxy_write_attrs(regs->a0, regs->a1, regs->a2); ++ break; ++ case SBI_EXT_MPXY_SEND_MSG_WITH_RESP: ++ ret = sbi_mpxy_send_message(regs->a0, regs->a1, ++ regs->a2, &out->value); ++ break; ++ case SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP: ++ ret = sbi_mpxy_send_message(regs->a0, regs->a1, regs->a2, ++ NULL); ++ break; ++ case SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS: ++ ret = sbi_mpxy_get_notification_events(regs->a0, &out->value); ++ break; ++ default: ++ ret = SBI_ENOTSUPP; ++ } ++ ++ return ret; ++} ++ ++struct sbi_ecall_extension ecall_mpxy; ++ ++static int sbi_ecall_mpxy_register_extensions(void) ++{ ++ if (!sbi_mpxy_channel_available()) ++ return 0; ++ ++ return sbi_ecall_register_extension(&ecall_mpxy); ++} ++ ++struct sbi_ecall_extension ecall_mpxy = { ++ .name = "mpxy", ++ .extid_start = SBI_EXT_MPXY, ++ .extid_end = SBI_EXT_MPXY, ++ .register_extensions = sbi_ecall_mpxy_register_extensions, ++ .handle = sbi_ecall_mpxy_handler, ++}; +diff --git a/lib/sbi/sbi_ecall_penglai.c b/lib/sbi/sbi_ecall_penglai.c +index 2834e36f..c9e8a67c 100644 +--- a/lib/sbi/sbi_ecall_penglai.c ++++ b/lib/sbi/sbi_ecall_penglai.c +@@ -16,9 +16,10 @@ + + // static spinlock_t sm_big_lock = SPIN_LOCK_INITIALIZER; + +-static int 
sbi_ecall_penglai_host_handler(unsigned long extid, unsigned long funcid, +- const struct sbi_trap_regs *regs, unsigned long *out_val, +- struct sbi_trap_info *out_trap) ++static int sbi_ecall_penglai_host_handler(unsigned long extid, ++ unsigned long funcid, ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { + uintptr_t ret = 0; + printm("[Penglai KModule@%u] %s invoked,funcid=%ld\r\n", +@@ -67,7 +68,7 @@ static int sbi_ecall_penglai_host_handler(unsigned long extid, unsigned long fun + } + //((struct sbi_trap_regs *)regs)->mepc = csr_read(CSR_MEPC); + //((struct sbi_trap_regs *)regs)->mstatus = csr_read(CSR_MSTATUS); +- *out_val = ret; ++ out->value = ret; + // spin_unlock(&sm_big_lock); + printm("[Penglai KModule@%u] %s return %ld, funcid=%ld\r\n", + current_hartid(), __func__, ret, funcid); +@@ -80,9 +81,10 @@ struct sbi_ecall_extension ecall_penglai_host = { + .handle = sbi_ecall_penglai_host_handler, + }; + +-static int sbi_ecall_penglai_enclave_handler(unsigned long extid, unsigned long funcid, +- const struct sbi_trap_regs *regs, unsigned long *out_val, +- struct sbi_trap_info *out_trap) ++static int sbi_ecall_penglai_enclave_handler(unsigned long extid, ++ unsigned long funcid, ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { + uintptr_t ret = 0; + // spin_lock(&sm_big_lock); +@@ -107,7 +109,7 @@ static int sbi_ecall_penglai_enclave_handler(unsigned long extid, unsigned long + } + printm("[Penglai KModule@%u] %s return %ld,funcid=%ld\r\n", current_hartid(), __func__,ret , funcid); + // spin_unlock(&sm_big_lock); +- *out_val = ret; ++ out->value = ret; + return ret; + } + +diff --git a/lib/sbi/sbi_ecall_pmu.c b/lib/sbi/sbi_ecall_pmu.c +index 826c8a89..fccddeaa 100644 +--- a/lib/sbi/sbi_ecall_pmu.c ++++ b/lib/sbi/sbi_ecall_pmu.c +@@ -18,9 +18,8 @@ + #include + + static int sbi_ecall_pmu_handler(unsigned long extid, unsigned long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_val, +- struct sbi_trap_info 
*out_trap) ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { + int ret = 0; + uint64_t temp; +@@ -29,12 +28,12 @@ static int sbi_ecall_pmu_handler(unsigned long extid, unsigned long funcid, + case SBI_EXT_PMU_NUM_COUNTERS: + ret = sbi_pmu_num_ctr(); + if (ret >= 0) { +- *out_val = ret; ++ out->value = ret; + ret = 0; + } + break; + case SBI_EXT_PMU_COUNTER_GET_INFO: +- ret = sbi_pmu_ctr_get_info(regs->a0, out_val); ++ ret = sbi_pmu_ctr_get_info(regs->a0, &out->value); + break; + case SBI_EXT_PMU_COUNTER_CFG_MATCH: + #if __riscv_xlen == 32 +@@ -45,14 +44,14 @@ static int sbi_ecall_pmu_handler(unsigned long extid, unsigned long funcid, + ret = sbi_pmu_ctr_cfg_match(regs->a0, regs->a1, regs->a2, + regs->a3, temp); + if (ret >= 0) { +- *out_val = ret; ++ out->value = ret; + ret = 0; + } + + break; + case SBI_EXT_PMU_COUNTER_FW_READ: + ret = sbi_pmu_ctr_fw_read(regs->a0, &temp); +- *out_val = temp; ++ out->value = temp; + break; + case SBI_EXT_PMU_COUNTER_START: + +@@ -68,7 +67,7 @@ static int sbi_ecall_pmu_handler(unsigned long extid, unsigned long funcid, + break; + default: + ret = SBI_ENOTSUPP; +- }; ++ } + + return ret; + } +diff --git a/lib/sbi/sbi_ecall_rfence.c b/lib/sbi/sbi_ecall_rfence.c +index 8f0e3d7b..11bf90cd 100644 +--- a/lib/sbi/sbi_ecall_rfence.c ++++ b/lib/sbi/sbi_ecall_rfence.c +@@ -16,9 +16,8 @@ + #include + + static int sbi_ecall_rfence_handler(unsigned long extid, unsigned long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_val, +- struct sbi_trap_info *out_trap) ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { + int ret = 0; + unsigned long vmid; +diff --git a/lib/sbi/sbi_ecall_srst.c b/lib/sbi/sbi_ecall_srst.c +index 93b012ce..1c52f1f7 100644 +--- a/lib/sbi/sbi_ecall_srst.c ++++ b/lib/sbi/sbi_ecall_srst.c +@@ -15,9 +15,8 @@ + #include + + static int sbi_ecall_srst_handler(unsigned long extid, unsigned long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_val, +- struct 
sbi_trap_info *out_trap) ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { + if (funcid == SBI_EXT_SRST_RESET) { + if ((((u32)-1U) <= ((u64)regs->a0)) || +diff --git a/lib/sbi/sbi_ecall_sse.c b/lib/sbi/sbi_ecall_sse.c +new file mode 100644 +index 00000000..b7dbf072 +--- /dev/null ++++ b/lib/sbi/sbi_ecall_sse.c +@@ -0,0 +1,64 @@ ++#include ++#include ++#include ++#include ++ ++static int sbi_ecall_sse_handler(unsigned long extid, unsigned long funcid, ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) ++{ ++ int ret; ++ ++ switch (funcid) { ++ case SBI_EXT_SSE_READ_ATTR: ++ ret = sbi_sse_read_attrs(regs->a0, regs->a1, regs->a2, ++ regs->a3, regs->a4); ++ break; ++ case SBI_EXT_SSE_WRITE_ATTR: ++ ret = sbi_sse_write_attrs(regs->a0, regs->a1, regs->a2, ++ regs->a3, regs->a4); ++ break; ++ case SBI_EXT_SSE_REGISTER: ++ ret = sbi_sse_register(regs->a0, regs->a1, regs->a2); ++ break; ++ case SBI_EXT_SSE_UNREGISTER: ++ ret = sbi_sse_unregister(regs->a0); ++ break; ++ case SBI_EXT_SSE_ENABLE: ++ ret = sbi_sse_enable(regs->a0); ++ break; ++ case SBI_EXT_SSE_DISABLE: ++ ret = sbi_sse_disable(regs->a0); ++ break; ++ case SBI_EXT_SSE_COMPLETE: ++ ret = sbi_sse_complete(regs, out); ++ break; ++ case SBI_EXT_SSE_INJECT: ++ ret = sbi_sse_inject_from_ecall(regs->a0, regs->a1, out); ++ break; ++ case SBI_EXT_SSE_HART_MASK: ++ ret = sbi_sse_hart_mask(); ++ break; ++ case SBI_EXT_SSE_HART_UNMASK: ++ ret = sbi_sse_hart_unmask(); ++ break; ++ default: ++ ret = SBI_ENOTSUPP; ++ } ++ return ret; ++} ++ ++struct sbi_ecall_extension ecall_sse; ++ ++static int sbi_ecall_sse_register_extensions(void) ++{ ++ return sbi_ecall_register_extension(&ecall_sse); ++} ++ ++struct sbi_ecall_extension ecall_sse = { ++ .name = "sse", ++ .extid_start = SBI_EXT_SSE, ++ .extid_end = SBI_EXT_SSE, ++ .register_extensions = sbi_ecall_sse_register_extensions, ++ .handle = sbi_ecall_sse_handler, ++}; +diff --git a/lib/sbi/sbi_ecall_time.c b/lib/sbi/sbi_ecall_time.c +index 
668cb176..18b93171 100644 +--- a/lib/sbi/sbi_ecall_time.c ++++ b/lib/sbi/sbi_ecall_time.c +@@ -15,9 +15,8 @@ + #include + + static int sbi_ecall_time_handler(unsigned long extid, unsigned long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_val, +- struct sbi_trap_info *out_trap) ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { + int ret = 0; + +diff --git a/lib/sbi/sbi_ecall_vendor.c b/lib/sbi/sbi_ecall_vendor.c +index 92528296..a3dbb88e 100644 +--- a/lib/sbi/sbi_ecall_vendor.c ++++ b/lib/sbi/sbi_ecall_vendor.c +@@ -23,13 +23,11 @@ static int sbi_ecall_vendor_probe(unsigned long extid, + } + + static int sbi_ecall_vendor_handler(unsigned long extid, unsigned long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_val, +- struct sbi_trap_info *out_trap) ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { + return sbi_platform_vendor_ext_provider(sbi_platform_thishart_ptr(), +- extid, funcid, regs, +- out_val, out_trap); ++ extid, funcid, regs, out); + } + + struct sbi_ecall_extension ecall_vendor = { +diff --git a/lib/sbi/sbi_heap.c b/lib/sbi/sbi_heap.c +new file mode 100644 +index 00000000..fdd09163 +--- /dev/null ++++ b/lib/sbi/sbi_heap.c +@@ -0,0 +1,280 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2023 Ventana Micro Systems Inc. 
++ * ++ * Authors: ++ * Anup Patel ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* Minimum size and alignment of heap allocations */ ++#define HEAP_ALLOC_ALIGN 64 ++#define HEAP_HOUSEKEEPING_FACTOR 16 ++ ++struct heap_node { ++ struct sbi_dlist head; ++ unsigned long addr; ++ unsigned long size; ++}; ++ ++struct sbi_heap_control { ++ spinlock_t lock; ++ unsigned long base; ++ unsigned long size; ++ unsigned long hkbase; ++ unsigned long hksize; ++ struct sbi_dlist free_node_list; ++ struct sbi_dlist free_space_list; ++ struct sbi_dlist used_space_list; ++}; ++ ++struct sbi_heap_control global_hpctrl; ++ ++static void *alloc_with_align(struct sbi_heap_control *hpctrl, ++ size_t align, size_t size) ++{ ++ void *ret = NULL; ++ struct heap_node *n, *np, *rem; ++ unsigned long lowest_aligned; ++ size_t pad; ++ ++ if (!size) ++ return NULL; ++ ++ size += align - 1; ++ size &= ~((unsigned long)align - 1); ++ ++ spin_lock(&hpctrl->lock); ++ ++ np = NULL; ++ sbi_list_for_each_entry(n, &hpctrl->free_space_list, head) { ++ lowest_aligned = ROUNDUP(n->addr, align); ++ pad = lowest_aligned - n->addr; ++ ++ if (size + pad <= n->size) { ++ np = n; ++ break; ++ } ++ } ++ if (!np) ++ goto out; ++ ++ if (pad) { ++ if (sbi_list_empty(&hpctrl->free_node_list)) { ++ goto out; ++ } ++ ++ n = sbi_list_first_entry(&hpctrl->free_node_list, ++ struct heap_node, head); ++ sbi_list_del(&n->head); ++ ++ if ((size + pad < np->size) && ++ !sbi_list_empty(&hpctrl->free_node_list)) { ++ rem = sbi_list_first_entry(&hpctrl->free_node_list, ++ struct heap_node, head); ++ sbi_list_del(&rem->head); ++ rem->addr = np->addr + (size + pad); ++ rem->size = np->size - (size + pad); ++ sbi_list_add_tail(&rem->head, ++ &hpctrl->free_space_list); ++ } else if (size + pad != np->size) { ++ /* Can't allocate, return n */ ++ sbi_list_add(&n->head, &hpctrl->free_node_list); ++ ret = NULL; ++ goto out; ++ } ++ ++ n->addr = lowest_aligned; ++ n->size = size; ++ 
sbi_list_add_tail(&n->head, &hpctrl->used_space_list); ++ ++ np->size = pad; ++ ret = (void *)n->addr; ++ } else { ++ if ((size < np->size) && ++ !sbi_list_empty(&hpctrl->free_node_list)) { ++ n = sbi_list_first_entry(&hpctrl->free_node_list, ++ struct heap_node, head); ++ sbi_list_del(&n->head); ++ n->addr = np->addr; ++ n->size = size; ++ np->addr += size; ++ np->size -= size; ++ sbi_list_add_tail(&n->head, &hpctrl->used_space_list); ++ ret = (void *)n->addr; ++ } else if (size == np->size) { ++ sbi_list_del(&np->head); ++ sbi_list_add_tail(&np->head, &hpctrl->used_space_list); ++ ret = (void *)np->addr; ++ } ++ } ++ ++out: ++ spin_unlock(&hpctrl->lock); ++ ++ return ret; ++} ++ ++void *sbi_malloc_from(struct sbi_heap_control *hpctrl, size_t size) ++{ ++ return alloc_with_align(hpctrl, HEAP_ALLOC_ALIGN, size); ++} ++ ++void *sbi_aligned_alloc_from(struct sbi_heap_control *hpctrl, ++ size_t alignment, size_t size) ++{ ++ if (alignment < HEAP_ALLOC_ALIGN) ++ alignment = HEAP_ALLOC_ALIGN; ++ ++ /* Make sure alignment is power of two */ ++ if ((alignment & (alignment - 1)) != 0) ++ return NULL; ++ ++ /* Make sure size is multiple of alignment */ ++ if (size % alignment != 0) ++ return NULL; ++ ++ return alloc_with_align(hpctrl, alignment, size); ++} ++ ++void *sbi_zalloc_from(struct sbi_heap_control *hpctrl, size_t size) ++{ ++ void *ret = sbi_malloc_from(hpctrl, size); ++ ++ if (ret) ++ sbi_memset(ret, 0, size); ++ return ret; ++} ++ ++void sbi_free_from(struct sbi_heap_control *hpctrl, void *ptr) ++{ ++ struct heap_node *n, *np; ++ ++ if (!ptr) ++ return; ++ ++ spin_lock(&hpctrl->lock); ++ ++ np = NULL; ++ sbi_list_for_each_entry(n, &hpctrl->used_space_list, head) { ++ if ((n->addr <= (unsigned long)ptr) && ++ ((unsigned long)ptr < (n->addr + n->size))) { ++ np = n; ++ break; ++ } ++ } ++ if (!np) { ++ spin_unlock(&hpctrl->lock); ++ return; ++ } ++ ++ sbi_list_del(&np->head); ++ ++ sbi_list_for_each_entry(n, &hpctrl->free_space_list, head) { ++ if ((np->addr + 
np->size) == n->addr) { ++ n->addr = np->addr; ++ n->size += np->size; ++ sbi_list_add_tail(&np->head, &hpctrl->free_node_list); ++ np = NULL; ++ break; ++ } else if (np->addr == (n->addr + n->size)) { ++ n->size += np->size; ++ sbi_list_add_tail(&np->head, &hpctrl->free_node_list); ++ np = NULL; ++ break; ++ } else if ((n->addr + n->size) < np->addr) { ++ sbi_list_add(&np->head, &n->head); ++ np = NULL; ++ break; ++ } ++ } ++ if (np) ++ sbi_list_add_tail(&np->head, &hpctrl->free_space_list); ++ ++ spin_unlock(&hpctrl->lock); ++} ++ ++unsigned long sbi_heap_free_space_from(struct sbi_heap_control *hpctrl) ++{ ++ struct heap_node *n; ++ unsigned long ret = 0; ++ ++ spin_lock(&hpctrl->lock); ++ sbi_list_for_each_entry(n, &hpctrl->free_space_list, head) ++ ret += n->size; ++ spin_unlock(&hpctrl->lock); ++ ++ return ret; ++} ++ ++unsigned long sbi_heap_used_space_from(struct sbi_heap_control *hpctrl) ++{ ++ return hpctrl->size - hpctrl->hksize - sbi_heap_free_space(); ++} ++ ++unsigned long sbi_heap_reserved_space_from(struct sbi_heap_control *hpctrl) ++{ ++ return hpctrl->hksize; ++} ++ ++int sbi_heap_init_new(struct sbi_heap_control *hpctrl, unsigned long base, ++ unsigned long size) ++{ ++ unsigned long i; ++ struct heap_node *n; ++ ++ /* Initialize heap control */ ++ SPIN_LOCK_INIT(hpctrl->lock); ++ hpctrl->base = base; ++ hpctrl->size = size; ++ hpctrl->hkbase = hpctrl->base; ++ hpctrl->hksize = hpctrl->size / HEAP_HOUSEKEEPING_FACTOR; ++ hpctrl->hksize &= ~((unsigned long)HEAP_BASE_ALIGN - 1); ++ SBI_INIT_LIST_HEAD(&hpctrl->free_node_list); ++ SBI_INIT_LIST_HEAD(&hpctrl->free_space_list); ++ SBI_INIT_LIST_HEAD(&hpctrl->used_space_list); ++ ++ /* Prepare free node list */ ++ for (i = 0; i < (hpctrl->hksize / sizeof(*n)); i++) { ++ n = (struct heap_node *)(hpctrl->hkbase + (sizeof(*n) * i)); ++ n->addr = n->size = 0; ++ sbi_list_add_tail(&n->head, &hpctrl->free_node_list); ++ } ++ ++ /* Prepare free space list */ ++ n = sbi_list_first_entry(&hpctrl->free_node_list, 
++ struct heap_node, head); ++ sbi_list_del(&n->head); ++ n->addr = hpctrl->hkbase + hpctrl->hksize; ++ n->size = hpctrl->size - hpctrl->hksize; ++ sbi_list_add_tail(&n->head, &hpctrl->free_space_list); ++ ++ return 0; ++} ++ ++int sbi_heap_init(struct sbi_scratch *scratch) ++{ ++ /* Sanity checks on heap offset and size */ ++ if (!scratch->fw_heap_size || ++ (scratch->fw_heap_size & (HEAP_BASE_ALIGN - 1)) || ++ (scratch->fw_size < (scratch->fw_heap_offset + scratch->fw_heap_size)) || ++ (scratch->fw_heap_offset & (HEAP_BASE_ALIGN - 1))) ++ return SBI_EINVAL; ++ ++ return sbi_heap_init_new(&global_hpctrl, ++ scratch->fw_start + scratch->fw_heap_offset, ++ scratch->fw_heap_size); ++} ++ ++int sbi_heap_alloc_new(struct sbi_heap_control **hpctrl) ++{ ++ *hpctrl = sbi_calloc(1, sizeof(struct sbi_heap_control)); ++ return 0; ++} +diff --git a/lib/sbi/sbi_init.c b/lib/sbi/sbi_init.c +index 6808e595..1b378d2f 100644 +--- a/lib/sbi/sbi_init.c ++++ b/lib/sbi/sbi_init.c +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -27,6 +28,8 @@ + #include + #include + #include ++#include ++#include + + #define BANNER \ + " ____ _____ ____ _____\n" \ +@@ -109,6 +112,12 @@ static void sbi_boot_print_general(struct sbi_scratch *scratch) + sbi_printf("Firmware Base : 0x%lx\n", scratch->fw_start); + sbi_printf("Firmware Size : %d KB\n", + (u32)(scratch->fw_size / 1024)); ++ sbi_printf("Firmware Heap Size : " ++ "%d KB (total), %d KB (reserved), %d KB (used), %d KB (free)\n", ++ (u32)(scratch->fw_heap_size / 1024), ++ (u32)(sbi_heap_reserved_space() / 1024), ++ (u32)(sbi_heap_used_space() / 1024), ++ (u32)(sbi_heap_free_space() / 1024)); + + /* SBI details */ + sbi_printf("Runtime SBI Version : %d.%d\n", +@@ -247,6 +256,11 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid) + if (rc) + sbi_hart_hang(); + ++ /* Note: This has to be second thing in coldboot init sequence */ ++ rc = sbi_heap_init(scratch); ++ if (rc) 
++ sbi_hart_hang(); ++ + /* Note: This has to be second thing in coldboot init sequence */ + rc = sbi_domain_init(scratch, hartid); + if (rc) +@@ -317,6 +331,12 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid) + sbi_hart_hang(); + } + ++ rc = sbi_mpxy_init(scratch); ++ if (rc) { ++ sbi_printf("%s: mpxy init failed (error %d)\n", __func__, rc); ++ sbi_hart_hang(); ++ } ++ + /* + * Note: Finalize domains after HSM initialization so that we + * can startup non-root domains. +@@ -330,6 +350,12 @@ static void __noreturn init_coldboot(struct sbi_scratch *scratch, u32 hartid) + sbi_hart_hang(); + } + ++ rc = sbi_sse_init(scratch, true); ++ if (rc) { ++ sbi_printf("%s: sse init failed (error %d)\n", __func__, rc); ++ sbi_hart_hang(); ++ } ++ + /* + * Note (DD): + * In our case, the PMP set by domain will be erased, as penglai +@@ -431,6 +457,10 @@ static void init_warm_startup(struct sbi_scratch *scratch, u32 hartid) + if (rc) + sbi_hart_hang(); + ++ rc = sbi_sse_init(scratch, false); ++ if (rc) ++ sbi_hart_hang(); ++ + init_count = sbi_scratch_offset_ptr(scratch, init_count_offset); + (*init_count)++; + +@@ -578,6 +608,8 @@ void __noreturn sbi_exit(struct sbi_scratch *scratch) + + sbi_platform_early_exit(plat); + ++ sbi_sse_exit(scratch); ++ + sbi_pmu_exit(scratch); + + sbi_timer_exit(scratch); +diff --git a/lib/sbi/sbi_mpxy.c b/lib/sbi/sbi_mpxy.c +new file mode 100644 +index 00000000..54c15b4a +--- /dev/null ++++ b/lib/sbi/sbi_mpxy.c +@@ -0,0 +1,752 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems Inc. 
++ * ++ * Authors: ++ * Rahul Pathak ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/** Shared memory size across all harts */ ++static unsigned long mpxy_shmem_size = PAGE_SIZE; ++ ++/** List of MPXY proxy channels */ ++static SBI_LIST_HEAD(mpxy_channel_list); ++ ++/** Invalid Physical Address(all bits 1) */ ++#define INVALID_ADDR (-1U) ++ ++/** MPXY Attribute size in bytes */ ++#define ATTR_SIZE (4) ++ ++/** Channel Capability - MSI */ ++#define CAP_MSI_POS 0 ++#define CAP_MSI_MASK (1U << CAP_MSI_POS) ++ ++/** Channel Capability - SSE */ ++#define CAP_SSE_POS 1 ++#define CAP_SSE_MASK (1U << CAP_SSE_POS) ++ ++/** Channel Capability - Events State */ ++#define CAP_EVENTSSTATE_POS 2 ++#define CAP_EVENTSSTATE_MASK (1U << CAP_EVENTSSTATE_POS) ++ ++/** Channel Capability - Send Message With Response function support */ ++#define CAP_SEND_MSG_WITH_RESP_POS 3 ++#define CAP_SEND_MSG_WITH_RESP_MASK (1U << CAP_SEND_MSG_WITH_RESP_POS) ++ ++/** Channel Capability - Send Message Without Response function support */ ++#define CAP_SEND_MSG_WITHOUT_RESP_POS 4 ++#define CAP_SEND_MSG_WITHOUT_RESP_MASK (1U << CAP_SEND_MSG_WITHOUT_RESP_POS) ++ ++/** Channel Capability - Get Notification function support */ ++#define CAP_GET_NOTIFICATIONS_POS 5 ++#define CAP_GET_NOTIFICATIONS_MASK (1U << CAP_GET_NOTIFICATIONS_POS) ++ ++/** Helpers to enable/disable channel capability bits ++ * _c: capability variable ++ * _m: capability mask ++ */ ++#define CAP_ENABLE(_c, _m) INSERT_FIELD(_c, _m, 1) ++#define CAP_DISABLE(_c, _m) INSERT_FIELD(_c, _m, 0) ++#define CAP_GET(_c, _m) EXTRACT_FIELD(_c, _m) ++ ++#define SHMEM_PHYS_ADDR(_hi, _lo) (_lo) ++ ++/** Per hart shared memory */ ++struct mpxy_shmem { ++ unsigned long shmem_addr_lo; ++ unsigned long shmem_addr_hi; ++}; ++ ++struct mpxy_state { ++ /* MSI support in MPXY */ ++ bool msi_avail; ++ /* SSE support in MPXY */ ++ bool sse_avail; ++ /* MPXY Shared memory 
details */ ++ struct mpxy_shmem shmem; ++}; ++ ++static struct mpxy_state *sbi_domain_get_mpxy_state(struct sbi_domain *dom, ++ u32 hartindex); ++ ++/** Macro to obtain the current hart's MPXY state pointer in current domain */ ++#define sbi_domain_mpxy_state_thishart_ptr() \ ++ sbi_domain_get_mpxy_state(sbi_domain_thishart_ptr(), \ ++ sbi_hartid_to_hartindex(current_hartid())) ++ ++/** Disable hart shared memory */ ++static inline void sbi_mpxy_shmem_disable(struct mpxy_state *ms) ++{ ++ ms->shmem.shmem_addr_lo = INVALID_ADDR; ++ ms->shmem.shmem_addr_hi = INVALID_ADDR; ++} ++ ++/** Check if shared memory is already setup on hart */ ++static inline bool mpxy_shmem_enabled(struct mpxy_state *ms) ++{ ++ return (ms->shmem.shmem_addr_lo == INVALID_ADDR ++ && ms->shmem.shmem_addr_hi == INVALID_ADDR) ? ++ false : true; ++} ++ ++/** Get hart shared memory base address */ ++static inline void *hart_shmem_base(struct mpxy_state *ms) ++{ ++ return (void *)(unsigned long)SHMEM_PHYS_ADDR(ms->shmem.shmem_addr_hi, ++ ms->shmem.shmem_addr_lo); ++} ++ ++/** Make sure all attributes are packed for direct memcpy in ATTR_READ */ ++#define assert_field_offset(field, attr_offset) \ ++ _Static_assert( \ ++ ((offsetof(struct sbi_mpxy_channel_attrs, field)) / \ ++ sizeof(u32)) == attr_offset, \ ++ "field " #field \ ++ " from struct sbi_mpxy_channel_attrs invalid offset, expected " #attr_offset) ++ ++assert_field_offset(msg_proto_id, SBI_MPXY_ATTR_MSG_PROT_ID); ++assert_field_offset(msg_proto_version, SBI_MPXY_ATTR_MSG_PROT_VER); ++assert_field_offset(msg_data_maxlen, SBI_MPXY_ATTR_MSG_MAX_LEN); ++assert_field_offset(msg_send_timeout, SBI_MPXY_ATTR_MSG_SEND_TIMEOUT); ++assert_field_offset(msg_completion_timeout, SBI_MPXY_ATTR_MSG_COMPLETION_TIMEOUT); ++assert_field_offset(capability, SBI_MPXY_ATTR_CHANNEL_CAPABILITY); ++assert_field_offset(sse_event_id, SBI_MPXY_ATTR_SSE_EVENT_ID); ++assert_field_offset(msi_control, SBI_MPXY_ATTR_MSI_CONTROL); ++assert_field_offset(msi_info.msi_addr_lo, 
SBI_MPXY_ATTR_MSI_ADDR_LO); ++assert_field_offset(msi_info.msi_addr_hi, SBI_MPXY_ATTR_MSI_ADDR_HI); ++assert_field_offset(msi_info.msi_data, SBI_MPXY_ATTR_MSI_DATA); ++assert_field_offset(eventsstate_ctrl, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL); ++ ++/** ++ * Check if the attribute is a standard attribute or ++ * a message protocol specific attribute ++ * attr_id[31] = 0 for standard ++ * attr_id[31] = 1 for message protocol specific ++ */ ++static inline bool mpxy_is_std_attr(u32 attr_id) ++{ ++ return (attr_id >> 31) ? false : true; ++} ++ ++/** Find channel_id in registered channels list */ ++static struct sbi_mpxy_channel *mpxy_find_channel(u32 channel_id) ++{ ++ struct sbi_mpxy_channel *channel; ++ ++ sbi_list_for_each_entry(channel, &mpxy_channel_list, head) ++ if (channel->channel_id == channel_id) ++ return channel; ++ ++ return NULL; ++} ++ ++/** Copy attributes word size */ ++static void mpxy_copy_std_attrs(u32 *outmem, u32 *inmem, u32 count) ++{ ++ int idx; ++ for (idx = 0; idx < count; idx++) ++ outmem[idx] = cpu_to_le32(inmem[idx]); ++} ++ ++/** Check if any channel is registered with mpxy framework */ ++bool sbi_mpxy_channel_available(void) ++{ ++ return sbi_list_empty(&mpxy_channel_list) ? 
false : true; ++} ++ ++static void mpxy_std_attrs_init(struct sbi_mpxy_channel *channel) ++{ ++ struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr(); ++ u32 capability = 0; ++ ++ /* Reset values */ ++ channel->attrs.msi_control = 0; ++ channel->attrs.msi_info.msi_data = 0; ++ channel->attrs.msi_info.msi_addr_lo = 0; ++ channel->attrs.msi_info.msi_addr_hi = 0; ++ channel->attrs.capability = 0; ++ channel->attrs.eventsstate_ctrl = 0; ++ ++ if (channel->send_message_with_response) ++ capability = CAP_ENABLE(capability, CAP_SEND_MSG_WITH_RESP_MASK); ++ ++ if (channel->send_message_without_response) ++ capability = CAP_ENABLE(capability, CAP_SEND_MSG_WITHOUT_RESP_MASK); ++ ++ if (channel->get_notification_events) { ++ capability = CAP_ENABLE(capability, CAP_GET_NOTIFICATIONS_MASK); ++ /** ++ * Check if MSI or SSE available for notification interrrupt. ++ * Priority given to MSI if both MSI and SSE are avaialble. ++ */ ++ if (ms->msi_avail) ++ capability = CAP_ENABLE(capability, CAP_MSI_MASK); ++ else if (ms->sse_avail) { ++ capability = CAP_ENABLE(capability, CAP_SSE_MASK); ++ /* TODO: Assign SSE EVENT_ID for the channel */ ++ } ++ ++ /** ++ * switch_eventstate callback support means support for events ++ * state reporting supoprt. Enable events state reporting in ++ * channel capability. ++ */ ++ if (channel->switch_eventsstate) ++ capability = CAP_ENABLE(capability, CAP_EVENTSSTATE_MASK); ++ } ++ ++ channel->attrs.capability = capability; ++} ++ ++/** ++ * Register a channel with MPXY framework. 
++ * Called by message protocol drivers ++ */ ++int sbi_mpxy_register_channel(struct sbi_mpxy_channel *channel) ++{ ++ if (!channel) ++ return SBI_EINVAL; ++ ++ if (mpxy_find_channel(channel->channel_id)) ++ return SBI_EALREADY; ++ ++ /* Initialize channel specific attributes */ ++ mpxy_std_attrs_init(channel); ++ ++ /* Update shared memory size if required */ ++ if (mpxy_shmem_size < channel->attrs.msg_data_maxlen) { ++ mpxy_shmem_size = channel->attrs.msg_data_maxlen; ++ mpxy_shmem_size = (mpxy_shmem_size + (PAGE_SIZE - 1)) / PAGE_SIZE; ++ } ++ ++ sbi_list_add_tail(&channel->head, &mpxy_channel_list); ++ ++ return SBI_OK; ++} ++ ++/** Setup per domain MPXY state data */ ++static int domain_mpxy_state_data_setup(struct sbi_domain *dom, ++ struct sbi_domain_data *data, ++ void *data_ptr) ++{ ++ struct mpxy_state **dom_hartindex_to_mpxy_state_table = data_ptr; ++ struct mpxy_state *ms; ++ u32 i; ++ ++ sbi_hartmask_for_each_hart(i, dom->possible_harts) { ++ ms = sbi_zalloc(sizeof(*ms)); ++ if (!ms) ++ return SBI_ENOMEM; ++ ++ /* ++ * TODO: Proper support for checking msi support from ++ * platform. 
Currently disable msi and sse and use ++ * polling ++ */ ++ ms->msi_avail = false; ++ ms->sse_avail = false; ++ ++ sbi_mpxy_shmem_disable(ms); ++ ++ dom_hartindex_to_mpxy_state_table[i] = ms; ++ } ++ ++ return 0; ++} ++ ++/** Cleanup per domain MPXY state data */ ++static void domain_mpxy_state_data_cleanup(struct sbi_domain *dom, ++ struct sbi_domain_data *data, ++ void *data_ptr) ++{ ++ struct mpxy_state **dom_hartindex_to_mpxy_state_table = data_ptr; ++ u32 i; ++ ++ sbi_hartmask_for_each_hart(i, dom->possible_harts) ++ sbi_free(dom_hartindex_to_mpxy_state_table[i]); ++} ++ ++static struct sbi_domain_data dmspriv = { ++ .data_setup = domain_mpxy_state_data_setup, ++ .data_cleanup = domain_mpxy_state_data_cleanup, ++}; ++ ++/** ++ * Get per-domain MPXY state pointer for a given domain and HART index ++ * @param dom pointer to domain ++ * @param hartindex the HART index ++ * ++ * @return per-domain MPXY state pointer for given HART index ++ */ ++static struct mpxy_state *sbi_domain_get_mpxy_state(struct sbi_domain *dom, ++ u32 hartindex) ++{ ++ struct mpxy_state **dom_hartindex_to_mpxy_state_table; ++ ++ dom_hartindex_to_mpxy_state_table = sbi_domain_data_ptr(dom, &dmspriv); ++ if (!dom_hartindex_to_mpxy_state_table || ++ !sbi_hartindex_valid(hartindex)) ++ return NULL; ++ ++ return dom_hartindex_to_mpxy_state_table[hartindex]; ++} ++ ++int sbi_mpxy_init(struct sbi_scratch *scratch) ++{ ++ int ret; ++ ++ /** ++ * Allocate per-domain and per-hart MPXY state data. ++ * The data type is "struct mpxy_state **" whose memory space will be ++ * dynamically allocated by domain_setup_data_one() and ++ * domain_mpxy_state_data_setup(). Calculate needed size of memory space ++ * here. 
++ */ ++ dmspriv.data_size = sizeof(struct mpxy_state *) * sbi_hart_count(); ++ ret = sbi_domain_register_data(&dmspriv); ++ if (ret) ++ return ret; ++ ++ return sbi_platform_mpxy_init(sbi_platform_ptr(scratch)); ++} ++ ++unsigned long sbi_mpxy_get_shmem_size(void) ++{ ++ return mpxy_shmem_size; ++} ++ ++int sbi_mpxy_set_shmem(unsigned long shmem_phys_lo, ++ unsigned long shmem_phys_hi, ++ unsigned long flags) ++{ ++ struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr(); ++ unsigned long *ret_buf; ++ ++ /** Disable shared memory if both hi and lo have all bit 1s */ ++ if (shmem_phys_lo == INVALID_ADDR && ++ shmem_phys_hi == INVALID_ADDR) { ++ sbi_mpxy_shmem_disable(ms); ++ return SBI_SUCCESS; ++ } ++ ++ if (flags >= SBI_EXT_MPXY_SHMEM_FLAG_MAX_IDX) ++ return SBI_ERR_INVALID_PARAM; ++ ++ /** Check shared memory size and address aligned to 4K Page */ ++ if (shmem_phys_lo & ~PAGE_MASK) ++ return SBI_ERR_INVALID_PARAM; ++ ++ /* ++ * On RV32, the M-mode can only access the first 4GB of ++ * the physical address space because M-mode does not have ++ * MMU to access full 34-bit physical address space. ++ * So fail if the upper 32 bits of the physical address ++ * is non-zero on RV32. ++ * ++ * On RV64, kernel sets upper 64bit address part to zero. ++ * So fail if the upper 64bit of the physical address ++ * is non-zero on RV64. 
++ */ ++ if (shmem_phys_hi) ++ return SBI_ERR_INVALID_ADDRESS; ++ ++ if (!sbi_domain_check_addr( ++ sbi_domain_thishart_ptr(), ++ SHMEM_PHYS_ADDR(shmem_phys_hi, shmem_phys_lo), PRV_S, ++ SBI_DOMAIN_READ | SBI_DOMAIN_WRITE)) ++ return SBI_ERR_INVALID_ADDRESS; ++ ++ /** Save the current shmem details in new shmem region */ ++ if (flags == SBI_EXT_MPXY_SHMEM_FLAG_OVERWRITE_RETURN) { ++ ret_buf = (unsigned long *)(ulong)SHMEM_PHYS_ADDR(shmem_phys_hi, ++ shmem_phys_lo); ++ ret_buf[0] = cpu_to_lle(ms->shmem.shmem_addr_lo); ++ ret_buf[1] = cpu_to_lle(ms->shmem.shmem_addr_hi); ++ } ++ ++ /** Setup the new shared memory */ ++ ms->shmem.shmem_addr_lo = shmem_phys_lo; ++ ms->shmem.shmem_addr_hi = shmem_phys_hi; ++ ++ return SBI_SUCCESS; ++} ++ ++int sbi_mpxy_get_channel_ids(u32 start_index) ++{ ++ struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr(); ++ u32 remaining, returned, max_channelids; ++ u32 node_index = 0, node_ret = 0; ++ struct sbi_mpxy_channel *channel; ++ u32 channels_count = 0; ++ u32 *shmem_base; ++ ++ if (!mpxy_shmem_enabled(ms)) ++ return SBI_ERR_NO_SHMEM; ++ ++ sbi_list_for_each_entry(channel, &mpxy_channel_list, head) ++ channels_count += 1; ++ ++ if (start_index > channels_count) ++ return SBI_ERR_INVALID_PARAM; ++ ++ shmem_base = hart_shmem_base(ms); ++ ++ /** number of channel ids which can be stored in shmem adjusting ++ * for remaining and returned fields */ ++ max_channelids = (mpxy_shmem_size / sizeof(u32)) - 2; ++ /* total remaining from the start index */ ++ remaining = channels_count - start_index; ++ /* how many can be returned */ ++ returned = (remaining > max_channelids)? max_channelids : remaining; ++ ++ // Iterate over the list of channels to get the channel ids. 
++ sbi_list_for_each_entry(channel, &mpxy_channel_list, head) { ++ if (node_index >= start_index && ++ node_index < (start_index + returned)) { ++ shmem_base[2 + node_ret] = cpu_to_le32(channel->channel_id); ++ node_ret += 1; ++ } ++ ++ node_index += 1; ++ } ++ ++ /* final remaininig channel ids */ ++ remaining = channels_count - (start_index + returned); ++ ++ shmem_base[0] = cpu_to_le32(remaining); ++ shmem_base[1] = cpu_to_le32(returned); ++ ++ return SBI_SUCCESS; ++} ++ ++int sbi_mpxy_read_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count) ++{ ++ struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr(); ++ int ret = SBI_SUCCESS; ++ u32 *attr_ptr, end_id; ++ void *shmem_base; ++ ++ if (!mpxy_shmem_enabled(ms)) ++ return SBI_ERR_NO_SHMEM; ++ ++ struct sbi_mpxy_channel *channel = mpxy_find_channel(channel_id); ++ if (!channel) ++ return SBI_ERR_NOT_SUPPORTED; ++ ++ /* base attribute id is not a defined std attribute or reserved */ ++ if (base_attr_id >= SBI_MPXY_ATTR_STD_ATTR_MAX_IDX && ++ base_attr_id < SBI_MPXY_ATTR_MSGPROTO_ATTR_START) ++ return SBI_ERR_INVALID_PARAM; ++ ++ /* Sanity check for base_attr_id and attr_count */ ++ if (!attr_count || (attr_count > (mpxy_shmem_size / ATTR_SIZE))) ++ return SBI_ERR_INVALID_PARAM; ++ ++ shmem_base = hart_shmem_base(ms); ++ end_id = base_attr_id + attr_count - 1; ++ ++ /* Standard attributes range check */ ++ if (mpxy_is_std_attr(base_attr_id)) { ++ if (end_id >= SBI_MPXY_ATTR_STD_ATTR_MAX_IDX) { ++ ret = SBI_EBAD_RANGE; ++ goto out; ++ } ++ ++ attr_ptr = (u32 *)&channel->attrs; ++ mpxy_copy_std_attrs((u32 *)shmem_base, &attr_ptr[base_attr_id], ++ attr_count); ++ } else { ++ /** ++ * Even if the message protocol driver does not provide ++ * read attribute callback, return bad range error instead ++ * of not supported to let client distinguish it from channel ++ * id not supported. ++ * Check the complate range supported for message protocol ++ * attributes. 
Actual supported attributes will be checked ++ * by the message protocol driver. ++ */ ++ if (!channel->read_attributes || ++ end_id > SBI_MPXY_ATTR_MSGPROTO_ATTR_END) { ++ ret = SBI_ERR_BAD_RANGE; ++ goto out; ++ } ++ ++ /** ++ * Function expected to return the SBI supported errors ++ * At this point both base attribute id and only the mpxy ++ * supported range been verified. Platform callback must ++ * check if the range requested is supported by message ++ * protocol driver */ ++ ret = channel->read_attributes(channel, ++ (u32 *)shmem_base, ++ base_attr_id, attr_count); ++ } ++out: ++ return ret; ++} ++ ++/** ++ * Verify the channel standard attribute wrt to write permission ++ * and the value to be set if valid or not. ++ * Only attributes needs to be checked which are defined Read/Write ++ * permission. Other with Readonly permission will result in error. ++ * ++ * Attributes values to be written must also be checked because ++ * before writing a range of attributes, we need to make sure that ++ * either complete range of attributes is written successfully or not ++ * at all. 
++ */ ++static int mpxy_check_write_std_attr(struct sbi_mpxy_channel *channel, ++ u32 attr_id, u32 attr_val) ++{ ++ struct sbi_mpxy_channel_attrs *attrs = &channel->attrs; ++ int ret = SBI_SUCCESS; ++ ++ switch(attr_id) { ++ case SBI_MPXY_ATTR_MSI_CONTROL: ++ if (attr_val > 1) ++ ret = SBI_ERR_INVALID_PARAM; ++ if (attr_val == 1 && ++ (attrs->msi_info.msi_addr_lo == INVALID_ADDR) && ++ (attrs->msi_info.msi_addr_hi == INVALID_ADDR)) ++ ret = SBI_ERR_DENIED; ++ break; ++ case SBI_MPXY_ATTR_MSI_ADDR_LO: ++ case SBI_MPXY_ATTR_MSI_ADDR_HI: ++ case SBI_MPXY_ATTR_MSI_DATA: ++ ret = SBI_SUCCESS; ++ break; ++ case SBI_MPXY_ATTR_EVENTS_STATE_CONTROL: ++ if (attr_val > 1) ++ ret = SBI_ERR_INVALID_PARAM; ++ break; ++ default: ++ /** All RO access attributes falls under default */ ++ ret = SBI_ERR_BAD_RANGE; ++ }; ++ ++ return ret; ++} ++ ++/** ++ * Write the attribute value ++ */ ++static void mpxy_write_std_attr(struct sbi_mpxy_channel *channel, u32 attr_id, ++ u32 attr_val) ++{ ++ struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr(); ++ struct sbi_mpxy_channel_attrs *attrs = &channel->attrs; ++ ++ switch(attr_id) { ++ case SBI_MPXY_ATTR_MSI_CONTROL: ++ if (ms->msi_avail && attr_val <= 1) ++ attrs->msi_control = attr_val; ++ break; ++ case SBI_MPXY_ATTR_MSI_ADDR_LO: ++ if (ms->msi_avail) ++ attrs->msi_info.msi_addr_lo = attr_val; ++ break; ++ case SBI_MPXY_ATTR_MSI_ADDR_HI: ++ if (ms->msi_avail) ++ attrs->msi_info.msi_addr_hi = attr_val; ++ break; ++ case SBI_MPXY_ATTR_MSI_DATA: ++ if (ms->msi_avail) ++ attrs->msi_info.msi_data = attr_val; ++ break; ++ case SBI_MPXY_ATTR_EVENTS_STATE_CONTROL: ++ if (channel->switch_eventsstate && attr_val <= 1) { ++ attrs->eventsstate_ctrl = attr_val; ++ /* message protocol callback to enable/disable ++ * events state reporting. 
*/ ++ channel->switch_eventsstate(attr_val); ++ } ++ ++ break; ++ }; ++} ++ ++int sbi_mpxy_write_attrs(u32 channel_id, u32 base_attr_id, u32 attr_count) ++{ ++ struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr(); ++ u32 *mem_ptr, attr_id, end_id, attr_val; ++ struct sbi_mpxy_channel *channel; ++ int ret, mem_idx; ++ void *shmem_base; ++ ++ if (!mpxy_shmem_enabled(ms)) ++ return SBI_ERR_NO_SHMEM; ++ ++ channel = mpxy_find_channel(channel_id); ++ if (!channel) ++ return SBI_ERR_NOT_SUPPORTED; ++ ++ /* base attribute id is not a defined std attribute or reserved */ ++ if (base_attr_id >= SBI_MPXY_ATTR_STD_ATTR_MAX_IDX && ++ base_attr_id < SBI_MPXY_ATTR_MSGPROTO_ATTR_START) ++ return SBI_ERR_INVALID_PARAM; ++ ++ /* Sanity check for base_attr_id and attr_count */ ++ if (!attr_count || (attr_count > (mpxy_shmem_size / ATTR_SIZE))) ++ return SBI_ERR_INVALID_PARAM; ++ ++ shmem_base = hart_shmem_base(ms); ++ end_id = base_attr_id + attr_count - 1; ++ ++ mem_ptr = (u32 *)shmem_base; ++ ++ if (mpxy_is_std_attr(base_attr_id)) { ++ if (end_id >= SBI_MPXY_ATTR_STD_ATTR_MAX_IDX) { ++ ret = SBI_ERR_BAD_RANGE; ++ goto out; ++ } ++ ++ /** Verify the attribute ids range and values */ ++ mem_idx = 0; ++ for (attr_id = base_attr_id; attr_id <= end_id; attr_id++) { ++ attr_val = le32_to_cpu(mem_ptr[mem_idx++]); ++ ret = mpxy_check_write_std_attr(channel, ++ attr_id, attr_val); ++ if (ret) ++ goto out; ++ } ++ ++ /* Write the attribute ids values */ ++ mem_idx = 0; ++ for (attr_id = base_attr_id; attr_id <= end_id; attr_id++) { ++ attr_val = le32_to_cpu(mem_ptr[mem_idx++]); ++ mpxy_write_std_attr(channel, attr_id, attr_val); ++ } ++ } else {/** ++ * Message protocol specific attributes: ++ * If attributes belong to message protocol, they ++ * are simply passed to the message protocol driver ++ * callback after checking the valid range. ++ * Attributes contiguous range & permission & other checks ++ * are done by the mpxy and message protocol glue layer. 
++ */ ++ /** ++ * Even if the message protocol driver does not provide ++ * write attribute callback, return bad range error instead ++ * of not supported to let client distinguish it from channel ++ * id not supported. ++ */ ++ if (!channel->write_attributes || ++ end_id > SBI_MPXY_ATTR_MSGPROTO_ATTR_END) { ++ ret = SBI_ERR_BAD_RANGE; ++ goto out; ++ } ++ ++ /** ++ * Function expected to return the SBI supported errors ++ * At this point both base attribute id and only the mpxy ++ * supported range been verified. Platform callback must ++ * check if the range requested is supported by message ++ * protocol driver */ ++ ret = channel->write_attributes(channel, ++ (u32 *)shmem_base, ++ base_attr_id, attr_count); ++ } ++out: ++ return ret; ++} ++ ++int sbi_mpxy_send_message(u32 channel_id, u8 msg_id, ++ unsigned long msg_data_len, ++ unsigned long *resp_data_len) ++{ ++ struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr(); ++ struct sbi_mpxy_channel *channel; ++ void *shmem_base, *resp_buf; ++ u32 resp_bufsize; ++ int ret; ++ ++ if (!mpxy_shmem_enabled(ms)) ++ return SBI_ERR_NO_SHMEM; ++ ++ channel = mpxy_find_channel(channel_id); ++ if (!channel) ++ return SBI_ERR_NOT_SUPPORTED; ++ ++ if (resp_data_len && !channel->send_message_with_response) ++ return SBI_ERR_NOT_SUPPORTED; ++ ++ if (!resp_data_len && !channel->send_message_without_response) ++ return SBI_ERR_NOT_SUPPORTED; ++ ++ if (msg_data_len > mpxy_shmem_size || ++ msg_data_len > channel->attrs.msg_data_maxlen) ++ return SBI_ERR_INVALID_PARAM; ++ ++ shmem_base = hart_shmem_base(ms); ++ ++ if (resp_data_len) { ++ resp_buf = shmem_base; ++ resp_bufsize = mpxy_shmem_size; ++ ret = channel->send_message_with_response(channel, msg_id, ++ shmem_base, ++ msg_data_len, ++ resp_buf, ++ resp_bufsize, ++ resp_data_len); ++ } else { ++ ret = channel->send_message_without_response(channel, msg_id, ++ shmem_base, ++ msg_data_len); ++ } ++ ++ if (ret == SBI_ERR_TIMEOUT || ret == SBI_ERR_IO) ++ return ret; ++ else if 
(ret) ++ return SBI_ERR_FAILED; ++ ++ if (resp_data_len && ++ (*resp_data_len > mpxy_shmem_size || ++ *resp_data_len > channel->attrs.msg_data_maxlen)) ++ return SBI_ERR_FAILED; ++ ++ return SBI_SUCCESS; ++} ++ ++int sbi_mpxy_get_notification_events(u32 channel_id, unsigned long *events_len) ++{ ++ struct mpxy_state *ms = sbi_domain_mpxy_state_thishart_ptr(); ++ struct sbi_mpxy_channel *channel; ++ void *eventsbuf, *shmem_base; ++ int ret; ++ ++ if (!mpxy_shmem_enabled(ms)) ++ return SBI_ERR_NO_SHMEM; ++ ++ channel = mpxy_find_channel(channel_id); ++ if (!channel || !channel->get_notification_events) ++ return SBI_ERR_NOT_SUPPORTED; ++ ++ shmem_base = hart_shmem_base(ms); ++ eventsbuf = shmem_base; ++ ret = channel->get_notification_events(channel, eventsbuf, ++ mpxy_shmem_size, ++ events_len); ++ ++ if (ret) ++ return ret; ++ ++ if (*events_len > (mpxy_shmem_size - 16)) ++ return SBI_ERR_FAILED; ++ ++ return SBI_SUCCESS; ++} +diff --git a/lib/sbi/sbi_scratch.c b/lib/sbi/sbi_scratch.c +index 87b34c6d..bb2e6909 100644 +--- a/lib/sbi/sbi_scratch.c ++++ b/lib/sbi/sbi_scratch.c +@@ -14,19 +14,38 @@ + #include + #include + ++u32 sbi_scratch_hart_count; ++ + u32 last_hartid_having_scratch = SBI_HARTMASK_MAX_BITS - 1; + struct sbi_scratch *hartid_to_scratch_table[SBI_HARTMASK_MAX_BITS] = { 0 }; ++u32 hartindex_to_hartid_table[SBI_HARTMASK_MAX_BITS] = { [0 ... 
SBI_HARTMASK_MAX_BITS-1] = -1U }; + + static spinlock_t extra_lock = SPIN_LOCK_INITIALIZER; + static unsigned long extra_offset = SBI_SCRATCH_EXTRA_SPACE_OFFSET; + + typedef struct sbi_scratch *(*hartid2scratch)(ulong hartid, ulong hartindex); + +-int sbi_scratch_init(struct sbi_scratch *scratch) ++u32 sbi_hartid_to_hartindex(u32 hartid) + { + u32 i; ++ for (i = 0; i < SBI_HARTMASK_MAX_BITS; i++) { ++ if (hartindex_to_hartid_table[i] == hartid) ++ return i; ++ } ++ ++ return -1U; ++} ++ ++int sbi_scratch_init(struct sbi_scratch *scratch) ++{ ++ u32 i, hart_count; + const struct sbi_platform *plat = sbi_platform_ptr(scratch); + ++ hart_count = plat->hart_count; ++ if (hart_count > SBI_HARTMASK_MAX_BITS) ++ hart_count = SBI_HARTMASK_MAX_BITS; ++ sbi_scratch_hart_count = hart_count; ++ + for (i = 0; i < SBI_HARTMASK_MAX_BITS; i++) { + if (sbi_platform_hart_invalid(plat, i)) + continue; +@@ -35,6 +54,7 @@ int sbi_scratch_init(struct sbi_scratch *scratch) + sbi_platform_hart_index(plat, i)); + if (hartid_to_scratch_table[i]) + last_hartid_having_scratch = i; ++ hartindex_to_hartid_table[sbi_platform_hart_index(plat, i)] = i; + } + + return 0; +diff --git a/lib/sbi/sbi_sse.c b/lib/sbi/sbi_sse.c +new file mode 100644 +index 00000000..5d996215 +--- /dev/null ++++ b/lib/sbi/sbi_sse.c +@@ -0,0 +1,1296 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2023 Rivos Systems Inc. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define sse_get_hart_state_ptr(__scratch) \ ++ sbi_scratch_read_type((__scratch), void *, shs_ptr_off) ++ ++#define sse_thishart_state_ptr() \ ++ sse_get_hart_state_ptr(sbi_scratch_thishart_ptr()) ++ ++#define sse_set_hart_state_ptr(__scratch, __sse_state) \ ++ sbi_scratch_write_type((__scratch), void *, shs_ptr_off, (__sse_state)) ++ ++#define EVENT_IS_GLOBAL(__event_id) ((__event_id) & SBI_SSE_EVENT_GLOBAL_BIT) ++ ++#define sse_event_invoke_cb(_event, _cb, ...) \ ++ { \ ++ const struct sbi_sse_cb_ops *__ops = _event->info->cb_ops; \ ++ if (__ops && __ops->_cb) \ ++ __ops->_cb(_event->event_id, ##__VA_ARGS__); \ ++ } ++ ++struct sse_entry_state { ++ /** entry pc */ ++ unsigned long pc; ++ /** a6 register state */ ++ unsigned long arg; ++}; ++ ++struct sse_interrupted_state { ++ /** sepc register state */ ++ unsigned long sepc; ++ /** flags register state */ ++ unsigned long flags; ++ /** a6 register state */ ++ unsigned long a6; ++ /** a7 register state */ ++ unsigned long a7; ++}; ++ ++struct sse_ipi_inject_data { ++ uint32_t event_id; ++}; ++ ++struct sbi_sse_event_attrs { ++ unsigned long status; ++ unsigned long prio; ++ unsigned long config; ++ unsigned long hartid; ++ struct sse_entry_state entry; ++ struct sse_interrupted_state interrupted; ++}; ++ ++/* Make sure all attributes are packed for direct memcpy in ATTR_READ */ ++#define assert_field_offset(field, attr_offset) \ ++ _Static_assert( \ ++ ((offsetof(struct sbi_sse_event_attrs, field)) / \ ++ sizeof(unsigned long)) == attr_offset, \ ++ "field " #field \ ++ " from struct sbi_sse_event_attrs invalid offset, expected " #attr_offset) ++ ++assert_field_offset(status, SBI_SSE_ATTR_STATUS); ++assert_field_offset(prio, SBI_SSE_ATTR_PRIO); 
++assert_field_offset(config, SBI_SSE_ATTR_CONFIG); ++assert_field_offset(hartid, SBI_SSE_ATTR_PREFERRED_HART); ++assert_field_offset(entry.pc, SBI_SSE_ATTR_ENTRY_PC); ++assert_field_offset(entry.arg, SBI_SSE_ATTR_ENTRY_ARG); ++assert_field_offset(interrupted.sepc, SBI_SSE_ATTR_INTERRUPTED_SEPC); ++assert_field_offset(interrupted.flags, SBI_SSE_ATTR_INTERRUPTED_FLAGS); ++assert_field_offset(interrupted.a6, SBI_SSE_ATTR_INTERRUPTED_A6); ++assert_field_offset(interrupted.a7, SBI_SSE_ATTR_INTERRUPTED_A7); ++ ++struct sbi_sse_event { ++ struct sbi_sse_event_attrs attrs; ++ uint32_t event_id; ++ u32 hartindex; ++ struct sse_event_info *info; ++ struct sbi_dlist node; ++}; ++ ++/** Per-hart state */ ++struct sse_hart_state { ++ /* Priority sorted list of enabled events (global and local in >= ++ * ENABLED state). This list is protected by the enabled_event_lock. ++ * ++ * Global events can also be inserted in this list. Since these events ++ * can be accessed by all harts, we actually need to lock independently ++ * (see sse_global_event). ++ * ++ * Local events do not actually need to be locked since, we do ++ * not have preemption and there are solely accessed by the current ++ * hart. So when inserting a local event in this list, we just need to ++ * lock the list at insertion/removal time. ++ * ++ * When an event is in a state >= ENABLED, then it is inserted in the ++ * this enabled_event_list and thus can only be removed from this ++ * list upon disable ecall or on complete with ONESHOT flag. ++ */ ++ struct sbi_dlist enabled_event_list; ++ ++ /** ++ * Lock that protects enabled_event_list ++ */ ++ spinlock_t enabled_event_lock; ++ ++ /** ++ * List of local events allocated at boot time. ++ */ ++ struct sbi_sse_event *local_events; ++ ++ /** ++ * State to track if the hart is ready to take sse events. ++ * One hart cannot modify this state of another hart. 
++ */ ++ bool masked; ++}; ++ ++/** ++ * Global events are accessible by all harts ++ */ ++struct sse_global_event { ++ /** ++ * global event struct ++ */ ++ struct sbi_sse_event event; ++ ++ /** ++ * Global event lock protecting access from multiple harts from ecall to ++ * the event. ++ */ ++ spinlock_t lock; ++}; ++ ++struct sse_event_info { ++ uint32_t event_id; ++ const struct sbi_sse_cb_ops *cb_ops; ++ SBI_SLIST_NODE(sse_event_info); ++}; ++ ++static unsigned int local_event_count; ++static unsigned int global_event_count; ++static struct sse_global_event *global_events; ++ ++static unsigned long sse_inject_fifo_off; ++static unsigned long sse_inject_fifo_mem_off; ++/* Offset of pointer to SSE HART state in scratch space */ ++static unsigned long shs_ptr_off; ++ ++static u32 sse_ipi_inject_event = SBI_IPI_EVENT_MAX; ++ ++static int sse_ipi_inject_send(unsigned long hartid, uint32_t event_id); ++ ++struct sse_event_info global_software_event = { ++ .event_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE, ++ SBI_SLIST_NODE_INIT(NULL), ++}; ++ ++struct sse_event_info local_software_event = { ++ .event_id = SBI_SSE_EVENT_LOCAL_SOFTWARE, ++ SBI_SLIST_NODE_INIT(&global_software_event), ++}; ++ ++static SBI_SLIST_HEAD(supported_events, sse_event_info) = ++ SBI_SLIST_HEAD_INIT(&local_software_event); ++ ++/* ++ * This array is used to distinguish between standard event and platform ++ * events in order to return SBI_ERR_NOT_SUPPORTED for them. 
++ */ ++static const uint32_t standard_events[] = { ++ SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS, ++ SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP, ++ SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS, ++ SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW, ++ SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS, ++ SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS, ++ SBI_SSE_EVENT_LOCAL_SOFTWARE, ++ SBI_SSE_EVENT_GLOBAL_SOFTWARE, ++}; ++ ++static bool sse_is_standard_event(uint32_t event_id) ++{ ++ int i; ++ ++ for (i = 0; i < array_size(standard_events); i++) { ++ if (event_id == standard_events[i]) ++ return true; ++ } ++ ++ return false; ++} ++ ++static struct sse_event_info *sse_event_info_get(uint32_t event_id) ++{ ++ struct sse_event_info *info; ++ ++ SBI_SLIST_FOR_EACH_ENTRY(info, supported_events) { ++ if (info->event_id == event_id) ++ return info; ++ } ++ ++ return NULL; ++} ++ ++static unsigned long sse_event_state(struct sbi_sse_event *e) ++{ ++ return e->attrs.status & SBI_SSE_ATTR_STATUS_STATE_MASK; ++} ++ ++static unsigned long sse_event_pending(struct sbi_sse_event *e) ++{ ++ return !!(e->attrs.status & BIT(SBI_SSE_ATTR_STATUS_PENDING_OFFSET)); ++} ++ ++static bool sse_event_is_global(struct sbi_sse_event *e) ++{ ++ return EVENT_IS_GLOBAL(e->event_id); ++} ++ ++static bool sse_event_is_local(struct sbi_sse_event *e) ++{ ++ return !sse_event_is_global(e); ++} ++ ++/** ++ * If event is global, must be called under global event lock ++ */ ++static struct sse_hart_state *sse_get_hart_state(struct sbi_sse_event *e) ++{ ++ struct sbi_scratch *s = sbi_hartid_to_scratch(sbi_hartindex_to_hartid(e->hartindex)); ++ ++ return sse_get_hart_state_ptr(s); ++} ++ ++static struct sse_global_event *sse_get_global_event(struct sbi_sse_event *e) ++{ ++ return container_of(e, struct sse_global_event, event); ++} ++ ++/** ++ * If event is global, must be called under enabled event lock ++ */ ++static void sse_enabled_event_lock(struct sbi_sse_event *e) ++{ ++ struct sse_hart_state *shs; ++ ++ shs = sse_get_hart_state(e); ++ 
spin_lock(&shs->enabled_event_lock); ++} ++ ++/** ++ * If event is global, must be called under enabled event lock ++ */ ++static void sse_enabled_event_unlock(struct sbi_sse_event *e) ++{ ++ struct sse_hart_state *shs; ++ ++ shs = sse_get_hart_state(e); ++ spin_unlock(&shs->enabled_event_lock); ++} ++ ++static void sse_event_set_state(struct sbi_sse_event *e, ++ unsigned long new_state) ++{ ++ e->attrs.status &= ~SBI_SSE_ATTR_STATUS_STATE_MASK; ++ e->attrs.status |= new_state; ++} ++ ++static int sse_event_get(uint32_t event_id, struct sbi_sse_event **eret) ++{ ++ unsigned int i; ++ struct sbi_sse_event *e; ++ struct sse_hart_state *shs; ++ ++ if (!eret) ++ return SBI_EINVAL; ++ ++ if (EVENT_IS_GLOBAL(event_id)) { ++ for (i = 0; i < global_event_count; i++) { ++ e = &global_events[i].event; ++ if (e->event_id == event_id) { ++ spin_lock(&global_events[i].lock); ++ *eret = e; ++ return SBI_SUCCESS; ++ } ++ } ++ } else { ++ shs = sse_thishart_state_ptr(); ++ for (i = 0; i < local_event_count; i++) { ++ e = &shs->local_events[i]; ++ if (e->event_id == event_id) { ++ *eret = e; ++ return SBI_SUCCESS; ++ } ++ } ++ } ++ ++ /* Check if the event is a standard one but not supported */ ++ if (sse_is_standard_event(event_id)) ++ return SBI_ENOTSUPP; ++ ++ /* If not supported nor a standard event, it is invalid */ ++ return SBI_EINVAL; ++} ++ ++static void sse_event_put(struct sbi_sse_event *e) ++{ ++ struct sse_global_event *ge; ++ ++ if (sse_event_is_local(e)) ++ return; ++ ++ ge = sse_get_global_event(e); ++ spin_unlock(&ge->lock); ++} ++ ++static void sse_event_remove_from_list(struct sbi_sse_event *e) ++{ ++ sbi_list_del(&e->node); ++} ++ ++/** ++ * Must be called under owner hart lock ++ */ ++static void sse_event_add_to_list(struct sbi_sse_event *e) ++{ ++ struct sse_hart_state *state = sse_get_hart_state(e); ++ struct sbi_sse_event *tmp; ++ ++ sbi_list_for_each_entry(tmp, &state->enabled_event_list, node) { ++ if (e->attrs.prio < tmp->attrs.prio) ++ break; ++ if 
(e->attrs.prio == tmp->attrs.prio && ++ e->event_id < tmp->event_id) ++ break; ++ } ++ sbi_list_add_tail(&e->node, &tmp->node); ++} ++ ++/** ++ * Must be called under owner hart lock ++ */ ++static int sse_event_disable(struct sbi_sse_event *e) ++{ ++ if (sse_event_state(e) != SBI_SSE_STATE_ENABLED) ++ return SBI_EINVALID_STATE; ++ ++ sse_event_invoke_cb(e, disable_cb); ++ ++ sse_event_remove_from_list(e); ++ sse_event_set_state(e, SBI_SSE_STATE_REGISTERED); ++ ++ return SBI_OK; ++} ++ ++static int sse_event_set_hart_id_check(struct sbi_sse_event *e, ++ unsigned long new_hartid) ++{ ++ int hstate; ++ unsigned int hartid = (uint32_t)new_hartid; ++ struct sbi_domain *hd = sbi_domain_thishart_ptr(); ++ ++ if (!sse_event_is_global(e)) ++ return SBI_EDENIED; ++ ++ if (!sbi_domain_is_assigned_hart(hd, hartid)) ++ return SBI_EINVAL; ++ ++ hstate = sbi_hsm_hart_get_state(hd, hartid); ++ if (hstate != SBI_HSM_STATE_STARTED) ++ return SBI_EINVAL; ++ ++ return SBI_OK; ++} ++ ++static int sse_event_set_attr_check(struct sbi_sse_event *e, uint32_t attr_id, ++ unsigned long val) ++{ ++ switch (attr_id) { ++ case SBI_SSE_ATTR_CONFIG: ++ if (sse_event_state(e) >= SBI_SSE_STATE_ENABLED) ++ return SBI_EINVALID_STATE; ++ ++ if (val & ~SBI_SSE_ATTR_CONFIG_ONESHOT) ++ return SBI_EINVAL; ++ ++ return SBI_OK; ++ case SBI_SSE_ATTR_PRIO: ++ if (sse_event_state(e) >= SBI_SSE_STATE_ENABLED) ++ return SBI_EINVALID_STATE; ++ ++#if __riscv_xlen > 32 ++ if (val != (uint32_t)val) ++ return SBI_EINVAL; ++#endif ++ return SBI_OK; ++ case SBI_SSE_ATTR_PREFERRED_HART: ++ if (sse_event_state(e) >= SBI_SSE_STATE_ENABLED) ++ return SBI_EINVALID_STATE; ++ ++ return sse_event_set_hart_id_check(e, val); ++ case SBI_SSE_ATTR_INTERRUPTED_FLAGS: ++ if (val & ~(SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP | ++ SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE | ++ SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPV | ++ SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPVP | ++ SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPELP | ++ 
SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT)) ++ return SBI_EINVAL; ++ __attribute__((__fallthrough__)); ++ case SBI_SSE_ATTR_INTERRUPTED_SEPC: ++ case SBI_SSE_ATTR_INTERRUPTED_A6: ++ case SBI_SSE_ATTR_INTERRUPTED_A7: ++ if (sse_event_state(e) != SBI_SSE_STATE_RUNNING) ++ return SBI_EINVALID_STATE; ++ ++ if (current_hartid() != e->attrs.hartid) ++ return SBI_EINVAL; ++ ++ return SBI_OK; ++ default: ++ /* ++ * Attribute range validity was already checked by ++ * sbi_sse_attr_check(). If we end up here, attribute was not ++ * handled by the above 'case' statements and thus it is ++ * read-only. ++ */ ++ return SBI_EDENIED; ++ } ++} ++ ++static void sse_event_set_attr(struct sbi_sse_event *e, uint32_t attr_id, ++ unsigned long val) ++{ ++ switch (attr_id) { ++ case SBI_SSE_ATTR_CONFIG: ++ e->attrs.config = val; ++ break; ++ case SBI_SSE_ATTR_PRIO: ++ e->attrs.prio = (uint32_t)val; ++ break; ++ case SBI_SSE_ATTR_PREFERRED_HART: ++ e->attrs.hartid = val; ++ e->hartindex = sbi_hartid_to_hartindex(val); ++ sse_event_invoke_cb(e, set_hartid_cb, val); ++ break; ++ ++ case SBI_SSE_ATTR_INTERRUPTED_SEPC: ++ e->attrs.interrupted.sepc = val; ++ break; ++ case SBI_SSE_ATTR_INTERRUPTED_FLAGS: ++ e->attrs.interrupted.flags = val; ++ break; ++ case SBI_SSE_ATTR_INTERRUPTED_A6: ++ e->attrs.interrupted.a6 = val; ++ break; ++ case SBI_SSE_ATTR_INTERRUPTED_A7: ++ e->attrs.interrupted.a7 = val; ++ break; ++ } ++} ++ ++static int sse_event_register(struct sbi_sse_event *e, ++ unsigned long handler_entry_pc, ++ unsigned long handler_entry_arg) ++{ ++ if (sse_event_state(e) != SBI_SSE_STATE_UNUSED) ++ return SBI_EINVALID_STATE; ++ ++ e->attrs.entry.pc = handler_entry_pc; ++ e->attrs.entry.arg = handler_entry_arg; ++ ++ sse_event_set_state(e, SBI_SSE_STATE_REGISTERED); ++ ++ sse_event_invoke_cb(e, register_cb); ++ ++ return 0; ++} ++ ++static int sse_event_unregister(struct sbi_sse_event *e) ++{ ++ if (sse_event_state(e) != SBI_SSE_STATE_REGISTERED) ++ return SBI_EINVALID_STATE; ++ ++ 
sse_event_invoke_cb(e, unregister_cb); ++ ++ sse_event_set_state(e, SBI_SSE_STATE_UNUSED); ++ ++ return 0; ++} ++ ++static unsigned long sse_interrupted_flags(unsigned long mstatus) ++{ ++ unsigned long hstatus, flags = 0; ++ ++ if (mstatus & MSTATUS_SPIE) ++ flags |= SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE; ++ if (mstatus & MSTATUS_SPP) ++ flags |= SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP; ++ if (mstatus & MSTATUS_SPELP) ++ flags |= SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPELP; ++ if (mstatus & MSTATUS_SDT) ++ flags |= SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT; ++ ++ if (misa_extension('H')) { ++ hstatus = csr_read(CSR_HSTATUS); ++ if (hstatus & HSTATUS_SPV) ++ flags |= SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPV; ++ if (hstatus & HSTATUS_SPVP) ++ flags |= SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPVP; ++ } ++ ++ return flags; ++} ++ ++static void sse_event_inject(struct sbi_sse_event *e, ++ struct sbi_trap_regs *regs) ++{ ++ struct sse_interrupted_state *i_ctx = &e->attrs.interrupted; ++ ++ sse_event_set_state(e, SBI_SSE_STATE_RUNNING); ++ ++ e->attrs.status = ~BIT(SBI_SSE_ATTR_STATUS_PENDING_OFFSET); ++ ++ i_ctx->a6 = regs->a6; ++ i_ctx->a7 = regs->a7; ++ i_ctx->flags = sse_interrupted_flags(regs->mstatus); ++ i_ctx->sepc = csr_read(CSR_SEPC); ++ ++ regs->mstatus &= ~(MSTATUS_SPP | SSTATUS_SPIE); ++ if (regs->mstatus & MSTATUS_MPP) ++ regs->mstatus |= MSTATUS_SPP; ++ if (regs->mstatus & MSTATUS_SIE) ++ regs->mstatus |= MSTATUS_SPIE; ++ ++ if (misa_extension('H')) { ++ unsigned long hstatus = csr_read(CSR_HSTATUS); ++ ++#if __riscv_xlen == 64 ++ if (regs->mstatus & MSTATUS_MPV) ++#elif __riscv_xlen == 32 ++ if (regs->mstatusH & MSTATUSH_MPV) ++#else ++#error "Unexpected __riscv_xlen" ++#endif ++ hstatus |= HSTATUS_SPV; ++ ++ hstatus &= ~HSTATUS_SPVP; ++ if (hstatus & HSTATUS_SPV && regs->mstatus & SSTATUS_SPP) ++ hstatus |= HSTATUS_SPVP; ++ ++ csr_write(CSR_HSTATUS, hstatus); ++ } ++ csr_write(CSR_SEPC, regs->mepc); ++ ++ /* Setup entry context */ ++ regs->a6 
= current_hartid(); ++ regs->a7 = e->attrs.entry.arg; ++ regs->mepc = e->attrs.entry.pc; ++ ++ /* ++ * Return to S-mode with virtualization disabled, not expected landing ++ * pad, supervisor trap disabled. ++ */ ++ regs->mstatus &= ~(MSTATUS_MPP | MSTATUS_SIE | MSTATUS_SPELP); ++ regs->mstatus |= (PRV_S << MSTATUS_MPP_SHIFT); ++ regs->mstatus |= MSTATUS_SDT; ++ ++#if __riscv_xlen == 64 ++ regs->mstatus &= ~MSTATUS_MPV; ++#elif __riscv_xlen == 32 ++ regs->mstatusH &= ~MSTATUSH_MPV; ++#else ++#error "Unexpected __riscv_xlen" ++#endif ++ ++} ++ ++static void sse_event_resume(struct sbi_sse_event *e, ++ struct sbi_trap_regs *regs) ++{ ++ struct sse_interrupted_state *i_ctx = &e->attrs.interrupted; ++ ++ regs->mepc = csr_read(CSR_SEPC); ++ ++ regs->mstatus &= ~MSTATUS_MPP; ++ if (regs->mstatus & MSTATUS_SPP) ++ regs->mstatus |= (PRV_S << MSTATUS_MPP_SHIFT); ++ ++ if (misa_extension('H')) { ++ unsigned long hstatus = csr_read(CSR_HSTATUS); ++#if __riscv_xlen == 64 ++ regs->mstatus &= ~MSTATUS_MPV; ++ if (hstatus & HSTATUS_SPV) ++ regs->mstatus |= MSTATUS_MPV; ++#elif __riscv_xlen == 32 ++ regs->mstatusH &= ~MSTATUSH_MPV; ++ if (hstatus & HSTATUS_SPV) ++ regs->mstatusH |= MSTATUSH_MPV; ++#else ++#error "Unexpected __riscv_xlen" ++#endif ++ hstatus &= ~(HSTATUS_SPV | HSTATUS_SPVP); ++ if (i_ctx->flags & SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPV) ++ hstatus |= HSTATUS_SPV; ++ ++ if (i_ctx->flags & SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPVP) ++ hstatus |= HSTATUS_SPVP; ++ ++ csr_write(CSR_HSTATUS, hstatus); ++ } ++ ++ regs->mstatus &= ~MSTATUS_SIE; ++ if (regs->mstatus & MSTATUS_SPIE) ++ regs->mstatus |= MSTATUS_SIE; ++ ++ regs->mstatus &= ~MSTATUS_SPIE; ++ if (i_ctx->flags & SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE) ++ regs->mstatus |= MSTATUS_SPIE; ++ ++ regs->mstatus &= ~MSTATUS_SPP; ++ if (i_ctx->flags & SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP) ++ regs->mstatus |= MSTATUS_SPP; ++ ++ regs->mstatus &= ~MSTATUS_SPELP; ++ if (i_ctx->flags & 
SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPELP) ++ regs->mstatus |= MSTATUS_SPELP; ++ ++ regs->mstatus &= ~MSTATUS_SDT; ++ if (i_ctx->flags & SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SDT) ++ regs->mstatus |= MSTATUS_SDT; ++ ++ regs->a7 = i_ctx->a7; ++ regs->a6 = i_ctx->a6; ++ csr_write(CSR_SEPC, i_ctx->sepc); ++} ++ ++static bool sse_event_is_ready(struct sbi_sse_event *e) ++{ ++ if (!sse_event_pending(e) || ++ sse_event_state(e) != SBI_SSE_STATE_ENABLED || ++ e->attrs.hartid != current_hartid()) { ++ return false; ++ } ++ ++ return true; ++} ++ ++static bool sse_event_check_inject(struct sbi_sse_event *e, ++ struct sbi_trap_regs *regs) ++{ ++ /* ++ * List of event is ordered by priority, stop at first running ++ * event since all other events after this one are of lower ++ * priority. This means an event of higher priority is already ++ * running. ++ */ ++ if (sse_event_state(e) == SBI_SSE_STATE_RUNNING) { ++ return true; ++ } ++ ++ if (sse_event_is_ready(e)) { ++ sse_event_inject(e, regs); ++ return true; ++ } ++ ++ return false; ++} ++ ++/* Return true if an event has been injected, false otherwise */ ++void sbi_sse_process_pending_events(struct sbi_trap_regs *regs) ++{ ++ bool ret; ++ struct sbi_sse_event *e; ++ struct sse_hart_state *state = sse_thishart_state_ptr(); ++ ++ /* if sse is masked on this hart, do nothing */ ++ if (state->masked) ++ return; ++ ++ spin_lock(&state->enabled_event_lock); ++ ++ sbi_list_for_each_entry(e, &state->enabled_event_list, node) { ++ ret = sse_event_check_inject(e, regs); ++ if (ret) ++ goto out; ++ } ++ ++out: ++ spin_unlock(&state->enabled_event_lock); ++} ++ ++static int sse_event_set_pending(struct sbi_sse_event *e) ++{ ++ if (sse_event_state(e) != SBI_SSE_STATE_RUNNING && ++ sse_event_state(e) != SBI_SSE_STATE_ENABLED) ++ return SBI_EINVALID_STATE; ++ ++ e->attrs.status |= BIT(SBI_SSE_ATTR_STATUS_PENDING_OFFSET); ++ ++ return SBI_OK; ++} ++ ++static void sse_ipi_inject_process(struct sbi_scratch *scratch) ++{ ++ struct 
sbi_sse_event *e; ++ struct sse_ipi_inject_data evt; ++ struct sbi_fifo *sse_inject_fifo_r = ++ sbi_scratch_offset_ptr(scratch, sse_inject_fifo_off); ++ ++ /* Mark all queued events as pending */ ++ while (!sbi_fifo_dequeue(sse_inject_fifo_r, &evt)) { ++ if (sse_event_get(evt.event_id, &e)) ++ continue; ++ ++ sse_event_set_pending(e); ++ sse_event_put(e); ++ } ++} ++ ++static struct sbi_ipi_event_ops sse_ipi_inject_ops = { ++ .name = "IPI_SSE_INJECT", ++ .process = sse_ipi_inject_process, ++}; ++ ++static int sse_ipi_inject_send(unsigned long hartid, uint32_t event_id) ++{ ++ int ret; ++ struct sbi_scratch *remote_scratch = NULL; ++ struct sse_ipi_inject_data evt = {event_id}; ++ struct sbi_fifo *sse_inject_fifo_r; ++ ++ remote_scratch = sbi_hartid_to_scratch(hartid); ++ if (!remote_scratch) ++ return SBI_EINVAL; ++ sse_inject_fifo_r = ++ sbi_scratch_offset_ptr(remote_scratch, sse_inject_fifo_off); ++ ++ ret = sbi_fifo_enqueue(sse_inject_fifo_r, &evt); ++ if (ret) ++ return SBI_EFAIL; ++ ++ ret = sbi_ipi_send_many(1, hartid, sse_ipi_inject_event, NULL); ++ if (ret) ++ return SBI_EFAIL; ++ ++ return SBI_OK; ++} ++ ++static int sse_inject_event(uint32_t event_id, unsigned long hartid) ++{ ++ int ret; ++ struct sbi_sse_event *e; ++ ++ ret = sse_event_get(event_id, &e); ++ if (ret) ++ return ret; ++ ++ /* In case of global event, provided hart_id is ignored */ ++ if (sse_event_is_global(e)) ++ hartid = e->attrs.hartid; ++ ++ /* Event is for another hart, send it through IPI */ ++ if (hartid != current_hartid()) { ++ sse_event_put(e); ++ return sse_ipi_inject_send(hartid, event_id); ++ } ++ ++ ret = sse_event_set_pending(e); ++ sse_event_put(e); ++ if (ret) ++ return ret; ++ ++ return SBI_OK; ++} ++ ++/** ++ * Must be called under owner hart lock ++ */ ++static int sse_event_enable(struct sbi_sse_event *e) ++{ ++ if (sse_event_state(e) != SBI_SSE_STATE_REGISTERED) ++ return SBI_EINVALID_STATE; ++ ++ sse_event_set_state(e, SBI_SSE_STATE_ENABLED); ++ 
sse_event_add_to_list(e); ++ ++ sse_event_invoke_cb(e, enable_cb); ++ ++ if (sse_event_is_global(e) && sse_event_pending(e)) ++ sbi_ipi_send_many(1, e->attrs.hartid, sse_ipi_inject_event, ++ NULL); ++ ++ return SBI_OK; ++} ++ ++static int sse_event_complete(struct sbi_sse_event *e, ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) ++{ ++ if (sse_event_state(e) != SBI_SSE_STATE_RUNNING) ++ return SBI_EINVALID_STATE; ++ ++ if (e->attrs.hartid != current_hartid()) ++ return SBI_EINVAL; ++ ++ sse_event_set_state(e, SBI_SSE_STATE_ENABLED); ++ if (e->attrs.config & SBI_SSE_ATTR_CONFIG_ONESHOT) ++ sse_event_disable(e); ++ ++ sse_event_invoke_cb(e, complete_cb); ++ ++ sse_event_resume(e, regs); ++ out->skip_regs_update = true; ++ ++ return SBI_OK; ++} ++ ++int sbi_sse_complete(struct sbi_trap_regs *regs, struct sbi_ecall_return *out) ++{ ++ int ret = SBI_OK; ++ struct sbi_sse_event *tmp; ++ struct sse_hart_state *state = sse_thishart_state_ptr(); ++ ++ spin_lock(&state->enabled_event_lock); ++ sbi_list_for_each_entry(tmp, &state->enabled_event_list, node) { ++ /* ++ * List of event is ordered by priority, first one running is ++ * the one that needs to be completed ++ */ ++ if (sse_event_state(tmp) == SBI_SSE_STATE_RUNNING) { ++ ret = sse_event_complete(tmp, regs, out); ++ break; ++ } ++ } ++ spin_unlock(&state->enabled_event_lock); ++ ++ return ret; ++} ++ ++int sbi_sse_enable(uint32_t event_id) ++{ ++ int ret; ++ struct sbi_sse_event *e; ++ ++ ret = sse_event_get(event_id, &e); ++ if (ret) ++ return ret; ++ ++ sse_enabled_event_lock(e); ++ ret = sse_event_enable(e); ++ sse_enabled_event_unlock(e); ++ sse_event_put(e); ++ ++ return ret; ++} ++ ++int sbi_sse_disable(uint32_t event_id) ++{ ++ int ret; ++ struct sbi_sse_event *e; ++ ++ ret = sse_event_get(event_id, &e); ++ if (ret) ++ return ret; ++ ++ sse_enabled_event_lock(e); ++ ret = sse_event_disable(e); ++ sse_enabled_event_unlock(e); ++ ++ sse_event_put(e); ++ ++ return ret; ++} ++ ++int 
sbi_sse_hart_mask(void) ++{ ++ struct sse_hart_state *state = sse_thishart_state_ptr(); ++ ++ if (!state) ++ return SBI_EFAIL; ++ ++ if (state->masked) ++ return SBI_EALREADY_STOPPED; ++ ++ state->masked = true; ++ ++ return SBI_SUCCESS; ++} ++ ++int sbi_sse_hart_unmask(void) ++{ ++ struct sse_hart_state *state = sse_thishart_state_ptr(); ++ ++ if (!state) ++ return SBI_EFAIL; ++ ++ if (!state->masked) ++ return SBI_EALREADY_STARTED; ++ ++ state->masked = false; ++ ++ return SBI_SUCCESS; ++} ++ ++int sbi_sse_inject_from_ecall(uint32_t event_id, unsigned long hartid, ++ struct sbi_ecall_return *out) ++{ ++ if (!sbi_domain_is_assigned_hart(sbi_domain_thishart_ptr(), ++ sbi_hartid_to_hartindex(hartid))) ++ return SBI_EINVAL; ++ ++ return sse_inject_event(event_id, hartid); ++} ++ ++int sbi_sse_inject_event(uint32_t event_id) ++{ ++ return sse_inject_event(event_id, current_hartid()); ++} ++ ++int sbi_sse_add_event(uint32_t event_id, const struct sbi_sse_cb_ops *cb_ops) ++{ ++ struct sse_event_info *info; ++ ++ /* Do not allow adding an event twice */ ++ info = sse_event_info_get(event_id); ++ if (info) ++ return SBI_EALREADY; ++ ++ if (cb_ops && cb_ops->set_hartid_cb && !EVENT_IS_GLOBAL(event_id)) ++ return SBI_EINVAL; ++ ++ info = sbi_zalloc(sizeof(*info)); ++ if (!info) ++ return SBI_ENOMEM; ++ ++ info->cb_ops = cb_ops; ++ info->event_id = event_id; ++ ++ SBI_SLIST_ADD(info, supported_events); ++ ++ return SBI_OK; ++} ++ ++int sbi_sse_attr_check(uint32_t base_attr_id, uint32_t attr_count, ++ unsigned long phys_lo, unsigned long phys_hi, ++ unsigned long access) ++{ ++ const unsigned align = __riscv_xlen >> 3; ++ uint64_t end_id = (uint64_t)base_attr_id + (attr_count - 1); ++ ++ if (attr_count == 0) ++ return SBI_ERR_INVALID_PARAM; ++ ++ if (end_id >= SBI_SSE_ATTR_MAX) ++ return SBI_EBAD_RANGE; ++ ++ if (phys_lo & (align - 1)) ++ return SBI_EINVALID_ADDR; ++ ++ /* ++ * On RV32, the M-mode can only access the first 4GB of ++ * the physical address space because M-mode 
does not have ++ * MMU to access full 34-bit physical address space. ++ * ++ * Based on above, we simply fail if the upper 32bits of ++ * the physical address (i.e. a2 register) is non-zero on ++ * RV32. ++ */ ++ if (phys_hi) ++ return SBI_EINVALID_ADDR; ++ ++ if (!sbi_domain_check_addr(sbi_domain_thishart_ptr(), phys_lo, ++ PRV_S, access)) ++ return SBI_EINVALID_ADDR; ++ ++ return SBI_OK; ++} ++ ++static void copy_attrs(unsigned long *out, const unsigned long *in, ++ unsigned int long_count) ++{ ++ int i = 0; ++ ++ /* ++ * sbi_memcpy() does byte-per-byte copy, using this yields long-per-long ++ * copy ++ */ ++ for (i = 0; i < long_count; i++) ++ out[i] = in[i]; ++} ++ ++int sbi_sse_read_attrs(uint32_t event_id, uint32_t base_attr_id, ++ uint32_t attr_count, unsigned long output_phys_lo, ++ unsigned long output_phys_hi) ++{ ++ int ret; ++ unsigned long *e_attrs; ++ struct sbi_sse_event *e; ++ unsigned long *attrs; ++ ++ ret = sbi_sse_attr_check(base_attr_id, attr_count, output_phys_lo, ++ output_phys_hi, SBI_DOMAIN_WRITE); ++ if (ret) ++ return ret; ++ ++ ret = sse_event_get(event_id, &e); ++ if (ret) ++ return ret; ++ ++ /* 未支持smepmp的情况下无需映射 */ ++ //sbi_hart_map_saddr(output_phys_lo, sizeof(unsigned long) * attr_count); ++ ++ /* ++ * Copy all attributes at once since struct sse_event_attrs is matching ++ * the SBI_SSE_ATTR_* attributes. READ_ATTR is used in SSE handling path ++ * to retrieve the value of registers when interrupted. Rather than ++ * doing multiple SBI calls a single one is done allowing to retrieve ++ * them all at once. 
++ */ ++ e_attrs = (unsigned long *)&e->attrs; ++ attrs = (unsigned long *)output_phys_lo; ++ copy_attrs(attrs, &e_attrs[base_attr_id], attr_count); ++ ++ // sbi_hart_unmap_saddr(); ++ ++ sse_event_put(e); ++ ++ return SBI_OK; ++} ++ ++static int sse_write_attrs(struct sbi_sse_event *e, uint32_t base_attr_id, ++ uint32_t attr_count, unsigned long input_phys) ++{ ++ int ret = 0; ++ unsigned long attr = 0, val; ++ uint32_t id, end_id = base_attr_id + attr_count; ++ unsigned long *attrs = (unsigned long *)input_phys; ++ ++ ++ for (id = base_attr_id; id < end_id; id++) { ++ val = attrs[attr++]; ++ ret = sse_event_set_attr_check(e, id, val); ++ if (ret) ++ goto out; ++ } ++ ++ attr = 0; ++ for (id = base_attr_id; id < end_id; id++) { ++ val = attrs[attr++]; ++ sse_event_set_attr(e, id, val); ++ } ++ ++out: ++ ++ return ret; ++} ++ ++int sbi_sse_write_attrs(uint32_t event_id, uint32_t base_attr_id, ++ uint32_t attr_count, unsigned long input_phys_lo, ++ unsigned long input_phys_hi) ++{ ++ int ret = 0; ++ struct sbi_sse_event *e; ++ ++ ret = sbi_sse_attr_check(base_attr_id, attr_count, input_phys_lo, ++ input_phys_hi, SBI_DOMAIN_READ); ++ if (ret) ++ return ret; ++ ++ ret = sse_event_get(event_id, &e); ++ if (ret) ++ return ret; ++ ++ ret = sse_write_attrs(e, base_attr_id, attr_count, input_phys_lo); ++ sse_event_put(e); ++ ++ return ret; ++} ++ ++int sbi_sse_register(uint32_t event_id, unsigned long handler_entry_pc, ++ unsigned long handler_entry_arg) ++{ ++ int ret; ++ struct sbi_sse_event *e; ++ ++ if (handler_entry_pc & 0x1) ++ return SBI_EINVAL; ++ ++ if (!sbi_domain_check_addr(sbi_domain_thishart_ptr(), handler_entry_pc, ++ PRV_S, SBI_DOMAIN_EXECUTE)) ++ return SBI_EINVALID_ADDR; ++ ++ ret = sse_event_get(event_id, &e); ++ if (ret) ++ return ret; ++ ++ ret = sse_event_register(e, handler_entry_pc, handler_entry_arg); ++ sse_event_put(e); ++ ++ return ret; ++} ++ ++int sbi_sse_unregister(uint32_t event_id) ++{ ++ int ret; ++ struct sbi_sse_event *e; ++ ++ ret = 
sse_event_get(event_id, &e); ++ if (ret) ++ return ret; ++ ++ ret = sse_event_unregister(e); ++ sse_event_put(e); ++ ++ return ret; ++} ++ ++static void sse_event_init(struct sbi_sse_event *e, struct sse_event_info *info) ++{ ++ e->event_id = info->event_id; ++ e->info = info; ++ e->hartindex = sbi_hartid_to_hartindex(current_hartid()); ++ e->attrs.hartid = current_hartid(); ++ /* Declare all events as injectable */ ++ e->attrs.status |= BIT(SBI_SSE_ATTR_STATUS_INJECT_OFFSET); ++} ++ ++static void sse_event_count_init() ++{ ++ struct sse_event_info *info; ++ ++ SBI_SLIST_FOR_EACH_ENTRY(info, supported_events) { ++ if (EVENT_IS_GLOBAL(info->event_id)) ++ global_event_count++; ++ else ++ local_event_count++; ++ } ++} ++ ++static int sse_global_init() ++{ ++ struct sbi_sse_event *e; ++ unsigned int ev = 0; ++ struct sse_event_info *info; ++ ++ global_events = sbi_zalloc(sizeof(*global_events) * global_event_count); ++ if (!global_events) ++ return SBI_ENOMEM; ++ ++ SBI_SLIST_FOR_EACH_ENTRY(info, supported_events) { ++ if (!EVENT_IS_GLOBAL(info->event_id)) ++ continue; ++ ++ e = &global_events[ev].event; ++ sse_event_init(e, info); ++ SPIN_LOCK_INIT(global_events[ev].lock); ++ ++ ev++; ++ } ++ ++ return 0; ++} ++ ++static void sse_local_init(struct sse_hart_state *shs) ++{ ++ unsigned int ev = 0; ++ struct sse_event_info *info; ++ ++ SBI_INIT_LIST_HEAD(&shs->enabled_event_list); ++ SPIN_LOCK_INIT(shs->enabled_event_lock); ++ ++ SBI_SLIST_FOR_EACH_ENTRY(info, supported_events) { ++ if (EVENT_IS_GLOBAL(info->event_id)) ++ continue; ++ sse_event_init(&shs->local_events[ev++], info); ++ } ++} ++ ++int sbi_sse_init(struct sbi_scratch *scratch, bool cold_boot) ++{ ++ int ret; ++ void *sse_inject_mem; ++ struct sse_hart_state *shs; ++ struct sbi_fifo *sse_inject_q; ++ ++ if (cold_boot) { ++ sse_event_count_init(); ++ ++ ret = sse_global_init(); ++ if (ret) ++ return ret; ++ ++ shs_ptr_off = sbi_scratch_alloc_offset(sizeof(void *)); ++ if (!shs_ptr_off) ++ return SBI_ENOMEM; 
++ ++ sse_inject_fifo_off = ++ sbi_scratch_alloc_offset(sizeof(*sse_inject_q)); ++ if (!sse_inject_fifo_off) { ++ sbi_scratch_free_offset(shs_ptr_off); ++ return SBI_ENOMEM; ++ } ++ ++ sse_inject_fifo_mem_off = sbi_scratch_alloc_offset( ++ (global_event_count + local_event_count) * ++ sizeof(struct sse_ipi_inject_data)); ++ if (!sse_inject_fifo_mem_off) { ++ sbi_scratch_free_offset(sse_inject_fifo_off); ++ sbi_scratch_free_offset(shs_ptr_off); ++ return SBI_ENOMEM; ++ } ++ ++ ret = sbi_ipi_event_create(&sse_ipi_inject_ops); ++ if (ret < 0) { ++ sbi_scratch_free_offset(shs_ptr_off); ++ return ret; ++ } ++ sse_ipi_inject_event = ret; ++ } ++ ++ shs = sse_get_hart_state_ptr(scratch); ++ if (!shs) { ++ /* Allocate per hart state and local events at once */ ++ shs = sbi_zalloc(sizeof(*shs) + sizeof(struct sbi_sse_event) * ++ local_event_count); ++ if (!shs) ++ return SBI_ENOMEM; ++ ++ shs->local_events = (struct sbi_sse_event *)(shs + 1); ++ ++ /* SSE events are masked until hart unmasks them */ ++ shs->masked = true; ++ ++ sse_set_hart_state_ptr(scratch, shs); ++ } ++ ++ sse_local_init(shs); ++ ++ sse_inject_q = sbi_scratch_offset_ptr(scratch, sse_inject_fifo_off); ++ sse_inject_mem = ++ sbi_scratch_offset_ptr(scratch, sse_inject_fifo_mem_off); ++ ++ sbi_fifo_init(sse_inject_q, sse_inject_mem, ++ (global_event_count + local_event_count), ++ sizeof(struct sse_ipi_inject_data)); ++ ++ return 0; ++} ++ ++void sbi_sse_exit(struct sbi_scratch *scratch) ++{ ++ struct sbi_sse_event *e; ++ struct sse_event_info *info; ++ ++ SBI_SLIST_FOR_EACH_ENTRY(info, supported_events) { ++ if (sse_event_get(info->event_id, &e)) ++ continue; ++ ++ if (e->attrs.hartid != current_hartid()) ++ goto skip; ++ ++ if (sse_event_state(e) > SBI_SSE_STATE_REGISTERED) ++ sse_event_set_state(e, SBI_SSE_STATE_UNUSED); ++ ++skip: ++ sse_event_put(e); ++ } ++} +diff --git a/lib/sbi/sbi_trap.c b/lib/sbi/sbi_trap.c +index 4a7ff93a..e46e4e78 100644 +--- a/lib/sbi/sbi_trap.c ++++ b/lib/sbi/sbi_trap.c +@@ 
-278,7 +278,7 @@ struct sbi_trap_regs *sbi_trap_handler(struct sbi_trap_regs *regs) + mtinst = csr_read(CSR_MTINST); + } + int hartid = csr_read(CSR_MHARTID); +- m_mode_status[hartid] = 1; ++ m_mode_status[hartid] = 1; + + if (mcause & (1UL << (__riscv_xlen - 1))) { + mcause &= ~(1UL << (__riscv_xlen - 1)); +@@ -297,13 +297,14 @@ struct sbi_trap_regs *sbi_trap_handler(struct sbi_trap_regs *regs) + rc = sbi_irqchip_process(regs); + if (rc) + goto trap_error; ++ break; + default: + msg = "unhandled external interrupt"; + goto trap_error; + }; + hartid = csr_read(CSR_MHARTID); +- m_mode_status[hartid] = 0; +- return regs; ++ m_mode_status[hartid] = 0; ++ return regs; + } + + switch (mcause) { +@@ -355,8 +356,12 @@ struct sbi_trap_regs *sbi_trap_handler(struct sbi_trap_regs *regs) + trap_error: + if (rc) + sbi_trap_error(msg, rc, mcause, mtval, mtval2, mtinst, regs); ++ ++ if (sbi_mstatus_prev_mode(regs->mstatus) != PRV_M) ++ sbi_sse_process_pending_events(regs); ++ + hartid = csr_read(CSR_MHARTID); +- m_mode_status[hartid] = 0; ++ m_mode_status[hartid] = 0; + return regs; + } + +diff --git a/lib/utils/fdt/fdt_driver.c b/lib/utils/fdt/fdt_driver.c +new file mode 100644 +index 00000000..45d43fcc +--- /dev/null ++++ b/lib/utils/fdt/fdt_driver.c +@@ -0,0 +1,93 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 SiFive ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++extern unsigned long fdt_early_drivers_size; ++ ++int fdt_driver_init_by_offset(const void *fdt, int nodeoff, ++ const struct fdt_driver *const *drivers) ++{ ++ const struct fdt_driver *driver; ++ const struct fdt_match *match; ++ int compat_len, prop_len, rc; ++ const char *compat_str; ++ ++ if (!fdt_node_is_enabled((void *)fdt, nodeoff)) ++ return SBI_ENODEV; ++ ++ compat_str = fdt_getprop(fdt, nodeoff, "compatible", &prop_len); ++ if (!compat_str) ++ return SBI_ENODEV; ++ ++ while ((compat_len = strnlen(compat_str, prop_len) + 1) <= prop_len) { ++ for (int i = 
0; i < fdt_early_drivers_size; i++) { ++ driver = drivers[i]; ++ for (match = driver->match_table; match->compatible; match++) { ++ if (!memcmp(match->compatible, compat_str, compat_len)) ++ goto found; ++ } ++ } ++ ++ compat_str += compat_len; ++ prop_len -= compat_len; ++ } ++ ++ return SBI_ENODEV; ++ ++found: ++ if (driver->experimental) ++ sbi_printf("WARNING: %s driver is experimental and may change\n", ++ match->compatible); ++ ++ rc = driver->init(fdt, nodeoff, match); ++ if (rc < 0) { ++ const char *name; ++ ++ name = fdt_get_name(fdt, nodeoff, NULL); ++ sbi_printf("%s: %s (%s) init failed: %d\n", ++ __func__, name, match->compatible, rc); ++ } ++ ++ return rc; ++} ++ ++static int fdt_driver_init_scan(const void *fdt, ++ const struct fdt_driver *const *drivers, ++ bool one) ++{ ++ int nodeoff, rc; ++ ++ for (nodeoff = fdt_next_node(fdt, -1, NULL); ++ nodeoff >= 0; ++ nodeoff = fdt_next_node(fdt, nodeoff, NULL)) { ++ rc = fdt_driver_init_by_offset(fdt, nodeoff, drivers); ++ if (rc == SBI_ENODEV) ++ continue; ++ if (rc < 0) ++ return rc; ++ if (one) ++ return 0; ++ } ++ ++ return one ? 
SBI_ENODEV : 0; ++} ++ ++int fdt_driver_init_all(const void *fdt, ++ const struct fdt_driver *const *drivers) ++{ ++ return fdt_driver_init_scan(fdt, drivers, false); ++} ++ ++int fdt_driver_init_one(const void *fdt, ++ const struct fdt_driver *const *drivers) ++{ ++ return fdt_driver_init_scan(fdt, drivers, true); ++} +diff --git a/lib/utils/fdt/fdt_early_drivers.carray b/lib/utils/fdt/fdt_early_drivers.carray +new file mode 100644 +index 00000000..969e10b1 +--- /dev/null ++++ b/lib/utils/fdt/fdt_early_drivers.carray +@@ -0,0 +1,3 @@ ++HEADER: sbi_utils/fdt/fdt_driver.h ++TYPE: const struct fdt_driver ++NAME: fdt_early_drivers +diff --git a/lib/utils/fdt/fdt_fixup.c b/lib/utils/fdt/fdt_fixup.c +index 41f6cbb7..c7de545d 100644 +--- a/lib/utils/fdt/fdt_fixup.c ++++ b/lib/utils/fdt/fdt_fixup.c +@@ -268,8 +268,10 @@ int fdt_reserved_memory_fixup(void *fdt) + + addr = reg->base; + size = 1UL << reg->order; +- fdt_resv_memory_update_node(fdt, addr, size, i, parent, +- (sbi_hart_pmp_count(scratch)) ? false : true); ++ fdt_resv_memory_update_node( ++ fdt, addr, size, i, parent, ++ (sbi_hart_pmp_count(scratch) >= PMP_COUNT) ? 
false ++ : true); + i++; + } + +diff --git a/lib/utils/fdt/objects.mk b/lib/utils/fdt/objects.mk +index 5cede811..c01591e1 100644 +--- a/lib/utils/fdt/objects.mk ++++ b/lib/utils/fdt/objects.mk +@@ -4,7 +4,10 @@ + # Copyright (C) 2020 Bin Meng + # + ++libsbiutils-objs-$(CONFIG_FDT) += fdt/fdt_early_drivers.o ++ + libsbiutils-objs-$(CONFIG_FDT_DOMAIN) += fdt/fdt_domain.o + libsbiutils-objs-$(CONFIG_FDT_PMU) += fdt/fdt_pmu.o + libsbiutils-objs-$(CONFIG_FDT) += fdt/fdt_helper.o ++libsbiutils-objs-$(CONFIG_FDT) += fdt/fdt_driver.o + libsbiutils-objs-$(CONFIG_FDT) += fdt/fdt_fixup.o +diff --git a/platform/fpga/ariane/platform.c b/platform/fpga/ariane/platform.c +index 56a666bb..3f963520 100644 +--- a/platform/fpga/ariane/platform.c ++++ b/platform/fpga/ariane/platform.c +@@ -185,5 +185,6 @@ const struct sbi_platform platform = { + .features = SBI_PLATFORM_DEFAULT_FEATURES, + .hart_count = ARIANE_HART_COUNT, + .hart_stack_size = SBI_PLATFORM_DEFAULT_HART_STACK_SIZE, ++ .heap_size = SBI_PLATFORM_DEFAULT_HEAP_SIZE(ARIANE_HART_COUNT), + .platform_ops_addr = (unsigned long)&platform_ops + }; +diff --git a/platform/fpga/openpiton/platform.c b/platform/fpga/openpiton/platform.c +index 5ff7d200..1baf323e 100644 +--- a/platform/fpga/openpiton/platform.c ++++ b/platform/fpga/openpiton/platform.c +@@ -220,5 +220,7 @@ const struct sbi_platform platform = { + .features = SBI_PLATFORM_DEFAULT_FEATURES, + .hart_count = OPENPITON_DEFAULT_HART_COUNT, + .hart_stack_size = SBI_PLATFORM_DEFAULT_HART_STACK_SIZE, ++ .heap_size = ++ SBI_PLATFORM_DEFAULT_HEAP_SIZE(OPENPITON_DEFAULT_HART_COUNT), + .platform_ops_addr = (unsigned long)&platform_ops + }; +diff --git a/platform/generic/include/platform_override.h b/platform/generic/include/platform_override.h +index 7f1558dd..ff4302e5 100644 +--- a/platform/generic/include/platform_override.h ++++ b/platform/generic/include/platform_override.h +@@ -11,9 +11,12 @@ + #define __PLATFORM_OVERRIDE_H__ + + #include ++#include + #include + #include + 
++int generic_mpxy_init(void); ++ + struct platform_override { + const struct fdt_match *match_table; + u64 (*features)(const struct fdt_match *match); +@@ -27,9 +30,8 @@ struct platform_override { + struct sbi_hart_features *hfeatures); + int (*vendor_ext_check)(long extid, const struct fdt_match *match); + int (*vendor_ext_provider)(long extid, long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_value, +- struct sbi_trap_info *out_trap, ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out, + const struct fdt_match *match); + }; + +diff --git a/platform/generic/platform.c b/platform/generic/platform.c +index bfe15f0d..567a3e10 100644 +--- a/platform/generic/platform.c ++++ b/platform/generic/platform.c +@@ -14,11 +14,13 @@ + #include + #include + #include ++#include + #include + #include + #include + #include + #include ++#include + #include + #include + #include +@@ -111,7 +113,7 @@ unsigned long fw_platform_init(unsigned long arg0, unsigned long arg1, + } + + platform.hart_count = hart_count; +- ++ platform.heap_size = SBI_PLATFORM_DEFAULT_HEAP_SIZE(hart_count); + platform_has_mlevel_imsic = fdt_check_imsic_mlevel(fdt); + + /* Return original FDT pointer */ +@@ -131,6 +133,11 @@ static int generic_nascent_init(void) + + static int generic_early_init(bool cold_boot) + { ++ const void *fdt = fdt_get_address(); ++ ++ if (cold_boot) ++ fdt_driver_init_all(fdt, fdt_early_drivers); ++ + if (!generic_plat || !generic_plat->early_init) + return 0; + +@@ -179,14 +186,12 @@ static int generic_vendor_ext_check(long extid) + } + + static int generic_vendor_ext_provider(long extid, long funcid, +- const struct sbi_trap_regs *regs, +- unsigned long *out_value, +- struct sbi_trap_info *out_trap) ++ struct sbi_trap_regs *regs, ++ struct sbi_ecall_return *out) + { + if (generic_plat && generic_plat->vendor_ext_provider) { +- return generic_plat->vendor_ext_provider(extid, funcid, regs, +- out_value, out_trap, +- generic_plat_match); ++ return 
generic_plat->vendor_ext_provider( ++ extid, funcid, regs, out, generic_plat_match); + } + + return SBI_ENOTSUPP; +@@ -260,6 +265,13 @@ static int generic_console_init(void) + return fdt_serial_init(); + } + ++int generic_mpxy_init(void) ++{ ++ const void *fdt = fdt_get_address(); ++ ++ return fdt_mpxy_init(fdt); ++} ++ + const struct sbi_platform_operations platform_ops = { + .nascent_init = generic_nascent_init, + .early_init = generic_early_init, +@@ -280,6 +292,7 @@ const struct sbi_platform_operations platform_ops = { + .timer_exit = fdt_timer_exit, + .vendor_ext_check = generic_vendor_ext_check, + .vendor_ext_provider = generic_vendor_ext_provider, ++ .mpxy_init = generic_mpxy_init, + }; + + struct sbi_platform platform = { +@@ -292,5 +305,6 @@ struct sbi_platform platform = { + .hart_count = SBI_HARTMASK_MAX_BITS, + .hart_index2id = generic_hart_index2id, + .hart_stack_size = SBI_PLATFORM_DEFAULT_HART_STACK_SIZE, ++ .heap_size = SBI_PLATFORM_DEFAULT_HEAP_SIZE(0), + .platform_ops_addr = (unsigned long)&platform_ops + }; +diff --git a/platform/kendryte/k210/platform.c b/platform/kendryte/k210/platform.c +index ef848c7c..18b47586 100644 +--- a/platform/kendryte/k210/platform.c ++++ b/platform/kendryte/k210/platform.c +@@ -196,5 +196,7 @@ const struct sbi_platform platform = { + .features = 0, + .hart_count = K210_HART_COUNT, + .hart_stack_size = SBI_PLATFORM_DEFAULT_HART_STACK_SIZE, ++ .heap_size = ++ SBI_PLATFORM_DEFAULT_HEAP_SIZE(K210_HART_COUNT), + .platform_ops_addr = (unsigned long)&platform_ops + }; +diff --git a/platform/nuclei/ux600/platform.c b/platform/nuclei/ux600/platform.c +index 2b027347..661a8526 100644 +--- a/platform/nuclei/ux600/platform.c ++++ b/platform/nuclei/ux600/platform.c +@@ -244,5 +244,7 @@ const struct sbi_platform platform = { + .features = SBI_PLATFORM_DEFAULT_FEATURES, + .hart_count = UX600_HART_COUNT, + .hart_stack_size = SBI_PLATFORM_DEFAULT_HART_STACK_SIZE, ++ .heap_size = ++ SBI_PLATFORM_DEFAULT_HEAP_SIZE(UX600_HART_COUNT), + 
.platform_ops_addr = (unsigned long)&platform_ops + }; +diff --git a/platform/template/platform.c b/platform/template/platform.c +index f3802dad..4219f221 100644 +--- a/platform/template/platform.c ++++ b/platform/template/platform.c +@@ -152,5 +152,6 @@ const struct sbi_platform platform = { + .features = SBI_PLATFORM_DEFAULT_FEATURES, + .hart_count = 1, + .hart_stack_size = SBI_PLATFORM_DEFAULT_HART_STACK_SIZE, ++ .heap_size = SBI_PLATFORM_DEFAULT_HEAP_SIZE(1), + .platform_ops_addr = (unsigned long)&platform_ops + }; +-- +2.27.0 + diff --git a/0003-lib-sbi-Introduce-high-priority-interrupt-for-RAS.patch b/0003-lib-sbi-Introduce-high-priority-interrupt-for-RAS.patch new file mode 100644 index 0000000000000000000000000000000000000000..67b1bcd11f535dde202e6230b00b81a91a711e2f --- /dev/null +++ b/0003-lib-sbi-Introduce-high-priority-interrupt-for-RAS.patch @@ -0,0 +1,72 @@ +From 6cc26a6cf88f7166589f22df40675a5eb5519174 Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Mon, 23 Oct 2023 11:59:07 +0530 +Subject: [PATCH 03/16] lib: sbi: Introduce high priority interrupt for RAS + +- Add high priority interrupt for RAS +- Enable the interrupt + +Reference: https://github.com/ventanamicro/opensbi/commit/c972918ccc1d05e2585d80da057dd7aaa28d2cb3 + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + include/sbi/riscv_encoding.h | 6 ++++++ + lib/sbi/sbi_irqchip.c | 9 +++++++++ + 2 files changed, 15 insertions(+) + +diff --git a/include/sbi/riscv_encoding.h b/include/sbi/riscv_encoding.h +index 2a4e363c..af638212 100644 +--- a/include/sbi/riscv_encoding.h ++++ b/include/sbi/riscv_encoding.h +@@ -93,6 +93,7 @@ + #define IRQ_M_EXT 11 + #define IRQ_S_GEXT 12 + #define IRQ_PMU_OVF 13 ++#define IRQ_RASHP_INT 43 + + #define MIP_SSIP (_UL(1) << IRQ_S_SOFT) + #define MIP_VSSIP (_UL(1) << IRQ_VS_SOFT) +@@ -105,6 +106,11 @@ + #define MIP_MEIP (_UL(1) << IRQ_M_EXT) + #define MIP_SGEIP (_UL(1) << IRQ_S_GEXT) + #define MIP_LCOFIP (_UL(1) << 
IRQ_PMU_OVF) ++#if __riscv_xlen == 64 ++#define MIP_RASHP_INTP (_UL(1) << IRQ_RASHP_INT) ++#else ++#define MIPH_RASHP_INTP (_UL(1) << (IRQ_RASHP_INT - 32)) ++#endif + + #define SIP_SSIP MIP_SSIP + #define SIP_STIP MIP_STIP +diff --git a/lib/sbi/sbi_irqchip.c b/lib/sbi/sbi_irqchip.c +index 24128bec..bcd2f156 100644 +--- a/lib/sbi/sbi_irqchip.c ++++ b/lib/sbi/sbi_irqchip.c +@@ -7,6 +7,7 @@ + * Anup Patel + */ + ++#include + #include + #include + +@@ -40,6 +41,14 @@ int sbi_irqchip_init(struct sbi_scratch *scratch, bool cold_boot) + if (ext_irqfn != default_irqfn) + csr_set(CSR_MIE, MIP_MEIP); + ++ if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMAIA)) { ++#if __riscv_xlen == 32 ++ csr_set(CSR_MIEH, MIPH_RASHP_INTP); ++#else ++ csr_set(CSR_MIE, MIP_RASHP_INTP); ++#endif ++ } ++ + return 0; + } + +-- +2.27.0 + diff --git a/0004-lib-sbi-Introduce-RAS-common-interface-driver.patch b/0004-lib-sbi-Introduce-RAS-common-interface-driver.patch new file mode 100644 index 0000000000000000000000000000000000000000..b6103f0806db79de5ef72b5f2d7da03ce903e1a9 --- /dev/null +++ b/0004-lib-sbi-Introduce-RAS-common-interface-driver.patch @@ -0,0 +1,177 @@ +From 3ca3a5450d616ebb4e8a2a6e564a3fb3e374a039 Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Mon, 23 Oct 2023 12:46:07 +0530 +Subject: [PATCH 04/16] lib: sbi: Introduce RAS common interface driver + +- Add a RAS driver interface framework +- The actual RAS drivers will set/get the RAS agent to + the interface +- SBI makes the RAS agent calls through this interface + +Reference: https://github.com/ventanamicro/opensbi/commit/c3672283577ca9d9a1ab339c606f68b2b0f3eead + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + include/sbi/sbi_ras.h | 41 ++++++++++++++++++++++++++++++++ + lib/sbi/objects.mk | 1 + + lib/sbi/sbi_init.c | 5 ++++ + lib/sbi/sbi_ras.c | 55 +++++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 102 insertions(+) + create mode 100644 include/sbi/sbi_ras.h + 
create mode 100644 lib/sbi/sbi_ras.c + +diff --git a/include/sbi/sbi_ras.h b/include/sbi/sbi_ras.h +new file mode 100644 +index 00000000..890caf04 +--- /dev/null ++++ b/include/sbi/sbi_ras.h +@@ -0,0 +1,41 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2023 Ventana Micro Systems, Inc. ++ * ++ * Author(s): ++ * Himanshu Chauhan ++ */ ++ ++#ifndef __SBI_RAS_H__ ++#define __SBI_RAS_H__ ++ ++#include ++ ++/** RAS Agent */ ++struct sbi_ras_agent { ++ /** Name of the RAS agent */ ++ char name[32]; ++ ++ /** probe - returns register width if implemented, 0 otherwise */ ++ int (*ras_probe)(void); ++ ++ /** synchronize CPU errors */ ++ int (*ras_sync_hart_errs)(u32 *pending_vectors, u32 *nr_pending, ++ u32 *nr_remaining); ++ ++ /** synchronize device errors */ ++ int (*ras_sync_dev_errs)(u32 *pending_vectors, u32 *nr_pending, ++ u32 *nr_remaining); ++}; ++ ++int sbi_ras_probe(void); ++int sbi_ras_sync_hart_errs(u32 *pending_vectors, u32 *nr_pending, ++ u32 *nr_remaining); ++int sbi_ras_sync_dev_errs(u32 *pending_vectors, u32 *nr_pending, ++ u32 *nr_remaining); ++ ++const struct sbi_ras_agent *sbi_ras_get_agent(void); ++void sbi_ras_set_agent(const struct sbi_ras_agent *agent); ++ ++#endif +diff --git a/lib/sbi/objects.mk b/lib/sbi/objects.mk +index e0c57631..ec9453ff 100644 +--- a/lib/sbi/objects.mk ++++ b/lib/sbi/objects.mk +@@ -79,6 +79,7 @@ libsbi-objs-y += sbi_trap.o + libsbi-objs-y += sbi_unpriv.o + libsbi-objs-y += sbi_expected_trap.o + libsbi-objs-y += sbi_pmp.o ++libsbi-objs-y += sbi_ras.o + + ## Add by Dong Du + # The Penglai related files here +diff --git a/lib/sbi/sbi_init.c b/lib/sbi/sbi_init.c +index 1b378d2f..d0afce20 100644 +--- a/lib/sbi/sbi_init.c ++++ b/lib/sbi/sbi_init.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -73,6 +74,7 @@ static void sbi_boot_print_general(struct sbi_scratch *scratch) + const struct sbi_timer_device *tdev; + const struct sbi_console_device *cdev; + 
const struct sbi_system_reset_device *srdev; ++ const struct sbi_ras_agent *ras_dev; + const struct sbi_platform *plat = sbi_platform_ptr(scratch); + + if (scratch->options & SBI_SCRATCH_NO_BOOT_PRINTS) +@@ -107,6 +109,9 @@ static void sbi_boot_print_general(struct sbi_scratch *scratch) + srdev = sbi_system_reset_get_device(SBI_SRST_RESET_TYPE_SHUTDOWN, 0); + sbi_printf("Platform Shutdown Device : %s\n", + (srdev) ? srdev->name : "---"); ++ ras_dev = sbi_ras_get_agent(); ++ sbi_printf("Platform RAS Device : %s\n", ++ (ras_dev) ? ras_dev->name : "---"); + + /* Firmware details */ + sbi_printf("Firmware Base : 0x%lx\n", scratch->fw_start); +diff --git a/lib/sbi/sbi_ras.c b/lib/sbi/sbi_ras.c +new file mode 100644 +index 00000000..2015502b +--- /dev/null ++++ b/lib/sbi/sbi_ras.c +@@ -0,0 +1,55 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2023 Ventana Micro Systems Inc. ++ * ++ * Author(s): ++ * Himanshu Chauhan ++ */ ++ ++#include ++#include ++#include ++ ++static const struct sbi_ras_agent *ras_agent = NULL; ++ ++const struct sbi_ras_agent *sbi_ras_get_agent(void) ++{ ++ return ras_agent; ++} ++ ++void sbi_ras_set_agent(const struct sbi_ras_agent *agent) ++{ ++ if (!agent || ras_agent) ++ return; ++ ++ ras_agent = agent; ++} ++ ++int sbi_ras_probe(void) ++{ ++ if (!ras_agent || !ras_agent->ras_probe) ++ return SBI_EFAIL; ++ ++ return ras_agent->ras_probe(); ++} ++ ++int sbi_ras_sync_hart_errs(u32 *pending_vectors, u32 *nr_pending, ++ u32 *nr_remaining) ++{ ++ if (!ras_agent) ++ return SBI_EFAIL; ++ ++ return ras_agent->ras_sync_hart_errs(pending_vectors, nr_pending, ++ nr_remaining); ++} ++ ++int sbi_ras_sync_dev_errs(u32 *pending_vectors, u32 *nr_pending, ++ u32 *nr_remaining) ++{ ++ if (!ras_agent) ++ return SBI_EFAIL; ++ ++ return ras_agent->ras_sync_dev_errs(pending_vectors, nr_pending, ++ nr_remaining); ++} +-- +2.27.0 + diff --git a/0005-lib-utils-Introduce-RAS-RPMI-based-driver.patch 
b/0005-lib-utils-Introduce-RAS-RPMI-based-driver.patch new file mode 100644 index 0000000000000000000000000000000000000000..15034aab90dbdd1fa3472844c2457d530745832c --- /dev/null +++ b/0005-lib-utils-Introduce-RAS-RPMI-based-driver.patch @@ -0,0 +1,286 @@ +From 0381b4bea9f8d349801cfb09a106eda615f0dc66 Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Mon, 23 Oct 2023 12:55:57 +0530 +Subject: [PATCH 05/16] lib: utils: Introduce RAS RPMI based driver + +- Add RAS driver which communicates to RAS agent over RPMI +- The driver is enumarated from the FDT entry + +Reference: https://github.com/ventanamicro/opensbi/commit/8b09e84fa12a94351026d6e4da06e8082be57579 + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + include/sbi_utils/mailbox/rpmi_msgprot.h | 48 ++++++++ + lib/utils/Kconfig | 2 + + lib/utils/ras/Kconfig | 18 +++ + lib/utils/ras/fdt_ras_rpmi.c | 142 +++++++++++++++++++++++ + lib/utils/ras/objects.mk | 11 ++ + 5 files changed, 221 insertions(+) + create mode 100644 lib/utils/ras/Kconfig + create mode 100644 lib/utils/ras/fdt_ras_rpmi.c + create mode 100644 lib/utils/ras/objects.mk + +diff --git a/include/sbi_utils/mailbox/rpmi_msgprot.h b/include/sbi_utils/mailbox/rpmi_msgprot.h +index a761b560..c79070a6 100644 +--- a/include/sbi_utils/mailbox/rpmi_msgprot.h ++++ b/include/sbi_utils/mailbox/rpmi_msgprot.h +@@ -703,4 +703,52 @@ struct rpmi_clock_get_rate_resp { + u32 clock_rate_high; + }; + ++/** RPMI RAS-Agent ServiceGroup Service IDs */ ++enum rpmi_ras_service_id { ++ RPMI_RAS_SRV_PROBE_REQ = 0x01, ++ RPMI_RAS_SRV_SYNC_HART_ERR_REQ, ++ RPMI_RAS_SRV_SYNC_DEV_ERR_REQ, ++ RPMI_RAS_SRV_GET_PEND_VECS_REQ, ++ RPMI_RAS_SRV_SYNC_ERR_RESP, ++ RPMI_RAS_SRV_MAX_COUNT, ++}; ++ ++struct rpmi_ras_probe_req { ++ u32 dummy; ++}; ++ ++struct rpmi_ras_probe_resp { ++ s32 status; ++ u32 version; ++}; ++ ++struct rpmi_ras_sync_hart_err_req { ++ u32 hart_id; ++}; ++ ++struct rpmi_ras_sync_dev_err_req { ++ u32 dummy; ++}; ++ 
++struct rpmi_ras_pend_vecs_req { ++#define INVALID_LAST_VEC 0xFFFFFFFFUL ++ u32 last_vec; ++}; ++ ++/* ++ * List of vectors needing attention. These might be ++ * more than that can be sent in single message. ++ * ++ * `remaining` will contain the number of vectors ++ * remaining. SBI implementation should request ++ * remaining vectors by GET_PEND_VECS request. ++ */ ++struct rpmi_ras_sync_err_resp { ++ s32 status; ++ u32 remaining; ++ u32 returned; ++#define MAX_PEND_VECS ((RPMI_MSG_DATA_SIZE(RPMI_SLOT_SIZE_MIN) - (sizeof(u32) * 3)) / sizeof(u32)) ++ u32 pending_vecs[MAX_PEND_VECS]; ++}; ++ + #endif /* !__RPMI_MSGPROT_H__ */ +diff --git a/lib/utils/Kconfig b/lib/utils/Kconfig +index 5a71e750..db05f5f8 100644 +--- a/lib/utils/Kconfig ++++ b/lib/utils/Kconfig +@@ -14,6 +14,8 @@ source "$(OPENSBI_SRC_DIR)/lib/utils/irqchip/Kconfig" + + source "$(OPENSBI_SRC_DIR)/lib/utils/libfdt/Kconfig" + ++source "$(OPENSBI_SRC_DIR)/lib/utils/ras/Kconfig" ++ + source "$(OPENSBI_SRC_DIR)/lib/utils/reset/Kconfig" + + source "$(OPENSBI_SRC_DIR)/lib/utils/serial/Kconfig" +diff --git a/lib/utils/ras/Kconfig b/lib/utils/ras/Kconfig +new file mode 100644 +index 00000000..08eb47e4 +--- /dev/null ++++ b/lib/utils/ras/Kconfig +@@ -0,0 +1,18 @@ ++# SPDX-License-Identifier: BSD-2-Clause ++ ++menu "RAS Agent Driver Support" ++ ++config FDT_RAS ++ bool "FDT based RAS drivers" ++ depends on FDT ++ default y ++ ++if FDT_RAS ++ ++config FDT_RAS_RPMI ++ bool "FDT RPMI RAS driver" ++ default y ++ ++endif ++ ++endmenu +diff --git a/lib/utils/ras/fdt_ras_rpmi.c b/lib/utils/ras/fdt_ras_rpmi.c +new file mode 100644 +index 00000000..8649d90b +--- /dev/null ++++ b/lib/utils/ras/fdt_ras_rpmi.c +@@ -0,0 +1,142 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2025 Ventana Micro Systems, Inc. 
++ * ++ * Author(s): ++ * Himanshu Chauhan ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct rpmi_ras { ++ struct mbox_chan *chan; ++}; ++ ++static struct rpmi_ras ras; ++ ++static int rpmi_ras_sync_hart_errs(u32 *pending_vectors, u32 *nr_pending, ++ u32 *nr_remaining) ++{ ++ int rc = SBI_SUCCESS; ++ struct rpmi_ras_sync_hart_err_req req; ++ struct rpmi_ras_sync_err_resp resp; ++ ++ if (!pending_vectors || !nr_pending || !nr_remaining) ++ return SBI_ERR_INVALID_PARAM; ++ ++ *nr_pending = *nr_remaining = 0; ++ ++ if (!ras.chan) ++ return SBI_ERR_INVALID_STATE; ++ ++ req.hart_id = current_hartid(); ++ ++ rc = rpmi_normal_request_with_status(ras.chan, ++ RPMI_RAS_SRV_SYNC_HART_ERR_REQ, ++ &req, rpmi_u32_count(req), ++ rpmi_u32_count(req), ++ &resp, rpmi_u32_count(resp), ++ rpmi_u32_count(resp)); ++ ++ if (rc) { ++ sbi_printf("%s: sync failed, rc: 0x%x\n", __func__, rc); ++ return rc; ++ } ++ ++ if (!resp.status && resp.returned > 0 && resp.returned < MAX_PEND_VECS) { ++ memcpy(pending_vectors, resp.pending_vecs, ++ resp.returned * sizeof(u32)); ++ *nr_pending = resp.returned; ++ *nr_remaining = resp.remaining; ++ } else { ++ if (resp.status) { ++ sbi_printf("%s: sync returned status %d\n", ++ __func__, resp.status); ++ } ++ ++ if (resp.returned < 0 || resp.returned > MAX_PEND_VECS) ++ sbi_printf("%s: invalid vector range returned %u\n", ++ __func__, resp.returned); ++ ++ return SBI_ERR_FAILED; ++ } ++ ++ return SBI_SUCCESS; ++} ++ ++static int rpmi_ras_sync_dev_errs(u32 *pending_vectors, u32 *nr_pending, ++ u32 *nr_remaining) ++{ ++ int rc = SBI_SUCCESS; ++ ++ return rc; ++} ++ ++static int rpmi_ras_probe(void) ++{ ++ int rc; ++ struct rpmi_ras_probe_resp resp; ++ struct rpmi_ras_probe_req req; ++ ++ if (!ras.chan) ++ return SBI_ERR_INVALID_STATE; ++ ++ rc = rpmi_normal_request_with_status( ++ ras.chan, RPMI_RAS_SRV_PROBE_REQ, ++ &req, rpmi_u32_count(req), rpmi_u32_count(req), ++ &resp, 
rpmi_u32_count(resp), rpmi_u32_count(resp)); ++ if (rc) ++ return rc; ++ ++ return 0; ++} ++ ++static struct sbi_ras_agent sbi_rpmi_ras_agent = { ++ .name = "rpmi-ras-agent", ++ .ras_sync_hart_errs = rpmi_ras_sync_hart_errs, ++ .ras_sync_dev_errs = rpmi_ras_sync_dev_errs, ++ .ras_probe = rpmi_ras_probe, ++}; ++ ++static int rpmi_ras_cold_init(const void *fdt, int nodeoff, ++ const struct fdt_match *match) ++{ ++ int rc; ++ ++ if (ras.chan) ++ return 0; ++ ++ /* ++ * If channel request failed then other end does not support ++ * RAS service group so do nothing. ++ */ ++ rc = fdt_mailbox_request_chan(fdt, nodeoff, 0, &ras.chan); ++ if (rc) ++ return rc; ++ ++ sbi_ras_set_agent(&sbi_rpmi_ras_agent); ++ ++ sbi_ras_probe(); ++ ++ return 0; ++} ++ ++static const struct fdt_match rpmi_ras_match[] = { ++ { .compatible = "riscv,rpmi-ras" }, ++ {}, ++}; ++ ++const struct fdt_driver fdt_ras_rpmi = { ++ .match_table = rpmi_ras_match, ++ .init = rpmi_ras_cold_init, ++}; +diff --git a/lib/utils/ras/objects.mk b/lib/utils/ras/objects.mk +new file mode 100644 +index 00000000..2200521e +--- /dev/null ++++ b/lib/utils/ras/objects.mk +@@ -0,0 +1,11 @@ ++# ++# SPDX-License-Identifier: BSD-2-Clause ++# ++# Copyright (c) 2025 Ventana Micro Systems Inc. 
++# ++# Authors: ++# Himanshu Chauhan ++# ++ ++carray-fdt_early_drivers-$(CONFIG_FDT_RAS_RPMI) += fdt_ras_rpmi ++libsbiutils-objs-$(CONFIG_FDT_RAS_RPMI) += ras/fdt_ras_rpmi.o +-- +2.27.0 + diff --git a/0006-lib-sbi-Add-RAS-high-priority-interrupt-handler.patch b/0006-lib-sbi-Add-RAS-high-priority-interrupt-handler.patch new file mode 100644 index 0000000000000000000000000000000000000000..0365420a3c2d5fb9a33af8a1325f6b686f04b21c --- /dev/null +++ b/0006-lib-sbi-Add-RAS-high-priority-interrupt-handler.patch @@ -0,0 +1,100 @@ +From 563dff1706f9fd87ff4839e1e8375bc37cff26b4 Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Mon, 23 Oct 2023 13:57:32 +0530 +Subject: [PATCH 06/16] lib: sbi: Add RAS high priority interrupt handler + +- Calls RAS drivers to synchronize errors with RAS agent +- Injects SSE events reported by RAS agent + +Reference: https://github.com/ventanamicro/opensbi/commit/db07c51da29a883a3aff71cefa8eaf1196c8cb17 + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + lib/sbi/sbi_trap.c | 38 ++++++++++++++++++++++++++++++++++++++ + 1 file changed, 38 insertions(+) + +diff --git a/lib/sbi/sbi_trap.c b/lib/sbi/sbi_trap.c +index e46e4e78..b8a39309 100644 +--- a/lib/sbi/sbi_trap.c ++++ b/lib/sbi/sbi_trap.c +@@ -19,9 +19,13 @@ + #include + #include + #include ++#include + #include + #include + #include ++#include ++#include ++#include + + #include + +@@ -203,6 +207,31 @@ int sbi_trap_redirect(struct sbi_trap_regs *regs, + return 0; + } + ++ ++void sbi_ras_process(void) ++{ ++ int rc; ++ u32 pending_vectors[MAX_PEND_VECS] = { 0xfffffffful }; ++ u32 nr_pending, nr_remaining; ++ ++#if __riscv_xlen == 32 ++ csr_clear(CSR_MIPH, MIPH_RASHP_INTP); ++#else ++ csr_clear(CSR_MIP, MIP_RASHP_INTP); ++#endif ++ ++ rc = sbi_ras_sync_hart_errs(pending_vectors, &nr_pending, &nr_remaining); ++ if (rc) ++ return; ++ ++ for (rc = 0; rc < nr_pending; rc++) ++ if (pending_vectors[rc] == SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS || ++ 
pending_vectors[rc] == SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS) ++ sbi_sse_inject_event(pending_vectors[rc]); ++ ++ return; ++} ++ + /* static int sbi_trap_nonaia_irq(struct sbi_trap_regs *regs, ulong mcause) + { + mcause &= ~(1UL << (__riscv_xlen - 1)); +@@ -213,6 +242,9 @@ int sbi_trap_redirect(struct sbi_trap_regs *regs, + case IRQ_M_SOFT: + sbi_ipi_process(); + break; ++ case IRQ_RASHP_INT: ++ sbi_ras_process(); ++ break; + case IRQ_M_EXT: + return sbi_irqchip_process(regs); + default: +@@ -241,6 +273,9 @@ int sbi_trap_redirect(struct sbi_trap_regs *regs, + if (rc) + return rc; + break; ++ case IRQ_RASHP_INT: ++ sbi_ras_process(); ++ break; + default: + return SBI_ENOENT; + } +@@ -298,6 +333,9 @@ struct sbi_trap_regs *sbi_trap_handler(struct sbi_trap_regs *regs) + if (rc) + goto trap_error; + break; ++ case IRQ_RASHP_INT: ++ sbi_ras_process(); ++ break; + default: + msg = "unhandled external interrupt"; + goto trap_error; +-- +2.27.0 + diff --git a/0007-include-sbi_utils-Add-reri-register-definitions.patch b/0007-include-sbi_utils-Add-reri-register-definitions.patch new file mode 100644 index 0000000000000000000000000000000000000000..e33edbff11769d70a6eb4da7a1ca05e6c653c41f --- /dev/null +++ b/0007-include-sbi_utils-Add-reri-register-definitions.patch @@ -0,0 +1,186 @@ +From 053b36d06f8f9f72272afd215987d7723d9628ce Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Wed, 29 May 2024 17:07:46 +0530 +Subject: [PATCH 07/16] include: sbi_utils: Add reri register definitions + +Add definitions for various reri error sources and error banks. 
+ +Reference: https://github.com/ventanamicro/opensbi/commit/bbe95fc6ac96845ad8a9333c722bf8c99b8d8c45 + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + include/sbi_utils/ras/riscv_reri_regs.h | 160 ++++++++++++++++++++++++ + 1 file changed, 160 insertions(+) + create mode 100644 include/sbi_utils/ras/riscv_reri_regs.h + +diff --git a/include/sbi_utils/ras/riscv_reri_regs.h b/include/sbi_utils/ras/riscv_reri_regs.h +new file mode 100644 +index 00000000..ac5e2df4 +--- /dev/null ++++ b/include/sbi_utils/ras/riscv_reri_regs.h +@@ -0,0 +1,160 @@ ++/* ++ * RISC-V RERI Registers Definitions ++ * ++ * Copyright (c) 2024 Ventana Micro Systems, Inc. ++ * ++ * Author(s): ++ * Himanshu Chauhan ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 or ++ * (at your option) any later version. ++ */ ++ ++#ifndef __RISCV_RERI_REGS_ ++#define __RISCV_RERI_REGS_ ++ ++#define MAX_ERROR_RECORDS 63 ++ ++/* RERI Error Codes ras-reri-v0.3 (Section 2.7) */ ++enum { ++ RERI_EC_NONE, ++ RERI_EC_OUE, /* Other unspecified error */ ++ RERI_EC_CDA, /* Corrupted data access */ ++ RERI_EC_CBA, /* Cache block data error */ ++ RERI_EC_CSD, /* Cache scrubbing detected */ ++ RERI_EC_CAS, /* Cache address/state error */ ++ RERI_EC_CUE, /* Cache unspecified error */ ++ RERI_EC_SDC, /* Snoop-filter/directory address/ control state */ ++ RERI_EC_SUE, /* Snoop-filter/directory unspecified error */ ++ RERI_EC_TPD, /* TLB/Page-walk cache data */ ++ RERI_EC_TPA, /* TLB/Page-walk address control state */ ++ RERI_EC_TPU, /* TLB/Page-walk unknown error */ ++ RERI_EC_HSE, /* Hart state error */ ++ RERI_EC_ICS, /* Interrupt controller state */ ++ RERI_EC_ITD, /* Interconnect data error */ ++ RERI_EC_ITO, /* Interconnection other error */ ++ RERI_EC_IWE, /* Internal watchdog error */ ++ RERI_EC_IDE, /* Internal datapath/memory or execution unit error */ ++ RERI_EC_SBE, /* 
System memory command or address bus error */ ++ RERI_EC_SMU, /* System memory unspecified error */ ++ RERI_EC_SMD, /* System memory data error */ ++ RERI_EC_SMS, /* System memory scrubbing detected error */ ++ RERI_EC_PIO, /* Protocol error illegal IO */ ++ RERI_EC_PUS, /* Protocol error unexpected state */ ++ RERI_EC_PTO, /* Protocol error timeout error */ ++ RERI_EC_SIC, /* System internal controller error */ ++ RERI_EC_DPU, /* Deferred error passthrough not supported */ ++ RERI_EC_PCX, /* PCI/CXL detected error */ ++ RERI_EC_RES, /* Reserved errors start */ ++ RERI_EC_REE = 63, /* Reserved errors end */ ++ RERI_EC_CES = 64, /* Custom error start */ ++ RERI_EC_CEE = 255, /* Custom error end */ ++ RERI_EC_INVALID, ++}; ++ ++enum { ++ RERI_TT_UNSPECIFIED, ++ RERI_TT_CUSTOM, ++ RERI_TT_RES1, ++ RERI_TT_RES2, ++ RERI_TT_EXPLICIT_READ, ++ RERI_TT_EXPLICIT_WRITE, ++ RERI_TT_IMPLICIT_READ, ++ RERI_TT_IMPLICIT_WRITE, ++ RERI_TT_INVALID, ++}; ++ ++typedef union riscv_reri_control { ++ struct __attribute__((__packed__)) { ++ uint16_t ele:1; ++ uint16_t cece:1; ++ uint16_t sinv:1; ++ uint16_t rsvd0:1; ++ uint16_t ces:2; ++ uint16_t udes:2; ++ uint16_t uues:2; ++ uint16_t rsvd1:6; ++ ++ uint16_t rsvd2; ++ ++ uint16_t eid; ++ ++ uint16_t rsvd:8; ++ uint16_t cust:8; ++ }; ++ uint64_t value; ++} riscv_reri_control; ++ ++#define RERI_CTRL_MASK 0xFFFF000001FDull ++ ++typedef union riscv_reri_status { ++ struct __attribute__((__packed__)) { ++ uint16_t v:1; ++ uint16_t ce:1; ++ uint16_t de:1; ++ uint16_t ue:1; ++ uint16_t pri:2; ++ uint16_t mo:1; ++ uint16_t c:1; ++ uint16_t tt:3; ++ uint16_t iv:1; ++ uint16_t at:4; ++ ++ uint16_t siv:1; ++ uint16_t tsv:1; ++ uint16_t rsvd0:2; ++ uint16_t scrub:1; ++ uint16_t ceco:1; ++ uint16_t rsvd1:2; ++ uint16_t ec:8; ++ ++ uint16_t rsvd2; ++ ++ uint16_t cec:16; ++ }; ++ uint64_t value; ++} riscv_reri_status; ++ ++#define RERI_STS_MASK 0x7800FF3FFFFEull ++ ++typedef struct __packed riscv_reri_error_record { ++ riscv_reri_control control_i; ++ 
riscv_reri_status status_i; ++ uint64_t addr_i; ++ uint64_t info_i; ++ uint64_t suppl_info_i; ++ uint64_t timestamp_i; ++ uint64_t reserved; ++ uint64_t custom; ++} riscv_reri_error_record; ++ ++typedef union riscv_reri_bank_info { ++ struct __packed { ++ uint16_t inst_id; ++ uint16_t n_err_recs; ++ uint64_t reserved0:24; ++ uint8_t version:8; ++ }; ++ uint64_t value; ++} riscv_reri_bank_info; ++ ++typedef union riscv_reri_vendor_imp_id { ++ struct __packed { ++ uint32_t vendor_id; ++ uint16_t imp_id; ++ uint16_t reserved; ++ }; ++ uint64_t value; ++} riscv_reri_vendor_imp_id; ++ ++typedef struct __packed riscv_reri_error_bank { ++ riscv_reri_vendor_imp_id vendor_n_imp_id; ++ riscv_reri_bank_info bank_info; ++ uint64_t valid_summary; ++ uint64_t reserved[2]; ++ uint64_t custom[3]; ++ riscv_reri_error_record records[MAX_ERROR_RECORDS]; ++} riscv_reri_error_bank; ++ ++#endif /* __RISCV_RERI_REGS_ */ +-- +2.27.0 + diff --git a/0008-lib-utils-Add-support-for-error-source-discovery-and.patch b/0008-lib-utils-Add-support-for-error-source-discovery-and.patch new file mode 100644 index 0000000000000000000000000000000000000000..b50f2b51a4c8f5219b0424c832e2eb8054a6e160 --- /dev/null +++ b/0008-lib-utils-Add-support-for-error-source-discovery-and.patch @@ -0,0 +1,875 @@ +From 907dcf1fc2beb87d2c7d0af5a811945bca1fdf27 Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Wed, 29 May 2024 17:09:57 +0530 +Subject: [PATCH 08/16] lib: utils: Add support for error source discovery and + error logging + +- Add various data structures relating to GHES/CPER. +- Add memory allocation for various error sources that are discovered + from FDT +- Log Memory(2) and Generic CPU Error in CPER format +- Add function that can be used by S-mode software to discovery error + sources at boot time. 
+ +Reference: https://github.com/ventanamicro/opensbi/commit/d39f498aef60f0d02ed543b7bc0d634aa02a708a + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + include/sbi_utils/ras/apei_tables.h | 351 +++++++++++++++++++++++ + include/sbi_utils/ras/ghes.h | 50 ++++ + lib/utils/ras/ghes.c | 415 ++++++++++++++++++++++++++++ + lib/utils/ras/objects.mk | 2 + + 4 files changed, 818 insertions(+) + create mode 100644 include/sbi_utils/ras/apei_tables.h + create mode 100644 include/sbi_utils/ras/ghes.h + create mode 100644 lib/utils/ras/ghes.c + +diff --git a/include/sbi_utils/ras/apei_tables.h b/include/sbi_utils/ras/apei_tables.h +new file mode 100644 +index 00000000..20c61c43 +--- /dev/null ++++ b/include/sbi_utils/ras/apei_tables.h +@@ -0,0 +1,351 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems, Inc. ++ * ++ * Author(s): ++ * Himanshu Chauhan ++ */ ++ ++#ifndef __APEI_TABLES_H ++#define __APEI_TABLES_H ++ ++#define MAX_ERR_SRCS 32 ++#define MAX_ERR_RECS 1 ++#define MAX_SECS_PER_REC 1 ++ ++enum acpi_ghes_notification_type { ++ /* Polled */ ++ ACPI_GHES_NOTIFY_POLLED = 0, ++ /* External Interrupt */ ++ ACPI_GHES_NOTIFY_EXTERNAL = 1, ++ /* Local Interrupt */ ++ ACPI_GHES_NOTIFY_LOCAL = 2, ++ /* SCI */ ++ ACPI_GHES_NOTIFY_SCI = 3, ++ /* NMI */ ++ ACPI_GHES_NOTIFY_NMI = 4, ++ /* CMCI, ACPI 5.0: 18.3.2.7, Table 18-290 */ ++ ACPI_GHES_NOTIFY_CMCI = 5, ++ /* MCE, ACPI 5.0: 18.3.2.7, Table 18-290 */ ++ ACPI_GHES_NOTIFY_MCE = 6, ++ /* GPIO-Signal, ACPI 6.0: 18.3.2.7, Table 18-332 */ ++ ACPI_GHES_NOTIFY_GPIO = 7, ++ /* ARMv8 SEA, ACPI 6.1: 18.3.2.9, Table 18-345 */ ++ ACPI_GHES_NOTIFY_SEA = 8, ++ /* ARMv8 SEI, ACPI 6.1: 18.3.2.9, Table 18-345 */ ++ ACPI_GHES_NOTIFY_SEI = 9, ++ /* External Interrupt - GSIV, ACPI 6.1: 18.3.2.9, Table 18-345 */ ++ ACPI_GHES_NOTIFY_GSIV = 10, ++ /* Software Delegated Exception, ACPI 6.2: 18.3.2.9, Table 18-383 */ ++ ACPI_GHES_NOTIFY_SDEI = 11, ++ /* RISCV 
Supervisor Software Event */ ++ ACPI_GHES_NOTIFY_SSE = 12, ++ /* 12 and greater are reserved */ ++ ACPI_GHES_NOTIFY_RESERVED = 13 ++}; ++ ++/* ++ * Error Source IDs for GHES. These are just place holders ++ * and each platform can define its own source ID for ++ * each error source. ++ */ ++enum { ++ ACPI_GHES_DRAM_ERROR_SOURCE_ID, ++ ACPI_GHES_GENERIC_CPU_ERROR_SOURCE_ID, ++ ACPI_GHES_SOURCE_ID_MAX, ++}; ++ ++typedef struct { ++ uint64_t ghes_addr_le; ++ bool present; /* True if GHES is present at all on this board */ ++} acpi_ghes_state; ++ ++enum { ++ ERROR_TYPE_MEM, ++ ERROR_TYPE_GENERIC_CPU, ++ ERROR_TYPE_MAX, ++}; ++ ++enum { ++ GPE_PROC_TYPE_VALID_BIT, ++ GPE_PROC_ISA_VALID_BIT, ++ GPE_PROC_ERR_TYPE_VALID_BIT, ++ GPE_OP_VALID_BIT, ++ GPE_FLAGS_VALID_BIT, ++ GPE_LEVEL_VALID_BIT, ++ GPE_CPU_VERSION_VALID_BIT, ++ GPE_CPU_BRAND_STRING_VALID_BIT, ++ GPE_CPU_ID_VALID_BIT, ++ GPE_TARGET_ADDR_VALID_BIT, ++ GPE_REQ_IDENT_VALID_BIT, ++ GPE_RESP_IDENT_VALID_BIT, ++ GPE_IP_VALID_BIT, ++ GPE_BIT_RESERVED_BITS, ++}; ++ ++#define GPE_PROC_TYPE_VALID (1ul << GPE_PROC_TYPE_VALID_BIT) ++#define GPE_PROC_ISA_VALID (1ul << GPE_PROC_ISA_VALID_BIT) ++#define GPE_PROC_ERR_TYPE_VALID (1ul << GPE_PROC_ERR_TYPE_VALID_BIT) ++#define GPE_OP_VALID (1ul << GPE_OP_VALID_BIT) ++#define GPE_FLAGS_VALID (1ul << GPE_FLAGS_VALID_BIT) ++#define GPE_LEVEL_VALID (1ul << GPE_LEVEL_VALID_BIT) ++#define GPE_CPU_VERSION_VALID (1ul << GPE_CPU_VERSION_VALID_BIT) ++#define GPE_CPU_BRAND_STRING_VALID (1ul << GPE_CPU_BRAND_STRING_VALID_BIT) ++#define GPE_CPU_ID_VALID (1ul << GPE_CPU_ID_VALID_BIT) ++#define GPE_TARGET_ADDR_VALID (1ul << GPE_TARGET_ADDR_VALID_BIT) ++#define GPE_REQ_IDENT_VALID (1ul << GPE_REQ_IDENT_VALID_BIT) ++#define GPE_RESP_IDENT_VALID (1ul << GPE_RESP_IDENT_VALID_BIT) ++#define GPE_IP_VALID (1ul << GPE_IP_VALID_BIT) ++ ++enum { ++ GHES_PROC_TYPE_IA32X64, ++ GHES_PROC_TYPE_IA64, ++ GHES_PROC_TYPE_ARM, ++ GHES_PROC_TYPE_RISCV, ++}; ++ ++enum { ++ GHES_PROC_ISA_IA32, ++ 
GHES_PROC_ISA_IA64, ++ GHES_PROC_ISA_X64, ++ GHES_PROC_ISA_ARM_A32, ++ GHES_PROC_ISA_ARM_A64, ++ GHES_PROC_ISA_RISCV32, ++ GHES_PROC_ISA_RISCV64, ++}; ++ ++enum { ++ AML_AS_SYSTEM_MEMORY = 0X00, ++ AML_AS_SYSTEM_IO = 0X01, ++ AML_AS_PCI_CONFIG = 0X02, ++ AML_AS_EMBEDDED_CTRL = 0X03, ++ AML_AS_SMBUS = 0X04, ++ AML_AS_FFH = 0X7F, ++}; ++ ++typedef struct __packed { ++ uint8_t asid; ++ uint8_t reg_bwidth; ++ uint8_t reg_boffs; ++ uint8_t access_sz; ++ uint64_t address; ++} acpi_gas; ++ ++typedef struct __packed { ++ uint8_t type; ++ uint8_t length; ++ uint16_t config_we; ++ uint32_t poll_interval; ++ uint32_t vector; ++ uint32_t poll_switch_thresh; ++ uint32_t poll_switch_thresh_win; ++ uint32_t err_thresh; ++ uint32_t err_thresh_win; ++} acpi_ghes_notif; ++ ++typedef struct __packed { ++ uint16_t type; ++ uint16_t src_id; ++ uint16_t rel_id; ++ uint8_t flags; ++ uint8_t enabled; ++ uint32_t num_rec_pre_alloc; ++ uint32_t max_sec_per_rec; ++ uint32_t max_raw_dlen; ++ acpi_gas gas; ++ acpi_ghes_notif notif; ++ uint32_t err_status_block_len; ++} acpi_ghes; ++ ++typedef struct __packed { ++ acpi_ghes ghes; ++ acpi_gas ack_reg; ++ uint64_t ack_preserve; ++ uint64_t ack_write; ++} acpi_ghesv2; ++ ++typedef struct __packed { ++ uint8_t type[16]; ++} acpi_ghes_section_type; ++ ++/* ++ * CPER Format ++ * ++ * +------------------------------------+ ++ * | Record Header | ++ * +------------------------------------+ ++ * | Section Descriptor | ++ * +------------------------------------+ ++ * | Section Descriptor (N) | ++ * +------------------------------------+ ++ * | Section | ++ * +------------------------------------+ ++ * | Section (N) | ++ * +------------------------------------+ ++ */ ++typedef struct __packed { ++ uint8_t sign[4]; ++ uint16_t rev; ++ uint32_t sign_end; ++ uint16_t section_count; ++ uint32_t err_sev; ++ uint32_t vbits; /* validation bits */ ++ uint32_t rec_len; ++ uint64_t timestamp; ++ uint8_t plat_id[16]; ++ uint8_t part_id[48]; ++ uint8_t 
creator_id[16]; ++ uint8_t notif_type[16]; ++ uint64_t rec_id; /* record id */ ++ uint32_t flags; ++ uint64_t persistence_info; ++ uint8_t resvd[12]; ++} cper_header; ++ ++typedef struct __packed { ++ uint32_t section_offs; ++ uint32_t section_len; ++ uint16_t rev; ++ uint8_t vbits; /* validation bits */ ++ uint8_t resvd; ++ uint32_t flags; ++ acpi_ghes_section_type section_type; ++ uint8_t fru_id[16]; ++ uint32_t section_sev; /* Severity */ ++ uint8_t fru_text[20]; ++} cper_section_desc; ++ ++typedef struct __packed { ++ uint64_t vbits; /* validation bits */ ++ uint8_t proc_type; ++ uint8_t proc_isa; ++ uint8_t proc_err_type; ++ uint8_t operation; ++ uint8_t flags; ++ uint8_t level; ++ uint16_t resvd; ++ uint64_t cpu_version_info; ++ uint8_t cpu_brand_string[128]; ++ uint64_t proc_id; ++ uint64_t target_addr; ++ uint64_t requestor_id; ++ uint64_t responder_id; ++ uint64_t ins_ip; /* Instruction IP */ ++} cper_gen_proc_sec; ++ ++typedef struct __packed { ++ uint64_t vbits; ++ uint64_t err_status; ++ uint64_t phys_addr; ++ uint64_t phys_addr_mask; ++ uint16_t node; ++ uint16_t card; ++ uint16_t module; ++ uint16_t bank; ++ uint32_t device; ++ uint32_t row; ++ uint32_t column; ++ uint32_t rank; ++ uint32_t bit_pos; ++ uint8_t chip_id; ++ uint8_t err_type; ++ uint8_t status; ++ uint8_t resvd; ++ uint64_t requestor_id; ++ uint64_t responder_id; ++ uint64_t target_id; ++ uint32_t card_handle; ++ uint32_t module_handle; ++} cper_mem2_sec; ++ ++typedef union __packed { ++ cper_gen_proc_sec ps; ++ cper_mem2_sec ms; ++} cper_section; ++ ++/* CPER Record */ ++typedef struct __packed { ++ cper_section sections[MAX_SECS_PER_REC]; ++} acpi_ghes_cper; ++ ++typedef struct __packed { ++ acpi_ghes_section_type type; ++ uint32_t err_sev; ++ uint16_t rev; ++ uint8_t vbits; ++ uint8_t flags; ++ uint32_t err_dlen; ++ uint8_t fru_id[16]; ++ uint8_t fru_text[20]; ++ acpi_ghes_cper cpers[MAX_ERR_RECS]; ++} acpi_ghes_data_entry; ++ ++typedef struct __packed { ++ uint32_t block_status; ++ 
uint32_t raw_doffs; ++ uint32_t raw_dlen; ++ uint32_t data_len; ++ uint32_t err_sev; ++ acpi_ghes_data_entry entry; ++} acpi_ghes_status_block; ++ ++#define KiB 1024 ++ ++/* The max size in bytes for one error block */ ++#define ACPI_GHES_MAX_RAW_DATA_LENGTH (1 * KiB) ++ ++/* Generic Hardware Error Source version 2 */ ++#define ACPI_GHES_SOURCE_GENERIC_ERROR_V2 10 ++ ++/* Address offset in Generic Address Structure(GAS) */ ++#define GAS_ADDR_OFFSET 4 ++ ++/* ++ * The total size of Generic Error Data Entry ++ * ACPI 6.1/6.2: 18.3.2.7.1 Generic Error Data, ++ * Table 18-343 Generic Error Data Entry ++ */ ++#define ACPI_GHES_DATA_LENGTH 64 ++ ++/* The memory section CPER size, UEFI 2.6: N.2.5 Memory Error Section */ ++#define ACPI_GHES_MEM_CPER_LENGTH 80 ++ ++/* The generic cpu CPER size, UEFI 2.10 2.4.1 Generic processor error */ ++#define ACPI_GHES_GENERIC_CPU_CPER_LENGTH 192 ++ ++/* Masks for block_status flags */ ++#define ACPI_GEBS_UNCORRECTABLE (0x1UL << 0) ++#define ACPI_GEBS_CORRECTABLE (0x1UL << 1) ++#define ACPI_GEBS_MULTI_UNCORRECTABLE (0x1UL << 2) ++#define ACPI_GEBS_MULTI_CORRECTABLE (0x1UL << 3) ++ ++/* ++ * Total size for Generic Error Status Block except Generic Error Data Entries ++ * ACPI 6.2: 18.3.2.7.1 Generic Error Data, ++ * Table 18-380 Generic Error Status Block ++ */ ++#define ACPI_GHES_GESB_SIZE 20 ++ ++/* ++ * Values for error_severity field ++ */ ++enum acpi_generic_error_severity { ++ ACPI_CPER_SEV_RECOVERABLE = 0, ++ ACPI_CPER_SEV_FATAL = 1, ++ ACPI_CPER_SEV_CORRECTED = 2, ++ ACPI_CPER_SEV_NONE = 3, ++}; ++ ++typedef struct { ++ uint8_t b[16]; ++} uuid_le; ++ ++#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ ++((uuid_le) \ ++{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \ ++ (b) & 0xff, ((b) >> 8) & 0xff, \ ++ (c) & 0xff, ((c) >> 8) & 0xff, \ ++ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }}) ++ ++#endif /* __APEI_TABLES_H */ +diff --git a/include/sbi_utils/ras/ghes.h 
b/include/sbi_utils/ras/ghes.h +new file mode 100644 +index 00000000..b3b43bbd +--- /dev/null ++++ b/include/sbi_utils/ras/ghes.h +@@ -0,0 +1,50 @@ ++/* ++ * RISC-V RERI Registers Definitions ++ * ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems, Inc. ++ * ++ * Author(s): ++ * Himanshu Chauhan ++ * ++ */ ++ ++#ifndef __ACPI_GHES_H ++#define __ACPI_GHES_H ++ ++typedef struct { ++ uint32_t etype; ++ union { ++ struct { ++ uint32_t validation_bits; ++ uint32_t sev; ++ uint8_t proc_type; ++ uint8_t proc_isa; ++ uint8_t proc_err_type; ++ uint8_t operation; ++ uint8_t flags; ++ uint8_t level; ++ uint64_t cpu_version; ++ uint8_t cpu_brand_string[128]; ++ uint64_t cpu_id; ++ uint64_t target_addr; ++ uint64_t req_ident; ++ uint64_t resp_ident; ++ uint64_t ip; ++ } gpe; /* generic processor error */ ++ ++ struct { ++ uint64_t physical_address; ++ } me; /* DRAM Error */ ++ } info; ++} acpi_ghes_error_info; ++ ++void acpi_ghes_init(uint64_t addr, uint64_t size); ++int acpi_ghes_new_error_source(uint64_t err_src_id, uint64_t sse_v); ++void acpi_ghes_record_errors(uint8_t source_id, acpi_ghes_error_info *einfo); ++int acpi_ghes_get_num_err_srcs(void); ++int acpi_ghes_get_err_srcs_list(uint32_t *src_ids, uint32_t sz); ++int acpi_ghes_get_err_src_desc(uint32_t src_id, acpi_ghesv2 *ghes); ++ ++#endif +diff --git a/lib/utils/ras/ghes.c b/lib/utils/ras/ghes.c +new file mode 100644 +index 00000000..af9636f9 +--- /dev/null ++++ b/lib/utils/ras/ghes.c +@@ -0,0 +1,415 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems, Inc. 
++ * ++ * Author(s): ++ * Himanshu Chauhan ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* Structure for ghesv2 memory allocation housekeeping */ ++typedef struct { ++ uint64_t init_done; ++ uint64_t ghes_gas_reg_mem_sz; ++ uint64_t ghes_gas_reg_addr; ++ uint64_t ghes_gas_reg_addr_curr; ++ uint64_t ghes_gas_reg_end_addr; ++ uint64_t ghes_err_addr; ++ uint64_t ghes_err_mem_sz; ++ uint64_t ghes_err_addr_curr; ++ uint64_t ghes_err_end_addr; ++} acpi_ghes_mem_info; ++ ++typedef u64 read_ack_reg_t; ++ ++/* Ghes entries for error sources */ ++static acpi_ghesv2 *err_sources; ++static int err_src_alloc_idx; ++ ++/* ghesv2 memory housekeeping */ ++static acpi_ghes_mem_info ghes_mem; ++ ++/* GAS registers that will contain status block addresses */ ++static u64 *gas_register_allocs[MAX_ERR_SRCS]; ++static int gai; ++ ++#define ROUNDUP_2_64B(sz) ((sz + 0x40) & ~(0x40 - 1)) ++ ++/* OSPM reach/ack register is at the end of each status block */ ++#define GHES_BLOCK_STATUS_SZ (sizeof(acpi_ghes_status_block) + sizeof(read_ack_reg_t)) ++ ++void acpi_ghes_init(uint64_t addr, uint64_t size) ++{ ++ if (size == 0 || addr == 0) ++ return; ++ ++ /* Allocate memory for GHES entries */ ++ err_sources = sbi_malloc(sizeof(acpi_ghesv2) * MAX_ERR_SRCS); ++ if (err_sources == NULL) ++ return; ++ ++ /* Initialize GHES memory pool */ ++ memset(&ghes_mem, 0, sizeof(ghes_mem)); ++ memset(&err_sources[0], 0, sizeof(err_sources)); ++ ghes_mem.ghes_err_addr_curr = ghes_mem.ghes_err_addr = addr; ++ ghes_mem.ghes_err_mem_sz = size; ++ ghes_mem.ghes_err_end_addr = (addr + (ROUNDUP_2_64B(GHES_BLOCK_STATUS_SZ) * MAX_ERR_SRCS + 64)); ++ ++ /* Initialize the generic address structure pool */ ++ ghes_mem.ghes_gas_reg_mem_sz = sizeof(u64) * MAX_ERR_SRCS; ++ ghes_mem.ghes_gas_reg_addr = ghes_mem.ghes_err_end_addr + 64; ++ ghes_mem.ghes_gas_reg_addr_curr = ghes_mem.ghes_gas_reg_addr; ++ 
ghes_mem.ghes_gas_reg_end_addr = (ghes_mem.ghes_gas_reg_addr + ghes_mem.ghes_gas_reg_mem_sz); ++ ghes_mem.init_done = 1; ++} ++ ++/* ++ * Each status block contains address to a register which then points to ++ * the status block. Allocate memory for u64* pointer which will contain ++ * pointer to status block. Address of this register is stored in GAS entry ++ */ ++static u64 *acpi_ghes_gas_register_alloc(void) ++{ ++ u64 addr; ++ ++ if (ghes_mem.ghes_gas_reg_addr_curr >= ghes_mem.ghes_gas_reg_end_addr) ++ return NULL; ++ ++ addr = ghes_mem.ghes_gas_reg_addr_curr; ++ ghes_mem.ghes_gas_reg_addr_curr += sizeof(u64); ++ ++ gas_register_allocs[gai] = (u64 *)(ulong)addr; ++ gai++; ++ ++ return (u64 *)(ulong)addr; ++} ++ ++static void *acpi_ghes_alloc(uint64_t size) ++{ ++ uint64_t naddr; ++ uint64_t nsz; ++ ++ if (!ghes_mem.init_done) ++ return NULL; ++ ++ /* if not multiple of 64-bytes */ ++ if (size & (0x40 - 1)) ++ /* round up to next 64 bytes */ ++ nsz = ROUNDUP_2_64B(size); ++ else ++ nsz = size; ++ ++ if (ghes_mem.ghes_err_addr_curr + nsz >= ghes_mem.ghes_err_end_addr) ++ return NULL; ++ ++ ++ naddr = ghes_mem.ghes_err_addr_curr; ++ ghes_mem.ghes_err_addr_curr = ghes_mem.ghes_err_addr_curr + nsz; ++ ++ return ((void *)(ulong)naddr); ++} ++ ++int acpi_ghes_new_error_source(uint64_t err_src_id, uint64_t sse_v) ++{ ++ acpi_ghesv2 *err_src; ++ acpi_ghes_status_block *sblock; ++ u64 rar_addr; /* read ack register address */ ++ u64 *baddr; ++ ++ if (err_src_alloc_idx >= MAX_ERR_SRCS) ++ return SBI_EINVAL; ++ ++ if (!ghes_mem.init_done) ++ return SBI_EINVAL; ++ ++ /* allocate generic address structure register */ ++ baddr = acpi_ghes_gas_register_alloc(); ++ if (baddr == NULL) ++ return SBI_EINVAL; ++ ++ /* Allocate GHESv2 for source */ ++ err_src = (err_sources + err_src_alloc_idx); ++ ++ err_src->ghes.type = ACPI_GHES_SOURCE_GENERIC_ERROR_V2; ++ err_src->ghes.enabled = 1; ++ err_src->ghes.src_id = err_src_id; ++ err_src->ghes.num_rec_pre_alloc = MAX_ERR_RECS; ++ 
err_src->ghes.max_sec_per_rec = MAX_SECS_PER_REC; ++ ++ /* Allocate GHESv2 status block 8*/ ++ sblock = acpi_ghes_alloc(GHES_BLOCK_STATUS_SZ); ++ if (sblock == NULL) ++ return -1; ++ ++ err_src->ghes.err_status_block_len = sizeof(*sblock); ++ ++ /* OSPM Acknowledgement address (end of status block) */ ++ rar_addr = (ulong)((ulong)sblock + sizeof(acpi_ghes_status_block)); ++ /* Store status block pointer to GAS register address */ ++ *baddr = (ulong)sblock; ++ ++ /* Initialize GAS */ ++ err_src->ghes.gas.asid = AML_AS_SYSTEM_MEMORY; ++ err_src->ghes.gas.reg_bwidth = sizeof(u64) * 8; /* 64-bit wide access */ ++ err_src->ghes.gas.reg_boffs = 0x0; ++ err_src->ghes.gas.access_sz = 4; /* quad word access */ ++ err_src->ghes.gas.address = (ulong)baddr; /* GAS register address */ ++ ++ /* Initialize read ack register */ ++ err_src->ack_reg.address = rar_addr; ++ err_src->ack_reg.reg_bwidth = sizeof(read_ack_reg_t) * 8; ++ err_src->ack_reg.reg_boffs = 0; ++ err_src->ack_reg.access_sz = 4; ++ err_src->ack_preserve = ~(1UL << sse_v); ++ err_src->ack_write = (1UL << sse_v); ++ ++ /* Initialize notification structure */ ++ err_src->ghes.notif.type = ACPI_GHES_NOTIFY_SSE; ++ err_src->ghes.notif.length = 28; ++ err_src->ghes.notif.config_we = 0; ++ err_src->ghes.notif.poll_interval = 0; ++ err_src->ghes.notif.vector = sse_v; ++ err_src->ghes.notif.poll_switch_thresh = 0; ++ err_src->ghes.notif.poll_switch_thresh_win = 0; ++ err_src->ghes.notif.err_thresh = 0; ++ err_src->ghes.notif.err_thresh_win = 0; ++ ++ err_src_alloc_idx++; ++ ++ return 0; ++} ++ ++static acpi_ghesv2 *find_error_source_by_id(uint8_t src_id) ++{ ++ int i; ++ acpi_ghesv2 *err_src; ++ ++ for (i = 0; i < err_src_alloc_idx; i++) { ++ err_src = &err_sources[i]; ++ if (err_src->ghes.src_id == src_id) ++ return err_src; ++ } ++ ++ return NULL; ++} ++ ++static int ospm_acked_prev_err(acpi_gas *read_ack_register, ++ uint64_t ack_preserve, uint64_t ack_write) ++{ ++ uint64_t resp; ++ ++ /* If there is no ack register, 
assume the previous error ack'ed */ ++ if (!read_ack_register->address) ++ return 1; ++ ++ resp = *((volatile uint64_t *)(ulong)read_ack_register->address); ++ ++ /* If register contains zero, assume its acked */ ++ if (!resp) ++ return 1; ++ ++ resp &= ack_preserve; ++ resp |= ack_write; ++ ++ return !!resp; ++} ++ ++static void ghes_record_mem_error(acpi_ghes_status_block *error_block, ++ uint64_t error_physical_addr) ++{ ++ /* Memory Error Section Type */ ++ const uuid_le uefi_cper_mem_sec = ++ UUID_LE(0xA5BC1114, 0x6F64, 0x4EDE, 0xB8, 0x63, 0x3E, 0x83, \ ++ 0xED, 0x7C, 0x83, 0xB1); ++ uint32_t data_length; ++ acpi_ghes_data_entry *dentry; ++ cper_mem2_sec *msec; ++ ++ /* This is the length if adding a new generic error data entry*/ ++ data_length = ACPI_GHES_DATA_LENGTH + ACPI_GHES_MEM_CPER_LENGTH; ++ ++ /* Build the new generic error status block header */ ++ error_block->block_status = ACPI_GEBS_UNCORRECTABLE; ++ error_block->raw_doffs = 0; ++ error_block->raw_dlen = 0; ++ error_block->data_len = data_length; ++ error_block->err_sev = ACPI_CPER_SEV_RECOVERABLE; ++ ++ /* Build generic data entry header */ ++ dentry = &error_block->entry; ++ memcpy(dentry->type.type, &uefi_cper_mem_sec, sizeof(dentry->type)); ++ dentry->err_sev = ACPI_CPER_SEV_RECOVERABLE; ++ dentry->vbits = 0; ++ dentry->flags = 0; ++ dentry->err_dlen = ACPI_GHES_MEM_CPER_LENGTH; ++ memset(dentry->fru_id, 0, sizeof(dentry->fru_id)); ++ ++ msec = &error_block->entry.cpers[0].sections[0].ms; ++ memset(msec, 0, sizeof(*msec)); ++ msec->vbits |= 0x1UL; ++ msec->phys_addr = error_physical_addr; ++ msec->phys_addr_mask = (uint64_t)-1; ++} ++ ++static void ghes_record_generic_cpu_error(acpi_ghes_status_block *error_block, ++ acpi_ghes_error_info *einfo) ++{ ++ acpi_ghes_data_entry *dentry; ++ cper_gen_proc_sec *psec; ++ ++ /* Generic CPU Error Section Type */ ++ const uuid_le uefi_cper_generic_cpu_sec = ++ UUID_LE(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, \ ++ 0xF1, 0x93, 0xC4, 0xF3, 0xDB); ++ ++ 
uint32_t data_length; ++ ++ /* This is the length if adding a new generic error data entry */ ++ data_length = ACPI_GHES_DATA_LENGTH + ACPI_GHES_GENERIC_CPU_CPER_LENGTH; ++ ++ /* Build the generic error status block */ ++ error_block->block_status = ACPI_GEBS_UNCORRECTABLE; ++ error_block->raw_doffs = 0; ++ error_block->raw_dlen = 0; ++ error_block->data_len = data_length; ++ error_block->err_sev = einfo->info.gpe.sev; ++ ++ /* Build generic data entry header */ ++ dentry = &error_block->entry; ++ memcpy(dentry->type.type, &uefi_cper_generic_cpu_sec, ++ sizeof(dentry->type)); ++ dentry->err_sev = einfo->info.gpe.sev; ++ dentry->vbits = 0; ++ dentry->flags = 0; ++ dentry->err_dlen = ACPI_GHES_GENERIC_CPU_CPER_LENGTH; ++ memset(dentry->fru_id, 0, sizeof(dentry->fru_id)); ++ ++ /* generi processor error section */ ++ psec = &error_block->entry.cpers[0].sections[0].ps; ++ psec->vbits = einfo->info.gpe.validation_bits; ++ ++ /* Processor Type */ ++ if (einfo->info.gpe.validation_bits & GPE_PROC_TYPE_VALID) ++ psec->proc_type = einfo->info.gpe.proc_type; ++ /* ISA */ ++ if (einfo->info.gpe.validation_bits & GPE_PROC_ISA_VALID) ++ psec->proc_isa = einfo->info.gpe.proc_isa; ++ /* Error Type */ ++ if (einfo->info.gpe.validation_bits & GPE_PROC_ERR_TYPE_VALID) ++ psec->proc_err_type = einfo->info.gpe.proc_err_type; ++ /* Operation */ ++ if (einfo->info.gpe.validation_bits & GPE_OP_VALID) ++ psec->operation = einfo->info.gpe.operation; ++ /* Flags */ ++ if (einfo->info.gpe.validation_bits & GPE_FLAGS_VALID) ++ psec->flags = einfo->info.gpe.flags; ++ /* Level */ ++ if (einfo->info.gpe.validation_bits & GPE_LEVEL_VALID) ++ psec->level = einfo->info.gpe.level; ++ ++ /* Reserved field - must always be zero */ ++ psec->resvd = 0; ++ ++ /* CPU version */ ++ if (einfo->info.gpe.validation_bits & GPE_CPU_VERSION_VALID) ++ psec->cpu_version_info = einfo->info.gpe.cpu_version; ++ ++ if (einfo->info.gpe.validation_bits & GPE_CPU_ID_VALID) ++ psec->proc_id = einfo->info.gpe.cpu_id; ++ ++ 
if (einfo->info.gpe.validation_bits & GPE_TARGET_ADDR_VALID) ++ psec->target_addr = einfo->info.gpe.target_addr; ++ ++ if (einfo->info.gpe.validation_bits & GPE_REQ_IDENT_VALID) ++ psec->requestor_id = einfo->info.gpe.req_ident; ++ ++ if (einfo->info.gpe.validation_bits & GPE_RESP_IDENT_VALID) ++ psec->responder_id = einfo->info.gpe.resp_ident; ++ ++ if (einfo->info.gpe.validation_bits & GPE_IP_VALID) ++ psec->ins_ip = einfo->info.gpe.ip; ++} ++ ++void acpi_ghes_record_errors(uint8_t source_id, acpi_ghes_error_info *einfo) ++{ ++ acpi_ghesv2 *err_src; ++ acpi_ghes_status_block *sblock; ++ u64 *gas; ++ ++ err_src = find_error_source_by_id(source_id); ++ if (!err_src) ++ return; ++ ++ if (!ospm_acked_prev_err(&err_src->ack_reg, err_src->ack_preserve, ++ err_src->ack_write)) { ++ sbi_printf("OSPM hasn't acknowledged the previous error. New " ++ "error record cannot be created.\n"); ++ return; ++ } ++ ++ /* ++ * FIXME: Read gas address via a function that respects the ++ * gas parameters. Don't read directly after typecast. ++ */ ++ gas = (u64 *)(ulong)err_src->ghes.gas.address; ++ sblock = (acpi_ghes_status_block *)(ulong)(*gas); ++ ++ if (einfo->etype == ERROR_TYPE_MEM && einfo->info.me.physical_address) { ++ ghes_record_mem_error(sblock, einfo->info.me.physical_address); ++ } else if (einfo->etype == ERROR_TYPE_GENERIC_CPU) { ++ ghes_record_generic_cpu_error(sblock, einfo); ++ } else { ++ sbi_printf("%s: Unknown error type %u\n", __func__, einfo->etype); ++ } ++} ++ ++/* ++ * Functions to provide error source information over ++ * mpxy or other transport. 
++ */ ++int acpi_ghes_get_num_err_srcs(void) ++{ ++ return err_src_alloc_idx; ++} ++ ++int acpi_ghes_get_err_srcs_list(uint32_t *src_ids, uint32_t sz) ++{ ++ int i; ++ acpi_ghesv2 *src; ++ ++ src = &err_sources[0]; ++ ++ for (i = 0; i < err_src_alloc_idx; i++) { ++ src_ids[i] = src->ghes.src_id; ++ src++; ++ } ++ ++ return err_src_alloc_idx; ++} ++ ++int acpi_ghes_get_err_src_desc(uint32_t src_id, acpi_ghesv2 *ghes) ++{ ++ acpi_ghesv2 *g; ++ ++ g = find_error_source_by_id(src_id); ++ ++ if (g == NULL) ++ return -SBI_ENOENT; ++ ++ memcpy(ghes, g, sizeof(acpi_ghesv2)); ++ ++ return 0; ++} +diff --git a/lib/utils/ras/objects.mk b/lib/utils/ras/objects.mk +index 2200521e..9035fd50 100644 +--- a/lib/utils/ras/objects.mk ++++ b/lib/utils/ras/objects.mk +@@ -9,3 +9,5 @@ + + carray-fdt_early_drivers-$(CONFIG_FDT_RAS_RPMI) += fdt_ras_rpmi + libsbiutils-objs-$(CONFIG_FDT_RAS_RPMI) += ras/fdt_ras_rpmi.o ++ ++libsbiutils-objs-$(CONFIG_FDT_SBI_RAS_AGENT) += ras/ghes.o +-- +2.27.0 + diff --git a/0009-lib-utils-Add-support-for-RAS-agent-in-OpenSBI.patch b/0009-lib-utils-Add-support-for-RAS-agent-in-OpenSBI.patch new file mode 100644 index 0000000000000000000000000000000000000000..a90ea14ca84557b8dfd8446cc90088755d9d49e0 --- /dev/null +++ b/0009-lib-utils-Add-support-for-RAS-agent-in-OpenSBI.patch @@ -0,0 +1,581 @@ +From 5aa5c71882704a3e0c3395ecb2ba3b0a10d9635f Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Wed, 29 May 2024 17:20:55 +0530 +Subject: [PATCH 09/16] lib: utils: Add support for RAS agent in OpenSBI + +The RAS agent in OpenSBI get information about various error sources +in the system and their properties. The properties include the reri +bank addresse, the notification type, SSE vector, etc. It parses the +error sources and allocates buffers for APEI Error Status block for +each error source. On an error, the reri registers of the error sources +are read and their corresponding CPER record is generated and logged. 
+ +Reference: https://github.com/ventanamicro/opensbi/commit/1e529e9e10d32a87aa91a865b860bdcfb7239687 + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + include/sbi_utils/ras/reri_drv.h | 18 ++ + lib/utils/ras/fdt_ras_agent.c | 88 +++++++ + lib/utils/ras/objects.mk | 4 + + lib/utils/ras/reri_drv.c | 415 +++++++++++++++++++++++++++++++ + 4 files changed, 525 insertions(+) + create mode 100644 include/sbi_utils/ras/reri_drv.h + create mode 100644 lib/utils/ras/fdt_ras_agent.c + create mode 100644 lib/utils/ras/reri_drv.c + +diff --git a/include/sbi_utils/ras/reri_drv.h b/include/sbi_utils/ras/reri_drv.h +new file mode 100644 +index 00000000..c3d9e4d3 +--- /dev/null ++++ b/include/sbi_utils/ras/reri_drv.h +@@ -0,0 +1,18 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems, Inc. ++ * ++ * Author(s): ++ * Himanshu Chauhan ++ */ ++ ++#ifndef __RERI_DRV_H ++#define __RERI_DRV_H ++ ++#include ++ ++int reri_drv_init(const void *fdt, int nodeoff, const struct fdt_match *match); ++int reri_drv_sync_hart_errs(u32 hart_id, u32 *pending_vectors); ++ ++#endif +diff --git a/lib/utils/ras/fdt_ras_agent.c b/lib/utils/ras/fdt_ras_agent.c +new file mode 100644 +index 00000000..f93a1638 +--- /dev/null ++++ b/lib/utils/ras/fdt_ras_agent.c +@@ -0,0 +1,88 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems, Inc. 
++ * ++ * Author(s): ++ * Himanshu Chauhan ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static int ra_init_done = 0; ++ ++static int sbi_ras_agent_sync_hart_errs(u32 *pending_vectors, u32 *nr_pending, ++ u32 *nr_remaining) ++{ ++ u32 hart_id = current_hartid(); ++ u32 num_errs; ++ ++ if (!ra_init_done) ++ return -1; ++ ++ if ((num_errs = reri_drv_sync_hart_errs(hart_id, pending_vectors)) == 0) ++ return SBI_EFAIL; ++ ++ *nr_pending = num_errs; ++ *nr_remaining = 0; ++ ++ return 0; ++} ++ ++static int sbi_ras_agent_sync_dev_errs(u32 *pending_vectors, u32 *nr_pending, ++ u32 *nr_remaining) ++{ ++ return SBI_SUCCESS; ++} ++ ++static int sbi_ras_agent_probe(void) ++{ ++ return 0; ++} ++ ++static struct sbi_ras_agent sbi_ras_agent = { ++ .name = "sbi-ras-agent", ++ .ras_sync_hart_errs = sbi_ras_agent_sync_hart_errs, ++ .ras_sync_dev_errs = sbi_ras_agent_sync_dev_errs, ++ .ras_probe = sbi_ras_agent_probe, ++}; ++ ++static int sbi_ras_agent_cold_init(const void *fdt, int nodeoff, ++ const struct fdt_match *match) ++{ ++ int ret; ++ ++ /* initialize reri driver */ ++ ret = reri_drv_init(fdt, nodeoff, match); ++ if (ret) ++ return ret; ++ ++ /* ready to handle errors */ ++ sbi_ras_set_agent(&sbi_ras_agent); ++ ++ ra_init_done = 1; ++ ++ return 0; ++} ++ ++static const struct fdt_match sbi_ras_agent_match[] = { ++ { .compatible = "riscv,sbi-ras-agent" }, ++ {}, ++}; ++ ++const struct fdt_driver fdt_sbi_ras_agent = { ++ .match_table = sbi_ras_agent_match, ++ .init = sbi_ras_agent_cold_init, ++}; +diff --git a/lib/utils/ras/objects.mk b/lib/utils/ras/objects.mk +index 9035fd50..1b59de83 100644 +--- a/lib/utils/ras/objects.mk ++++ b/lib/utils/ras/objects.mk +@@ -11,3 +11,7 @@ carray-fdt_early_drivers-$(CONFIG_FDT_RAS_RPMI) += fdt_ras_rpmi + libsbiutils-objs-$(CONFIG_FDT_RAS_RPMI) += ras/fdt_ras_rpmi.o + + libsbiutils-objs-$(CONFIG_FDT_SBI_RAS_AGENT) += ras/ghes.o ++ 
++carray-fdt_early_drivers-$(CONFIG_FDT_SBI_RAS_AGENT) += fdt_sbi_ras_agent ++libsbiutils-objs-$(CONFIG_FDT_SBI_RAS_AGENT) += ras/fdt_ras_agent.o ++libsbiutils-objs-$(CONFIG_FDT_SBI_RAS_AGENT) += ras/reri_drv.o +diff --git a/lib/utils/ras/reri_drv.c b/lib/utils/ras/reri_drv.c +new file mode 100644 +index 00000000..ae011e29 +--- /dev/null ++++ b/lib/utils/ras/reri_drv.c +@@ -0,0 +1,415 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems, Inc. ++ * ++ * Author(s): ++ * Himanshu Chauhan ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct reri_generic_dev { ++ uint64_t addr; ++ uint64_t size; ++ uint32_t sse_vector; ++ uint16_t src_id; ++ uint16_t res; ++}; ++ ++typedef struct reri_generic_dev reri_dram_dev_t; ++ ++typedef struct reri_hart_dev { ++ struct reri_generic_dev dev; ++ int hart_id; ++} reri_hart_dev_t; ++ ++static reri_hart_dev_t *reri_hart_devices = NULL; ++static reri_dram_dev_t reri_dram_dev; ++static uint32_t reri_nr_harts = 0; ++ ++#define RERI_HART_COMPAT "riscv,reri-harts" ++#define RERI_DRAM_COMPAT "riscv,reri-dram" ++#define APEI_MEM_COMPAT "riscv,apei-mem" ++#define RERI_ERR_BANK_SIZE 0x1000 ++ ++static uint64_t riscv_reri_dev_read_u64(void *dev_addr) ++{ ++ return *((volatile uint64_t *)dev_addr); ++} ++ ++static void riscv_reri_dev_write_u64(void *dev_addr, uint64_t value) ++{ ++ *((volatile uint64_t *)dev_addr) = value; ++} ++ ++static void riscv_reri_clear_valid_bit(void *control_addr) ++{ ++ uint64_t control; ++ ++ control = riscv_reri_dev_read_u64(control_addr); ++ ++ /* set SINV */ ++ control |= 0x4; ++ ++ riscv_reri_dev_write_u64(control_addr, control); ++} ++ ++static reri_hart_dev_t *get_reri_hart_dev(int hart_id) ++{ ++ int i; ++ ++ for (i = 0; i < reri_nr_harts; i++) { ++ if (reri_hart_devices[i].hart_id == hart_id) { ++ return &reri_hart_devices[i]; ++ } 
++ } ++ ++ return NULL; ++} ++ ++static int riscv_reri_get_hart_addr(int hart_id, uint64_t *hart_addr, ++ uint64_t *size) ++{ ++ reri_hart_dev_t *reri_hart; ++ ++ reri_hart = get_reri_hart_dev(hart_id); ++ if (!reri_hart) ++ return SBI_ENOENT; ++ ++ *hart_addr = reri_hart->dev.addr; ++ *size = reri_hart->dev.size; ++ ++ return SBI_SUCCESS; ++} ++ ++static uint32_t riscv_reri_get_hart_sse_vector(int hart_id, uint32_t *sse_vector) ++{ ++ reri_hart_dev_t *reri_hart; ++ ++ reri_hart = get_reri_hart_dev(hart_id); ++ if (!reri_hart) ++ return SBI_ENOENT; ++ ++ *sse_vector = reri_hart->dev.sse_vector; ++ ++ return 0; ++} ++ ++static uint32_t riscv_reri_get_hart_src_id(int hart_id, uint32_t *hart_src_id) ++{ ++ reri_hart_dev_t *reri_hart; ++ ++ reri_hart = get_reri_hart_dev(hart_id); ++ if (!reri_hart) ++ return SBI_ENOENT; ++ ++ *hart_src_id = reri_hart->dev.src_id; ++ ++ return 0; ++} ++ ++static int fdt_parse_reri_device(const void *fdt, int nodeoff) ++{ ++ int len, i, nr_harts, hart_phandle, cpu_offset, ret = SBI_SUCCESS; ++ const fdt32_t *sse_vec_p, *src_id_p, *target_harts_p, *hart_id_p; ++ const char *cpu_status; ++ uint64_t addr, size; ++ uint32_t sse_vec; ++ uint16_t src_id; ++ ++ if ((ret = fdt_node_check_compatible(fdt, nodeoff, ++ RERI_DRAM_COMPAT)) == 0) { ++ ret = fdt_get_node_addr_size((void *)fdt, nodeoff, 0, &addr, &size); ++ if (ret < 0) ++ return ret; ++ reri_dram_dev.addr = addr; ++ reri_dram_dev.size = size; ++ ++ /* ++ * This should be M-mode and S-mode shared region for ++ * error injection support. 
++ */ ++ ret = sbi_domain_root_add_memrange(addr, size, PAGE_SIZE, ++ (SBI_DOMAIN_MEMREGION_MMIO | ++ SBI_DOMAIN_MEMREGION_READABLE| ++ SBI_DOMAIN_MEMREGION_WRITEABLE)); ++ if (ret < 0) ++ return ret; ++ ++ sse_vec_p = fdt_getprop(fdt, nodeoff, "sse-event-id", &len); ++ if (!sse_vec_p) ++ return SBI_ENOENT; ++ sse_vec = fdt32_to_cpu(*sse_vec_p); ++ reri_dram_dev.sse_vector = sse_vec; ++ ++ src_id_p = fdt_getprop(fdt, nodeoff, "source-id", &len); ++ if (!src_id_p) ++ return SBI_ENOENT; ++ src_id = fdt32_to_cpu(*src_id_p); ++ reri_dram_dev.src_id = src_id; ++ ++ if ((ret = acpi_ghes_new_error_source(src_id, sse_vec)) < 0) { ++ sbi_printf("Failed to create new DRAM error source\n"); ++ return ret; ++ } ++ ++ ret = sbi_sse_add_event(sse_vec, NULL); ++ ret = (ret != SBI_EALREADY) ? ret : 0; ++ if (ret) { ++ sbi_printf("Failed to add SSE event %u for error source %u\n", ++ sse_vec, src_id); ++ return ret; ++ } ++ } else if ((ret = fdt_node_check_compatible(fdt, nodeoff, ++ RERI_HART_COMPAT)) == 0) { ++ ret = fdt_get_node_addr_size((void *)fdt, nodeoff, 0, &addr, &size); ++ if (ret < 0) ++ return ret; ++ ++ /* ++ * This should be M-mode and S-mode shared region for ++ * error injection support. 
++ */ ++ ret = sbi_domain_root_add_memrange(addr, size, PAGE_SIZE, ++ (SBI_DOMAIN_MEMREGION_MMIO | ++ SBI_DOMAIN_MEMREGION_READABLE| ++ SBI_DOMAIN_MEMREGION_WRITEABLE)); ++ if (ret < 0) ++ return ret; ++ ++ sse_vec_p = fdt_getprop(fdt, nodeoff, "sse-event-id", &len); ++ if (!sse_vec_p) ++ return SBI_ENOENT; ++ sse_vec = fdt32_to_cpu(*sse_vec_p); ++ ++ src_id_p = fdt_getprop(fdt, nodeoff, "base-source-id", &len); ++ if (!src_id_p) ++ return SBI_ENOENT; ++ src_id = fdt32_to_cpu(*src_id_p); ++ ++ target_harts_p = fdt_getprop(fdt, nodeoff, "target-harts", &len); ++ if (target_harts_p && len >= sizeof(fdt32_t)) { ++ reri_nr_harts = nr_harts = len / sizeof(fdt32_t); ++ reri_hart_devices = (reri_hart_dev_t *)sbi_malloc(sizeof(reri_hart_dev_t) ++ * nr_harts); ++ if (!reri_hart_devices) ++ return SBI_ENOMEM; ++ ++ memset(reri_hart_devices, 0, sizeof(reri_hart_dev_t) * nr_harts); ++ ++ for (i = 0; i < nr_harts; i++) { ++ reri_hart_devices[i].hart_id = -1; /* set of invalid */ ++ ++ hart_phandle = fdt32_to_cpu(target_harts_p[i]); ++ ++ cpu_offset = fdt_node_offset_by_phandle(fdt, hart_phandle); ++ if (cpu_offset < 0) ++ return SBI_ENOENT; ++ ++ cpu_status = fdt_getprop(fdt, cpu_offset, "status", &len); ++ if (cpu_status && ++ strncmp(cpu_status, "okay", strlen("okay")) != 0 && ++ strncmp(cpu_status, "ok", strlen("ok")) != 0) ++ continue; ++ ++ hart_id_p = fdt_getprop(fdt, cpu_offset, "reg", &len); ++ if (!hart_id_p) ++ continue; ++ ++ if ((ret = acpi_ghes_new_error_source(src_id, sse_vec)) < 0) ++ continue; ++ ++ ret = sbi_sse_add_event(sse_vec, NULL); ++ ret = (ret != SBI_EALREADY) ? 
ret : 0; ++ if (ret) { ++ sbi_printf("Failed to add SSE event %u\n", sse_vec); ++ return ret; ++ } ++ ++ reri_hart_devices[i].dev.addr = (addr + (i * RERI_ERR_BANK_SIZE)); ++ reri_hart_devices[i].dev.size = RERI_ERR_BANK_SIZE; ++ reri_hart_devices[i].dev.sse_vector = sse_vec; ++ reri_hart_devices[i].dev.src_id = src_id++; ++ reri_hart_devices[i].hart_id = fdt32_to_cpu(*hart_id_p); ++ } ++ } else { ++ return SBI_ENOENT; ++ } ++ } ++ ++ return ret; ++} ++ ++int reri_drv_init(const void *fdt, int nodeoff, const struct fdt_match *match) ++{ ++ int ret, doffset, moffset, len; ++ uint64_t addr, size; ++ const fdt32_t *rm_handle_p; ++ uint32_t rm_handle; ++ ++ rm_handle_p = fdt_getprop(fdt, nodeoff, "reserved-memory-handle", &len); ++ if (!rm_handle_p) ++ return SBI_ENOENT; ++ ++ rm_handle = fdt32_to_cpu(*rm_handle_p); ++ moffset = fdt_node_offset_by_phandle(fdt, rm_handle); ++ if (moffset < 0) ++ return SBI_ENOENT; ++ ++ if ((ret = fdt_get_node_addr_size((void *)fdt, moffset, 0, &addr, ++ &size)) == 0) { ++ /* HACK: why size is zero? 
*/ ++ if (size == 0) ++ size = 0x80000; ++ ++ ret = sbi_domain_root_add_memrange(addr, size, PAGE_SIZE, ++ SBI_DOMAIN_MEMREGION_READABLE| ++ SBI_DOMAIN_MEMREGION_WRITEABLE); ++ if (ret < 0) ++ return ret; ++ ++ acpi_ghes_init(addr, size); ++ } ++ ++ fdt_for_each_subnode(doffset, fdt, nodeoff) { ++ if (fdt_parse_reri_device(fdt, doffset) != 0) ++ continue; ++ } ++ ++ return SBI_SUCCESS; ++} ++ ++int reri_drv_sync_hart_errs(u32 hart_id, u32 *pending_vectors) ++{ ++ int ret; ++ riscv_reri_error_bank *heb; ++ riscv_reri_status status; ++ uint64_t hart_addr, err_size; ++ uint64_t eaddr; ++ acpi_ghes_error_info einfo; ++ uint32_t hart_src_id, sse_vector; ++ ++ if (riscv_reri_get_hart_addr(hart_id, &hart_addr, &err_size) != 0) ++ return 0; ++ ++ if (riscv_reri_get_hart_src_id(hart_id, &hart_src_id) != 0) ++ return 0; ++ ++ heb = (riscv_reri_error_bank *)(ulong)hart_addr; ++ status.value = riscv_reri_dev_read_u64(&heb->records[0].status_i.value); ++ ++ eaddr = riscv_reri_dev_read_u64(&heb->records[0].addr_i); ++ ++ /* Error is valid process it */ ++ if (status.v == 1) { ++ riscv_reri_clear_valid_bit(&heb->records[0].control_i.value); ++ if (status.ce) ++ einfo.info.gpe.sev = 2; ++ else if (status.de) ++ einfo.info.gpe.sev = 0; /* deferred, recoverable? 
*/ ++ else if (status.ue) ++ einfo.info.gpe.sev = 1; /* fatal error */ ++ else ++ einfo.info.gpe.sev = 3; /* Unknown */ ++ ++ einfo.info.gpe.validation_bits = (GPE_PROC_TYPE_VALID | ++ GPE_PROC_ISA_VALID | ++ GPE_PROC_ERR_TYPE_VALID); ++ ++ einfo.info.gpe.proc_type = GHES_PROC_TYPE_RISCV; ++ einfo.info.gpe.proc_isa = GHES_PROC_ISA_RISCV64; ++ ++ if (status.tt && ++ (status.tt >= 4 && status.tt <= 7)) { ++ einfo.info.gpe.validation_bits |= GPE_OP_VALID; ++ ++ /* Transaction type */ ++ switch(status.tt) { ++ case RERI_TT_IMPLICIT_READ: ++ einfo.info.gpe.operation = 3; ++ break; ++ case RERI_TT_EXPLICIT_READ: ++ einfo.info.gpe.operation = 1; ++ break; ++ case RERI_TT_IMPLICIT_WRITE: ++ case RERI_TT_EXPLICIT_WRITE: ++ einfo.info.gpe.operation = 2; ++ break; ++ default: ++ einfo.info.gpe.operation = 0; ++ break; ++ } ++ ++ /* Translate error codes from RERI */ ++ switch(status.ec) { ++ case RERI_EC_CBA: ++ case RERI_EC_CSD: ++ case RERI_EC_CAS: ++ case RERI_EC_CUE: ++ einfo.info.gpe.proc_err_type = 0x01; ++ break; ++ case RERI_EC_TPD: ++ case RERI_EC_TPA: ++ case RERI_EC_TPU: ++ einfo.info.gpe.proc_err_type = 0x02; ++ break; ++ case RERI_EC_SBE: ++ einfo.info.gpe.proc_err_type = 0x04; ++ break; ++ case RERI_EC_HSE: ++ case RERI_EC_ITD: ++ case RERI_EC_ITO: ++ case RERI_EC_IWE: ++ case RERI_EC_IDE: ++ case RERI_EC_SMU: ++ case RERI_EC_SMD: ++ case RERI_EC_SMS: ++ case RERI_EC_PIO: ++ case RERI_EC_PUS: ++ case RERI_EC_PTO: ++ case RERI_EC_SIC: ++ einfo.info.gpe.proc_err_type = 0x08; ++ break; ++ default: ++ einfo.info.gpe.proc_err_type = 0x00; ++ break; ++ } ++ } ++ ++ /* Address type */ ++ if (status.at) { ++ einfo.info.gpe.validation_bits |= GPE_TARGET_ADDR_VALID; ++ einfo.info.gpe.target_addr = eaddr; ++ } ++ ++ einfo.etype = ERROR_TYPE_GENERIC_CPU; ++ ++ /* Update the CPER record */ ++ acpi_ghes_record_errors(hart_src_id, &einfo); ++ ++ if ((ret = riscv_reri_get_hart_sse_vector(hart_id, &sse_vector)) != 0) ++ return ret; ++ ++ *pending_vectors = sse_vector; ++ ++ /* 
TODO: Return number of errors recorded */ ++ return 1; ++ } ++ ++ return 0; ++} +-- +2.27.0 + diff --git a/0010-lib-utils-Add-RAS-agent-service-group-on-MPXY.patch b/0010-lib-utils-Add-RAS-agent-service-group-on-MPXY.patch new file mode 100644 index 0000000000000000000000000000000000000000..2b9e4159c6bbfca2d0dd7eca3c0a9cfa280e3bbf --- /dev/null +++ b/0010-lib-utils-Add-RAS-agent-service-group-on-MPXY.patch @@ -0,0 +1,269 @@ +From f6bcaa7b4428e07bafaa241d81105e02f9eca136 Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Wed, 29 May 2024 17:39:20 +0530 +Subject: [PATCH 10/16] lib: utils: Add RAS agent service group on MPXY + +Add various services of RAS agent + - Discovery of error sources from S-mode software + - Get list of unique error source ID + - Get error source descriptor for a specifi error source + +The service is exported on MPXY with RPMI as backend. + +Reference: https://github.com/ventanamicro/opensbi/commit/0aa68a94cd036be97c3c8d9dd361604573c4ba3a + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + include/sbi_utils/ras/ras_agent_mpxy.h | 24 ++++ + lib/utils/ras/fdt_ras_agent.c | 6 + + lib/utils/ras/objects.mk | 1 + + lib/utils/ras/ras_agent_mpxy.c | 171 +++++++++++++++++++++++++ + 4 files changed, 202 insertions(+) + create mode 100644 include/sbi_utils/ras/ras_agent_mpxy.h + create mode 100644 lib/utils/ras/ras_agent_mpxy.c + +diff --git a/include/sbi_utils/ras/ras_agent_mpxy.h b/include/sbi_utils/ras/ras_agent_mpxy.h +new file mode 100644 +index 00000000..76bbea51 +--- /dev/null ++++ b/include/sbi_utils/ras/ras_agent_mpxy.h +@@ -0,0 +1,24 @@ ++/* ++ * RISC-V RERI Registers Definitions ++ * ++ * Copyright (c) 2024 Ventana Micro Systems, Inc. ++ * ++ * Author(s): ++ * Himanshu Chauhan ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 or ++ * (at your option) any later version. 
++ */ ++ ++#ifndef __RAS_AGENT_MPXY_H ++#define __RAS_AGENT_MPXY_H ++ ++/* RAS Agent Services on MPXY/RPMI */ ++#define RAS_GET_NUM_ERR_SRCS 0x1 ++#define RAS_GET_ERR_SRCS_ID_LIST 0x2 ++#define RAS_GET_ERR_SRC_DESC 0x3 ++ ++int ras_mpxy_init(const void *fdt, int nodeoff); ++ ++#endif +diff --git a/lib/utils/ras/fdt_ras_agent.c b/lib/utils/ras/fdt_ras_agent.c +index f93a1638..1f5fac0c 100644 +--- a/lib/utils/ras/fdt_ras_agent.c ++++ b/lib/utils/ras/fdt_ras_agent.c +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -69,6 +70,11 @@ static int sbi_ras_agent_cold_init(const void *fdt, int nodeoff, + if (ret) + return ret; + ++ /* initialize mpxy driver for ras agent */ ++ ret = ras_mpxy_init(fdt, nodeoff); ++ if (ret) ++ return ret; ++ + /* ready to handle errors */ + sbi_ras_set_agent(&sbi_ras_agent); + +diff --git a/lib/utils/ras/objects.mk b/lib/utils/ras/objects.mk +index 1b59de83..fd670ebf 100644 +--- a/lib/utils/ras/objects.mk ++++ b/lib/utils/ras/objects.mk +@@ -15,3 +15,4 @@ libsbiutils-objs-$(CONFIG_FDT_SBI_RAS_AGENT) += ras/ghes.o + carray-fdt_early_drivers-$(CONFIG_FDT_SBI_RAS_AGENT) += fdt_sbi_ras_agent + libsbiutils-objs-$(CONFIG_FDT_SBI_RAS_AGENT) += ras/fdt_ras_agent.o + libsbiutils-objs-$(CONFIG_FDT_SBI_RAS_AGENT) += ras/reri_drv.o ++libsbiutils-objs-$(CONFIG_FDT_SBI_RAS_AGENT) += ras/ras_agent_mpxy.o +diff --git a/lib/utils/ras/ras_agent_mpxy.c b/lib/utils/ras/ras_agent_mpxy.c +new file mode 100644 +index 00000000..0a027010 +--- /dev/null ++++ b/lib/utils/ras/ras_agent_mpxy.c +@@ -0,0 +1,171 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2024 Ventana Micro Systems, Inc. 
++ * ++ * Author(s): ++ * Himanshu Chauhan ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct __packed ras_rpmi_resp_hdr { ++ u32 status; ++ u32 flags; ++ u32 remaining; ++ u32 returned; ++}; ++ ++static struct sbi_mpxy_channel ra_mpxy_ch; ++static int ras_agent_read_attributes(struct sbi_mpxy_channel *channel, u32 *outmem, ++ u32 base_attr_id, u32 attr_count); ++static int ras_handle_message(struct sbi_mpxy_channel *channel, u32 msg_id, ++ void *msgbuf, u32 msg_len, void *respbuf, ++ u32 resp_max_len, unsigned long *resp_len); ++#define MAX_RAS_RPMI_PROPS (MPXY_MSGPROT_RPMI_ATTR_MAX_ID - \ ++ SBI_MPXY_ATTR_MSGPROTO_ATTR_START) ++#define RAS_AGENT_RPMI_SVCGRP_ID 0xC ++#define RAS_AGENT_RPMI_SVCGRP_VER SBI_MPXY_MSGPROTO_VERSION(1, 0) ++/* FIXME: Need separate RPMI implementation ID for OpenSBI */ ++#define RAS_AGENT_RPMI_IMPL_ID 0x0 ++#define RAS_AGENT_RPMI_IMPL_VER SBI_MPXY_MSGPROTO_VERSION(1, 0) ++ ++static u32 ras_rpmi_props[MAX_RAS_RPMI_PROPS] = { RAS_AGENT_RPMI_SVCGRP_ID, ++ RAS_AGENT_RPMI_SVCGRP_VER, ++ RAS_AGENT_RPMI_IMPL_ID, ++ RAS_AGENT_RPMI_IMPL_VER }; ++ ++int ras_mpxy_init(const void *fdt, int nodeoff) ++{ ++ const fdt32_t *chan_id_p; ++ int rc, len; ++ u32 chan_id; ++ ++ memset(&ra_mpxy_ch, 0, sizeof(ra_mpxy_ch)); ++ ++ chan_id_p = fdt_getprop(fdt, nodeoff, "riscv,sbi-mpxy-channel-id", &len); ++ if (!chan_id_p) ++ return SBI_ENOENT; ++ ++ chan_id = fdt32_to_cpu(*chan_id_p); ++ ++ ra_mpxy_ch.channel_id = chan_id; ++ ra_mpxy_ch.send_message_with_response = ras_handle_message; ++ ra_mpxy_ch.send_message_without_response = NULL; ++ ra_mpxy_ch.read_attributes = ras_agent_read_attributes; ++ ra_mpxy_ch.get_notification_events = NULL; ++ ra_mpxy_ch.switch_eventsstate = NULL; ++ ra_mpxy_ch.attrs.msg_data_maxlen = 4096; ++ ++ rc = sbi_mpxy_register_channel(&ra_mpxy_ch); ++ if (rc != SBI_SUCCESS) ++ return rc; ++ ++ 
return SBI_SUCCESS; ++} ++ ++int ras_agent_read_attributes(struct sbi_mpxy_channel *channel, ++ u32 *outmem, ++ u32 base_attr_id, ++ u32 attr_count) ++{ ++ u32 end_id = base_attr_id + attr_count - 1; ++ u32 index, prop_index; ++ ++ if (end_id >= MPXY_MSGPROT_RPMI_ATTR_MAX_ID) ++ return SBI_ERR_BAD_RANGE; ++ ++ if (!outmem) ++ return SBI_ERR_INVALID_PARAM; ++ ++ prop_index = base_attr_id - SBI_MPXY_ATTR_MSGPROTO_ATTR_START; ++ for (index = 0; index < attr_count; index++) ++ outmem[index] = cpu_to_le32(ras_rpmi_props[index + prop_index]); ++ ++ return 0; ++} ++ ++#define BUF_TO_DATA(_msg_buf) \ ++ (((uint8_t *)_msg_buf) + sizeof(struct ras_rpmi_resp_hdr)) ++ ++static int ras_handle_message(struct sbi_mpxy_channel *channel, u32 msg_id, ++ void *msgbuf, u32 msg_len, void *respbuf, ++ u32 resp_max_len, unsigned long *resp_len) ++{ ++ int rc = SBI_SUCCESS; ++ int nr, nes; ++ u32 *src_list; ++ u32 src_id; ++ uint8_t *src_desc; ++ struct ras_rpmi_resp_hdr *rhdr = (struct ras_rpmi_resp_hdr *)respbuf; ++ u32 *nsrcs; ++#define MAX_ID_BUF_SZ (sizeof(u32) * MAX_ERR_SRCS) ++ ++ switch(msg_id) { ++ case RAS_GET_NUM_ERR_SRCS: ++ if (!respbuf) ++ return -SBI_EINVAL; ++ ++ memset(respbuf, 0, resp_max_len); ++ nes = acpi_ghes_get_num_err_srcs(); ++ rhdr->flags = 0; ++ rhdr->status = RPMI_SUCCESS; ++ rhdr->remaining = 0; ++ rhdr->returned = cpu_to_le32(nes); ++ ++ nsrcs = (u32 *)BUF_TO_DATA(respbuf); ++ *nsrcs = cpu_to_le32(nes); ++ *resp_len = sizeof(*rhdr) + (sizeof(u32)); ++ rc = SBI_SUCCESS; ++ break; ++ ++ case RAS_GET_ERR_SRCS_ID_LIST: ++ if (!respbuf) ++ return -SBI_EINVAL; ++ ++ src_list = (u32 *)BUF_TO_DATA(respbuf); ++ ++ nr = acpi_ghes_get_err_srcs_list(src_list, ++ resp_max_len/sizeof(u32)); ++ ++ rhdr->status = RPMI_SUCCESS; ++ rhdr->returned = nr; ++ rhdr->remaining = 0; ++ *resp_len = sizeof(*rhdr) + (sizeof(u32) * nr); ++ break; ++ ++ case RAS_GET_ERR_SRC_DESC: ++ rhdr->flags = 0; ++ src_id = *((u32 *)msgbuf); ++ src_desc = (uint8_t *)BUF_TO_DATA(respbuf); ++ 
acpi_ghes_get_err_src_desc(src_id, (acpi_ghesv2 *)src_desc); ++ ++ rhdr->status = RPMI_SUCCESS; ++ ++ rhdr->returned = sizeof(acpi_ghesv2); ++ rhdr->remaining = 0; ++ *resp_len = sizeof(*rhdr) + sizeof(acpi_ghesv2); ++ break; ++ ++ default: ++ sbi_printf("RAS Agent: Unknown service %u\n", msg_id); ++ rc = SBI_ENOENT; ++ } ++ ++ return rc; ++} +-- +2.27.0 + diff --git a/0011-lib-utils-default-FDT_RAS_RPMI-to-n.patch b/0011-lib-utils-default-FDT_RAS_RPMI-to-n.patch new file mode 100644 index 0000000000000000000000000000000000000000..8b823f71735f8708f28351a88660e13765dd4296 --- /dev/null +++ b/0011-lib-utils-default-FDT_RAS_RPMI-to-n.patch @@ -0,0 +1,30 @@ +From ab73834090a0447b5a04b512c463c3afad47aa7e Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Wed, 29 May 2024 17:41:49 +0530 +Subject: [PATCH 11/16] lib: utils: default FDT_RAS_RPMI to n + +Reference: https://github.com/ventanamicro/opensbi/commit/f75aa1341f8981f0ae2ea492a189d64565e3d963 + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + lib/utils/ras/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/lib/utils/ras/Kconfig b/lib/utils/ras/Kconfig +index 08eb47e4..70b22408 100644 +--- a/lib/utils/ras/Kconfig ++++ b/lib/utils/ras/Kconfig +@@ -11,7 +11,7 @@ if FDT_RAS + + config FDT_RAS_RPMI + bool "FDT RPMI RAS driver" +- default y ++ default n + + endif + +-- +2.27.0 + diff --git a/0012-lib-utils-Enable-SBI-RAS-Agent-by-default.patch b/0012-lib-utils-Enable-SBI-RAS-Agent-by-default.patch new file mode 100644 index 0000000000000000000000000000000000000000..c7b91268d261be1f2a066d016afe0171e1a541e8 --- /dev/null +++ b/0012-lib-utils-Enable-SBI-RAS-Agent-by-default.patch @@ -0,0 +1,31 @@ +From 51d76eb3b091f0899a855f7f926d00cef5432fcc Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Wed, 29 May 2024 17:43:01 +0530 +Subject: [PATCH 12/16] lib: utils: Enable SBI RAS Agent by default + +Reference: 
https://github.com/ventanamicro/opensbi/commit/9a312eb3cbf9836beb7c0732ff40ef17150cd820 + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + lib/utils/ras/Kconfig | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/lib/utils/ras/Kconfig b/lib/utils/ras/Kconfig +index 70b22408..e0abbed1 100644 +--- a/lib/utils/ras/Kconfig ++++ b/lib/utils/ras/Kconfig +@@ -13,6 +13,9 @@ config FDT_RAS_RPMI + bool "FDT RPMI RAS driver" + default n + ++config FDT_SBI_RAS_AGENT ++ bool "FDT SBI RAS Agent driver" ++ default y + endif + + endmenu +-- +2.27.0 + diff --git a/0013-include-sbi_utils-add-GAS-access-sizes-and-error-def.patch b/0013-include-sbi_utils-add-GAS-access-sizes-and-error-def.patch new file mode 100644 index 0000000000000000000000000000000000000000..a67a1db30862ce528b175be4485b94114201c65e --- /dev/null +++ b/0013-include-sbi_utils-add-GAS-access-sizes-and-error-def.patch @@ -0,0 +1,106 @@ +From bdd9a5480979c8191c7199e64faf38a387765860 Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Sat, 24 May 2025 22:20:07 +0530 +Subject: [PATCH 13/16] include: sbi_utils: add GAS access sizes and error + definitions + +- Add access sizes as defined in ACPI GAS. 
+- Add common hardware error definitions defined in RERI specification +- Add transaction types as defined in RERI specification + +Reference: https://github.com/ventanamicro/opensbi/commit/31ce5378727f24d9fb60038988cd66c4901fdb6d + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + include/sbi_utils/ras/apei_tables.h | 8 +++++ + include/sbi_utils/ras/riscv_reri_regs.h | 48 +++++++++++++++++++++++-- + 2 files changed, 54 insertions(+), 2 deletions(-) + +diff --git a/include/sbi_utils/ras/apei_tables.h b/include/sbi_utils/ras/apei_tables.h +index 20c61c43..38aca7f6 100644 +--- a/include/sbi_utils/ras/apei_tables.h ++++ b/include/sbi_utils/ras/apei_tables.h +@@ -124,6 +124,14 @@ enum { + AML_AS_FFH = 0X7F, + }; + ++enum { ++ GAS_ACCESS_SZ_UNDEF, ++ GAS_ACCESS_SZ_BYTE, ++ GAS_ACCESS_SZ_WORD, ++ GAS_ACCESS_SZ_DWORD, ++ GAS_ACCESS_SZ_QWORD ++}; ++ + typedef struct __packed { + uint8_t asid; + uint8_t reg_bwidth; +diff --git a/include/sbi_utils/ras/riscv_reri_regs.h b/include/sbi_utils/ras/riscv_reri_regs.h +index ac5e2df4..5c7afadc 100644 +--- a/include/sbi_utils/ras/riscv_reri_regs.h ++++ b/include/sbi_utils/ras/riscv_reri_regs.h +@@ -72,8 +72,8 @@ typedef union riscv_reri_control { + uint16_t sinv:1; + uint16_t rsvd0:1; + uint16_t ces:2; +- uint16_t udes:2; +- uint16_t uues:2; ++ uint16_t ueds:2; ++ uint16_t uecs:2; + uint16_t rsvd1:6; + + uint16_t rsvd2; +@@ -157,4 +157,48 @@ typedef struct __packed riscv_reri_error_bank { + riscv_reri_error_record records[MAX_ERROR_RECORDS]; + } riscv_reri_error_bank; + ++enum { ++ RERI_ERR_NO_ERROR, /* 0 */ ++ RERI_ERR_OTH_UNSPEC, /* 1 */ ++ RERI_ERR_CORRUPTED_DATA_ACCESS, /* 2 */ ++ RERI_ERR_CACHE_BLOCK_DATA, /* 3 */ ++ RERI_ERR_CACHE_SCRUBBING_DETECTED, ++ RERI_ERR_CACHE_ADDR_CTRL_STATE, ++ RERI_ERR_CACHE_UNSPEC, ++ RERI_ERR_SNOOP_FILTER_DIREC_ADDR, ++ RERI_ERR_SNOOP_FILTER_DIREC_ADDR_UNSPEC, ++ RERI_ERR_TLB_PAGE_CACHE_DATA, /* 9 */ ++ RERI_ERR_TLB_PAGE_CACHE_ADDR_CONTROL, ++ 
RERI_ERR_TLB_PAGE_CACHE_UNSPEC, ++ RERI_ERR_HART_STATE, /* 12 */ ++ RERI_ERR_INTERRUPT_CONTROLLER_STATE, ++ RERI_ERR_INTERCONNECT_DATA, ++ RERI_ERR_INTERCONNECT_OTHER, ++ RERI_ERR_INTERNAL_WATCHDOG, ++ RERI_ERR_INTERNAL_DATAPATH_MEM, ++ RERI_ERR_SYS_MEM_COMMAND, ++ RERI_ERR_SYS_MEM_UNSPEC, ++ RERI_ERR_SYS_MEM_DATA, ++ RERI_ERR_SYS_MEM_SCRUBBING, ++ RERI_ERR_PROT_ILL_IO, ++ RERI_ERR_PROT_ILL_UNSPEC, ++ RERI_ERR_PROT_TIMEOUT, ++ RERI_ERR_SYS_INT_CONT, ++ RERI_ERR_DEF_ERR_PASSTHROUGH_NOT_SUPP, ++ RERI_ERR_PCIE_CXL_DETECTED, ++ /* 28 - 63 reserved */ ++ RERI_ERR_CUSTOM_1 = 64, ++ RERI_ERR_CUSTOM_END = 255 ++}; ++ ++/* Transaction Type (TT) encodings */ ++enum { ++ TT_UNSPEC, ++ TT_CUSTOM, ++ TT_EXPLICIT_READ = 4, ++ TT_EXPLICIT_WRITE, ++ TT_IMPLICIT_READ, ++ TT_IMPLICIT_WRITE ++}; ++ + #endif /* __RISCV_RERI_REGS_ */ +-- +2.27.0 + diff --git a/0014-lib-utils-export-memory-allocation-function-for-ghes.patch b/0014-lib-utils-export-memory-allocation-function-for-ghes.patch new file mode 100644 index 0000000000000000000000000000000000000000..9a341690f10986250ccbac298af76bd5ce48743f --- /dev/null +++ b/0014-lib-utils-export-memory-allocation-function-for-ghes.patch @@ -0,0 +1,48 @@ +From 6883f7a0537c22fb398fba738b1011d3e96c3772 Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Sat, 24 May 2025 22:24:13 +0530 +Subject: [PATCH 14/16] lib: utils: export memory allocation function for ghes + pool + +GHES has a separate pool from which the memory for CPER records +is allocated. Export the function to allocate memory from the +GHES pool so that the other APEI subsystems can allocate memory +from the same pool. 
+ +Reference: https://github.com/ventanamicro/opensbi/commit/21feb601ddbb62ef8cf8a5cb35405974ac8b444c + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + include/sbi_utils/ras/ghes.h | 1 + + lib/utils/ras/ghes.c | 2 +- + 2 files changed, 2 insertions(+), 1 deletion(-) + +diff --git a/include/sbi_utils/ras/ghes.h b/include/sbi_utils/ras/ghes.h +index b3b43bbd..0318ba37 100644 +--- a/include/sbi_utils/ras/ghes.h ++++ b/include/sbi_utils/ras/ghes.h +@@ -46,5 +46,6 @@ void acpi_ghes_record_errors(uint8_t source_id, acpi_ghes_error_info *einfo); + int acpi_ghes_get_num_err_srcs(void); + int acpi_ghes_get_err_srcs_list(uint32_t *src_ids, uint32_t sz); + int acpi_ghes_get_err_src_desc(uint32_t src_id, acpi_ghesv2 *ghes); ++void *acpi_ghes_alloc(uint64_t size); + + #endif +diff --git a/lib/utils/ras/ghes.c b/lib/utils/ras/ghes.c +index af9636f9..7957ab22 100644 +--- a/lib/utils/ras/ghes.c ++++ b/lib/utils/ras/ghes.c +@@ -97,7 +97,7 @@ static u64 *acpi_ghes_gas_register_alloc(void) + return (u64 *)(ulong)addr; + } + +-static void *acpi_ghes_alloc(uint64_t size) ++void *acpi_ghes_alloc(uint64_t size) + { + uint64_t naddr; + uint64_t nsz; +-- +2.27.0 + diff --git a/0015-lib-utils-add-function-to-send-MPXY-message-without-.patch b/0015-lib-utils-add-function-to-send-MPXY-message-without-.patch new file mode 100644 index 0000000000000000000000000000000000000000..dda92f2a729062236d9943d1a43f5ca0f1169486 --- /dev/null +++ b/0015-lib-utils-add-function-to-send-MPXY-message-without-.patch @@ -0,0 +1,55 @@ +From 28354d0bee2d6c750ca98229dd35a63103a13d88 Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Sat, 24 May 2025 22:31:04 +0530 +Subject: [PATCH 15/16] lib: utils: add function to send MPXY message without + response + +Add support to send MPXY message without a response. This +alleviates the requiement of setting up shared memory when +only message passing is required. 
+ +Reference: https://github.com/ventanamicro/opensbi/commit/77bf0b886e4e850990eabfd29eef425b2910fba7 + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + lib/utils/ras/ras_agent_mpxy.c | 11 ++++++++++- + 1 file changed, 10 insertions(+), 1 deletion(-) + +diff --git a/lib/utils/ras/ras_agent_mpxy.c b/lib/utils/ras/ras_agent_mpxy.c +index 0a027010..ad8eaac1 100644 +--- a/lib/utils/ras/ras_agent_mpxy.c ++++ b/lib/utils/ras/ras_agent_mpxy.c +@@ -37,6 +37,9 @@ static int ras_agent_read_attributes(struct sbi_mpxy_channel *channel, u32 *outm + static int ras_handle_message(struct sbi_mpxy_channel *channel, u32 msg_id, + void *msgbuf, u32 msg_len, void *respbuf, + u32 resp_max_len, unsigned long *resp_len); ++static int ras_handle_message_withoutresp(struct sbi_mpxy_channel *channel, ++ u32 message_id, void *tx, u32 tx_len); ++ + #define MAX_RAS_RPMI_PROPS (MPXY_MSGPROT_RPMI_ATTR_MAX_ID - \ + SBI_MPXY_ATTR_MSGPROTO_ATTR_START) + #define RAS_AGENT_RPMI_SVCGRP_ID 0xC +@@ -66,7 +69,7 @@ int ras_mpxy_init(const void *fdt, int nodeoff) + + ra_mpxy_ch.channel_id = chan_id; + ra_mpxy_ch.send_message_with_response = ras_handle_message; +- ra_mpxy_ch.send_message_without_response = NULL; ++ ra_mpxy_ch.send_message_without_response = ras_handle_message_withoutresp; + ra_mpxy_ch.read_attributes = ras_agent_read_attributes; + ra_mpxy_ch.get_notification_events = NULL; + ra_mpxy_ch.switch_eventsstate = NULL; +@@ -169,3 +172,9 @@ static int ras_handle_message(struct sbi_mpxy_channel *channel, u32 msg_id, + + return rc; + } ++ ++static int ras_handle_message_withoutresp(struct sbi_mpxy_channel *channel, ++ u32 message_id, void *tx, u32 tx_len) ++{ ++ return ras_handle_message(channel, message_id, tx, tx_len, NULL, 0, NULL); ++} +-- +2.27.0 + diff --git a/0016-lib-utils-add-support-for-hardware-error-injection.patch b/0016-lib-utils-add-support-for-hardware-error-injection.patch new file mode 100644 index 
0000000000000000000000000000000000000000..31e464ae2dac59c569c0ceb66959d5cc5621a1ff --- /dev/null +++ b/0016-lib-utils-add-support-for-hardware-error-injection.patch @@ -0,0 +1,994 @@ +From 2236969786eba88febc0b3817f078165dc972cd7 Mon Sep 17 00:00:00 2001 +From: Himanshu Chauhan +Date: Sun, 25 May 2025 09:24:26 +0530 +Subject: [PATCH 16/16] lib: utils: add support for hardware error injection + +Add support for hardware Error INJection as defined in ACPI +APEI specification. + +- Generate list of actions/instructions +- Support execute/trigger operation via MPXY messages +- Generate trigger table +- Inject/trigger error injected from user space + +Reference: https://github.com/ventanamicro/opensbi/commit/f1430cfbb91a54fb0de689253768c608270caa3a + +Signed-off-by: Himanshu Chauhan +Signed-off-by: zenghuangyuan +Signed-off-by: liuqingtao +--- + include/sbi_utils/ras/ras_agent_einj.h | 63 +++ + include/sbi_utils/ras/ras_agent_mpxy.h | 8 + + lib/utils/ras/Kconfig | 5 + + lib/utils/ras/objects.mk | 1 + + lib/utils/ras/ras_agent_einj.c | 715 +++++++++++++++++++++++++ + lib/utils/ras/ras_agent_mpxy.c | 57 +- + lib/utils/ras/reri_drv.c | 17 + + 7 files changed, 865 insertions(+), 1 deletion(-) + create mode 100644 include/sbi_utils/ras/ras_agent_einj.h + create mode 100644 lib/utils/ras/ras_agent_einj.c + +diff --git a/include/sbi_utils/ras/ras_agent_einj.h b/include/sbi_utils/ras/ras_agent_einj.h +new file mode 100644 +index 00000000..4fb0d270 +--- /dev/null ++++ b/include/sbi_utils/ras/ras_agent_einj.h +@@ -0,0 +1,63 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2025 Ventana Micro Systems, Inc. 
++ * ++ * Author(s): ++ * Himanshu Chauhan ++ */ ++ ++#ifndef __RAS_AGENT_EINJ_H ++#define __RAS_AGENT_EINJ_H ++ ++#include ++ ++typedef struct __packed einj_inst_entry { ++ uint8_t action; ++ uint8_t instruction; ++ uint8_t flags; ++ uint8_t reserved; ++ acpi_gas register_region; ++ uint64_t value; ++ uint64_t mask; ++} einj_inst_entry_t; ++ ++#ifdef CONFIG_FDT_SBI_RAS_AGENT_EINJ ++ ++int einj_init(const void *fdt, int nodeoff); ++int einj_register_error_source(uint16_t src_id, uint64_t err_src_reri_addr); ++int einj_get_total_injection_entries(void); ++einj_inst_entry_t *einj_get_instruction(int index); ++void einj_trigger_operation(void); ++void einj_execute_operation(void); ++ ++#else /* CONFIG_RAS_AGENT_EINJ */ ++ ++#define __unused __attribute__((unused)) ++ ++static __unused int einj_init(const void *fdt, int nodeoff) ++{ ++ return 0; ++} ++ ++static __unused int einj_register_error_source(uint16_t src_id, uint64_t err_src_reri_addr) ++{ ++ return 0; ++} ++ ++static __unused int einj_get_total_injection_entries(void) ++{ ++ return 0; ++} ++ ++static __unused einj_inst_entry_t * einj_get_instruction(int index) ++{ ++ return NULL; ++} ++ ++static __unused void einj_trigger_operation(void) { } ++static __unused void einj_execute_operation(void) { } ++ ++#endif ++ ++#endif /* __RAS_AGENT_EINJ_H */ +diff --git a/include/sbi_utils/ras/ras_agent_mpxy.h b/include/sbi_utils/ras/ras_agent_mpxy.h +index 76bbea51..b36f5691 100644 +--- a/include/sbi_utils/ras/ras_agent_mpxy.h ++++ b/include/sbi_utils/ras/ras_agent_mpxy.h +@@ -19,6 +19,14 @@ + #define RAS_GET_ERR_SRCS_ID_LIST 0x2 + #define RAS_GET_ERR_SRC_DESC 0x3 + ++/* Used to generate EINJ table */ ++#define RAS_EINJ_GET_NUM_INSTRUCTIONS 0x4 ++#define RAS_EINJ_GET_INSTRUCTION 0x5 ++ ++/* Used during error injection/trigger */ ++#define RAS_EINJ_EXECUTE_OPERATION 0x6 ++#define RAS_EINJ_TRIGGER_ERROR 0x7 ++ + int ras_mpxy_init(const void *fdt, int nodeoff); + + #endif +diff --git a/lib/utils/ras/Kconfig 
b/lib/utils/ras/Kconfig +index e0abbed1..3b104552 100644 +--- a/lib/utils/ras/Kconfig ++++ b/lib/utils/ras/Kconfig +@@ -16,6 +16,11 @@ config FDT_RAS_RPMI + config FDT_SBI_RAS_AGENT + bool "FDT SBI RAS Agent driver" + default y ++ ++config FDT_SBI_RAS_AGENT_EINJ ++ bool "Support for APEI Error INJection" ++ depends on FDT_SBI_RAS_AGENT ++ default y + endif + + endmenu +diff --git a/lib/utils/ras/objects.mk b/lib/utils/ras/objects.mk +index fd670ebf..732249d4 100644 +--- a/lib/utils/ras/objects.mk ++++ b/lib/utils/ras/objects.mk +@@ -16,3 +16,4 @@ carray-fdt_early_drivers-$(CONFIG_FDT_SBI_RAS_AGENT) += fdt_sbi_ras_agent + libsbiutils-objs-$(CONFIG_FDT_SBI_RAS_AGENT) += ras/fdt_ras_agent.o + libsbiutils-objs-$(CONFIG_FDT_SBI_RAS_AGENT) += ras/reri_drv.o + libsbiutils-objs-$(CONFIG_FDT_SBI_RAS_AGENT) += ras/ras_agent_mpxy.o ++libsbiutils-objs-$(CONFIG_FDT_SBI_RAS_AGENT_EINJ) += ras/ras_agent_einj.o +diff --git a/lib/utils/ras/ras_agent_einj.c b/lib/utils/ras/ras_agent_einj.c +new file mode 100644 +index 00000000..d17beb48 +--- /dev/null ++++ b/lib/utils/ras/ras_agent_einj.c +@@ -0,0 +1,715 @@ ++/* ++ * SPDX-License-Identifier: BSD-2-Clause ++ * ++ * Copyright (c) 2025 Ventana Micro Systems, Inc. 
++ * ++ * Author(s): ++ * Himanshu Chauhan ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++enum einj_err_type_bit_pos { ++ proc_correctable, ++ proc_uncorrectable_non_fatal, ++ proc_uncorrectable_fatal, ++ mem_correctable, ++ mem_uncorrectable_non_fatal, ++ mem_uncorrectable_fatal, ++ pcie_correctable, ++ pcie_uncorrectable_non_fatal, ++ pcie_uncorrectable_fatal, ++ plat_correctable, ++ plat_uncorrectable_non_fatal, ++ plat_uncorrectable_fatal, ++ cxlcp_correctable, /* CXL.cache Prototol */ ++ cxlcp_uncorrectable_non_fatal, ++ cxlcp_uncorrectable_fatal, ++ cxlmp_correctable, /* CXL.mem Protocol */ ++ cxlmp_uncorrectable_non_fatal, ++ cxlmp_uncorrectable_fatal, ++ /* 18:29 RESERVED */ ++ einjv2_etype = 30, ++ vendor_defined_err_type, ++}; ++ ++enum { ++ einj_action_begin_injection_operation, ++ einj_action_get_trigger_action_table, ++ einj_action_set_error_type, ++ einj_action_get_error_type, ++ einj_action_end_operation, ++ einj_action_execute_operation, ++ einj_action_check_busy_status, ++ einj_action_get_command_status, ++ einj_action_set_error_type_with_address, ++ einj_action_get_execute_operation_timings, ++ einj_action_einjv2_set_error_type, ++ einj_action_einjv2_get_error_type, ++ einj_action_trigger_error = 0xFF, ++}; ++ ++enum { ++ einj_inst_read_register, ++ einj_inst_read_register_value, ++ einj_inst_write_register, ++ einj_inst_write_register_value, ++ einj_inst_noop, ++}; ++ ++typedef struct einj_inst_cont { ++ einj_inst_entry_t inst; ++ struct sbi_dlist node; ++} einj_inst_cont_t; ++ ++typedef struct __packed einj_err_type_wtih_addr_data { ++ uint32_t err_type; ++ uint32_t vendor_etype_ext_offs; ++ uint32_t flags; ++ uint32_t proc_id; ++ uint64_t mem_addr; ++ uint64_t mem_addr_range; ++ uint32_t pcie_bdf; ++} einj_err_type_with_addr_data_t; ++ ++struct einj_reri_err_src { ++ uint64_t addr; ++ uint16_t src_id; ++ ++ struct 
sbi_dlist node; ++}; ++ ++typedef struct __packed einj_err_trigger_table_header { ++ uint32_t hdr_sz; ++ uint32_t revision; ++ uint32_t tbl_sz; ++ uint32_t entry_cnt; ++} einj_err_trigger_table_header_t; ++ ++static SBI_LIST_HEAD(einj_reri_err_src_list); ++static SBI_LIST_HEAD(einj_inst_list); ++ ++static uint64_t *supported_err_types = NULL; ++static uint64_t *error_to_inject = NULL; ++static int einj_total_injection_entries = 0; ++static uint32_t mpxy_chan_id = 0; ++ ++#define EINJ_BUSY_BIT 0 ++ ++enum { ++ COMMAND_STATUS_SUCCESS, ++ COMMAND_STATUS_UNKNOWN_FAIL, ++ COMMAND_STATUS_INVAL_ACCESS ++}; ++ ++#define EINJ_FFH_TYPE_BIT_SHIFT 60 ++#define EINJ_FFH_TYPE_BIT_MASK (0xful) ++#define EINJ_FFH_CHAN_ID_BIT_SHIFT 8 ++#define EINJ_FFH_CHAN_ID_BIT_MASK (0xffffffULL) ++#define EINJ_FFH_MSG_ID_BIT_SHIFT 0 ++#define EINJ_FFH_MSG_ID_BIT_MASK (0xfful) ++ ++#define MAKE_FFH_ADDR(_type, _chan_id, _msg_id) \ ++ ({ \ ++ uint64_t _ffh_ = ((((uint64_t)_type << EINJ_FFH_TYPE_BIT_SHIFT) & EINJ_FFH_TYPE_BIT_MASK) \ ++ | (((uint64_t)_chan_id << EINJ_FFH_CHAN_ID_BIT_SHIFT) & EINJ_FFH_CHAN_ID_BIT_MASK) \ ++ | (((uint64_t)_msg_id << EINJ_FFH_MSG_ID_BIT_SHIFT) & EINJ_FFH_MSG_ID_BIT_MASK)); \ ++ (_ffh_); \ ++ }) ++ ++#define GET_GAS_ADDRESS_REGION(_op_entries) \ ++ ({ \ ++ uint64_t **_raddr = (uint64_t **)(ulong)_op_entries->register_region.address; \ ++ void *_region = (void *)_raddr; \ ++ (_region); \ ++ }) ++ ++static void *einj_mem_alloc(uint64_t size) ++{ ++ return acpi_ghes_alloc(size); ++} ++ ++static einj_inst_cont_t *einj_alloc_inst_cont(int num_entries) ++{ ++ einj_inst_cont_t *con = NULL; ++ einj_inst_cont_t *t = NULL; ++ int i = 0; ++ ++ t = con = (einj_inst_cont_t *)einj_mem_alloc(sizeof(einj_inst_cont_t) * num_entries); ++ ++ if (con == NULL) ++ return NULL; ++ ++ for (i = 0; i < num_entries; i++) { ++ SBI_INIT_LIST_HEAD(&t->node); ++ t++; ++ } ++ ++ return con; ++} ++ ++static uint64_t *einj_alloc_register(void) ++{ ++ return (uint64_t 
*)(einj_mem_alloc(sizeof(uint64_t))); ++} ++ ++static inline einj_inst_cont_t *to_einj_inst_cont(struct sbi_dlist *node) ++{ ++ return container_of(node, struct einj_inst_cont, node); ++} ++ ++einj_inst_entry_t * einj_get_inst_for_act(int action, int instruction) ++{ ++ struct sbi_dlist *pos; ++ einj_inst_cont_t *eicont; ++ einj_inst_entry_t *inst; ++ ++ sbi_list_for_each(pos, &einj_inst_list) { ++ eicont = to_einj_inst_cont(pos); ++ inst = &eicont->inst; ++ ++ if (inst->action == action ++ && inst->instruction == instruction) { ++ return inst; ++ } ++ } ++ ++ return NULL; ++} ++ ++void *einj_get_gas_region(int action, int instruction) ++{ ++ void *gaddr; ++ einj_inst_entry_t *inst; ++ ++ inst = einj_get_inst_for_act(action, instruction); ++ ++ if (inst == NULL) ++ return NULL; ++ ++ gaddr = GET_GAS_ADDRESS_REGION(inst); ++ ++ return gaddr; ++} ++ ++static void einj_build_begin_op_entries(void) ++{ ++ int nr_entries = 1; ++ einj_inst_cont_t *inc = NULL; ++ ++ inc = einj_alloc_inst_cont(nr_entries); ++ ++ if (inc == NULL) ++ return; ++ ++ inc->inst.action = einj_action_begin_injection_operation; ++ inc->inst.instruction = einj_inst_noop; ++ sbi_list_add(&(inc->node), &(einj_inst_list)); ++ ++ einj_total_injection_entries += nr_entries; ++} ++ ++static void einj_build_end_op_entries(void) ++{ ++ int nr_entries = 1; ++ einj_inst_cont_t *inc = NULL; ++ ++ inc = einj_alloc_inst_cont(nr_entries); ++ ++ if (inc == NULL) ++ return; ++ ++ inc->inst.action = einj_action_end_operation; ++ inc->inst.instruction = einj_inst_noop; ++ sbi_list_add(&(inc->node), &(einj_inst_list)); ++ einj_total_injection_entries += nr_entries; ++} ++ ++uint64_t *g_einj_busy_status_addr; ++void *g_trigger_action_table_phys = NULL; ++riscv_reri_error_record *g_current_err_rec = NULL; ++ ++static void einj_build_check_busy_status_op_entries(void) ++{ ++ int nr_entries = 1; ++ einj_inst_cont_t *inc = NULL; ++ uint64_t *raddr = NULL; ++ ++ inc = einj_alloc_inst_cont(nr_entries); ++ ++ if (inc == NULL) 
++ return; ++ ++ g_einj_busy_status_addr = raddr = einj_alloc_register(); ++ ++ if (raddr == NULL) ++ return; ++ ++ inc->inst.action = einj_action_check_busy_status; ++ inc->inst.instruction = einj_inst_read_register; ++ inc->inst.flags = 0UL; ++ inc->inst.register_region.asid = AML_AS_SYSTEM_MEMORY; ++ inc->inst.register_region.reg_bwidth = sizeof(uint32_t); ++ inc->inst.register_region.reg_boffs = 0; ++ inc->inst.register_region.access_sz = GAS_ACCESS_SZ_QWORD; ++ inc->inst.register_region.address = (ulong)raddr; ++ inc->inst.mask = 0xFFFFFFFFUL; ++ ++ sbi_list_add(&(inc->node), &(einj_inst_list)); ++ einj_total_injection_entries += nr_entries; ++} ++ ++static void einj_build_get_trigger_error_action_table_entries(void) ++{ ++ int nr_entries = 1; ++ einj_inst_cont_t *inc = NULL; ++ uint64_t *raddr = NULL; ++ ++ inc = einj_alloc_inst_cont(nr_entries); ++ ++ if (inc == NULL) ++ return; ++ ++ raddr = einj_alloc_register(); ++ ++ if (raddr == NULL) ++ return; ++ ++ inc->inst.action = einj_action_get_trigger_action_table; ++ inc->inst.instruction = einj_inst_read_register; ++ inc->inst.flags = 0UL; ++ inc->inst.register_region.asid = AML_AS_SYSTEM_MEMORY; ++ inc->inst.register_region.reg_bwidth = sizeof(uint32_t); ++ inc->inst.register_region.reg_boffs = 0; ++ inc->inst.register_region.access_sz = GAS_ACCESS_SZ_QWORD; ++ inc->inst.register_region.address = (ulong)raddr; ++ inc->inst.mask = 0xFFFFFFFFUL; ++ ++ *raddr = (ulong)g_trigger_action_table_phys; ++ ++ sbi_list_add(&(inc->node), &(einj_inst_list)); ++ einj_total_injection_entries += nr_entries; ++} ++ ++static void einj_build_trigger_action_table(void) ++{ ++ int nr_entries = 1; ++ einj_err_trigger_table_header_t *hdr; ++ einj_inst_entry_t *inst; ++ ++ g_trigger_action_table_phys = einj_mem_alloc(sizeof(*hdr) + (sizeof(*inst) * nr_entries)); ++ ++ if (g_trigger_action_table_phys == NULL) ++ return; ++ ++ hdr = (einj_err_trigger_table_header_t *)g_trigger_action_table_phys; ++ inst = (einj_inst_entry_t 
*)(((uint8_t *)g_trigger_action_table_phys) + sizeof(einj_err_trigger_table_header_t)); ++ ++ hdr->hdr_sz = sizeof(einj_err_trigger_table_header_t); ++ hdr->revision = 0; ++ hdr->entry_cnt = nr_entries; ++ hdr->tbl_sz = (sizeof(einj_err_trigger_table_header_t) + (nr_entries * sizeof(einj_inst_entry_t))); ++ ++ inst->action = einj_action_trigger_error; ++ inst->instruction = einj_inst_write_register; ++ inst->flags = 0UL; ++ inst->register_region.asid = AML_AS_FFH; ++ inst->register_region.reg_bwidth = sizeof(uint64_t); ++ inst->register_region.reg_boffs = 0; ++ inst->register_region.access_sz = GAS_ACCESS_SZ_QWORD; ++ inst->register_region.address = MAKE_FFH_ADDR(0, mpxy_chan_id, RAS_EINJ_TRIGGER_ERROR); ++} ++ ++static void einj_build_execute_op_entries(void) ++{ ++ int nr_entries = 1; ++ einj_inst_cont_t *inc = NULL; ++ ++ inc = einj_alloc_inst_cont(nr_entries); ++ ++ if (inc == NULL) ++ return; ++ ++ inc->inst.action = einj_action_execute_operation; ++ inc->inst.instruction = einj_inst_write_register; ++ inc->inst.flags = 0UL; ++ inc->inst.register_region.asid = AML_AS_FFH; ++ inc->inst.register_region.reg_bwidth = sizeof(uint64_t); ++ inc->inst.register_region.reg_boffs = 0; ++ inc->inst.register_region.access_sz = GAS_ACCESS_SZ_QWORD; ++ inc->inst.register_region.address = MAKE_FFH_ADDR(0, mpxy_chan_id, RAS_EINJ_EXECUTE_OPERATION); ++ sbi_list_add(&(inc->node), &(einj_inst_list)); ++ einj_total_injection_entries += nr_entries; ++} ++ ++static void einj_build_get_err_type_entries(void) ++{ ++ int nr_entries = 1; ++ uint64_t *raddr; ++ einj_inst_cont_t *inc = NULL; ++ ++ inc = einj_alloc_inst_cont(nr_entries); ++ ++ if (inc == NULL) ++ return; ++ ++ raddr = einj_alloc_register(); ++ inc->inst.action = einj_action_get_error_type; ++ inc->inst.instruction = einj_inst_read_register; ++ inc->inst.flags = 0UL; ++ inc->inst.register_region.asid = AML_AS_SYSTEM_MEMORY; ++ inc->inst.register_region.reg_bwidth = sizeof(uint32_t); ++ inc->inst.register_region.reg_boffs = 
0; ++ inc->inst.register_region.access_sz = GAS_ACCESS_SZ_QWORD; ++ inc->inst.register_region.address = (ulong)raddr; ++ inc->inst.mask = 0xFFFFFFFFUL; ++ ++ *raddr = (uint64_t)(*supported_err_types); ++ sbi_list_add(&(inc->node), &(einj_inst_list)); ++ einj_total_injection_entries += nr_entries; ++} ++ ++static void einj_build_get_command_status_op_entries(void) ++{ ++ int nr_entries = 1; ++ uint64_t *raddr; ++ einj_inst_cont_t *inc = NULL; ++ ++ inc = einj_alloc_inst_cont(nr_entries); ++ ++ if (inc == NULL) ++ return; ++ ++ raddr = einj_alloc_register(); ++ ++ inc->inst.action = einj_action_get_command_status; ++ inc->inst.instruction = einj_inst_read_register; ++ inc->inst.flags = 0UL; ++ inc->inst.register_region.asid = AML_AS_SYSTEM_MEMORY; ++ inc->inst.register_region.reg_bwidth = sizeof(uint64_t); ++ inc->inst.register_region.reg_boffs = 0; ++ inc->inst.register_region.access_sz = GAS_ACCESS_SZ_QWORD; ++ inc->inst.register_region.address = (ulong)raddr; ++ inc->inst.mask = 0xFFFFFFFFUL; ++ ++ *raddr = 0; ++ ++ sbi_list_add(&(inc->node), &(einj_inst_list)); ++ einj_total_injection_entries += nr_entries; ++} ++ ++static void einj_build_set_err_type_entries(void) ++{ ++ int nr_entries = 1; ++ uint64_t *raddr; ++ einj_inst_cont_t *inc = NULL; ++ ++ inc = einj_alloc_inst_cont(nr_entries); ++ ++ if (inc == NULL) ++ return; ++ ++ raddr = einj_alloc_register(); ++ error_to_inject = einj_mem_alloc(sizeof(uint64_t)); ++ ++ if (error_to_inject == NULL) ++ return; ++ ++ *error_to_inject = 0; ++ ++ inc->inst.action = einj_action_set_error_type; ++ inc->inst.instruction = einj_inst_write_register; ++ inc->inst.flags = 0UL; ++ inc->inst.register_region.asid = AML_AS_SYSTEM_MEMORY; ++ inc->inst.register_region.reg_bwidth = sizeof(uint64_t); ++ inc->inst.register_region.reg_boffs = 0; ++ inc->inst.register_region.access_sz = GAS_ACCESS_SZ_QWORD; ++ inc->inst.register_region.address = (ulong)raddr; ++ inc->inst.mask = 0xFFFFFFFFUL; ++ ++ *raddr = (ulong)error_to_inject; ++ 
sbi_list_add(&(inc->node), &(einj_inst_list)); ++ einj_total_injection_entries += nr_entries; ++} ++ ++static void einj_build_set_err_type_with_addr_entries(void) ++{ ++ int nr_entries = 1; ++ uint64_t *raddr = NULL; ++ einj_inst_cont_t *inc = NULL; ++ ++ inc = einj_alloc_inst_cont(nr_entries); ++ ++ if (inc == NULL) ++ return; ++ ++ raddr = einj_alloc_register(); ++ if (raddr == NULL) ++ return; ++ ++ inc->inst.action = einj_action_set_error_type_with_address; ++ inc->inst.instruction = einj_inst_write_register; ++ inc->inst.flags = 0UL; ++ inc->inst.register_region.asid = AML_AS_SYSTEM_MEMORY; ++ inc->inst.register_region.reg_bwidth = sizeof(uint64_t); ++ inc->inst.register_region.reg_boffs = 0; ++ inc->inst.register_region.access_sz = GAS_ACCESS_SZ_QWORD; ++ inc->inst.register_region.address = (ulong)raddr; ++ inc->inst.mask = 0xFFFFFFFFUL; ++ ++ sbi_list_add(&(inc->node), &(einj_inst_list)); ++ einj_total_injection_entries += nr_entries; ++} ++ ++int einj_init(const void *fdt, int nodeoff) ++{ ++ const fdt32_t *chan_id_p; ++ int len; ++ ++ chan_id_p = fdt_getprop(fdt, nodeoff, "riscv,sbi-mpxy-channel-id", &len); ++ if (!chan_id_p) ++ return SBI_ENOENT; ++ ++ mpxy_chan_id = fdt32_to_cpu(*chan_id_p); ++ ++ supported_err_types = einj_mem_alloc(sizeof(uint64_t)); ++ ++ if (supported_err_types == NULL) ++ return SBI_ENOMEM; ++ ++ *supported_err_types = ((0x1UL << proc_correctable) ++ | (0x1UL << proc_uncorrectable_non_fatal)); ++ ++ einj_build_begin_op_entries(); ++ einj_build_end_op_entries(); ++ einj_build_get_err_type_entries(); ++ einj_build_set_err_type_entries(); ++ einj_build_set_err_type_with_addr_entries(); ++ einj_build_execute_op_entries(); ++ einj_build_get_command_status_op_entries(); ++ einj_build_check_busy_status_op_entries(); ++ einj_build_trigger_action_table(); ++ einj_build_get_trigger_error_action_table_entries(); ++ ++ return SBI_SUCCESS; ++} ++ ++int einj_register_error_source(uint16_t src_id, uint64_t err_src_reri_addr) ++{ ++ int rc = 0; ++ 
++ struct einj_reri_err_src *einj_src = sbi_malloc(sizeof(struct einj_reri_err_src)); ++ ++ if (einj_src == NULL) { ++ rc = SBI_ENOMEM; ++ goto out; ++ } ++ ++ einj_src->addr = err_src_reri_addr; ++ einj_src->src_id = src_id; ++ SBI_INIT_LIST_HEAD(&einj_src->node); ++ sbi_list_add(&(einj_src->node), &(einj_reri_err_src_list)); ++ ++ out: ++ return rc; ++} ++ ++int einj_get_total_injection_entries(void) ++{ ++ return einj_total_injection_entries; ++} ++ ++static inline struct einj_reri_err_src *to_reri_err_src(struct sbi_dlist *node) ++{ ++ return container_of(node, struct einj_reri_err_src, node); ++} ++ ++static void set_proc_error_with_type(uint32_t hart_id, uint32_t err_type) ++{ ++ struct sbi_dlist *pos; ++ uint64_t *command_status = NULL; ++ struct einj_reri_err_src *reri_src; ++ riscv_reri_error_record *rec; ++ ++ command_status = (uint64_t *)einj_get_gas_region(einj_action_get_command_status, ++ einj_inst_read_register); ++ ++ if ((err_type & (err_type - 1)) != 0) { ++ sbi_printf("More than 1 error type set in error type field\n"); ++ *command_status = COMMAND_STATUS_INVAL_ACCESS; ++ return; ++ } ++ ++ switch(err_type) { ++ case proc_correctable: ++ case proc_uncorrectable_non_fatal: ++ case proc_uncorrectable_fatal: ++ sbi_list_for_each(pos, &(einj_reri_err_src_list)) { ++ reri_src = to_reri_err_src(pos); ++ if (reri_src->src_id == hart_id) { ++ riscv_reri_error_bank *bank = (riscv_reri_error_bank *)(ulong)reri_src->addr; ++ rec = (riscv_reri_error_record *)(&bank->records[0]); ++ /* ++ * Both SET_ERROR_TYPE or SET_ERROR_TYPE_WITH_ADDRESS don't provide a way to ++ * specify details of the errors. For processor only the processor ID ++ * is specified. So setting others to a fixed sane default. 
++ */ ++ g_current_err_rec = rec; ++ rec->control_i.ces = 2; ++ rec->control_i.ueds = 2; ++ rec->control_i.uecs = 2; ++ rec->status_i.ec = RERI_ERR_HART_STATE; ++ rec->status_i.tt = TT_IMPLICIT_READ; ++ if (err_type == proc_correctable) { ++ rec->status_i.ce = 1; ++ rec->status_i.de = 0; ++ rec->status_i.ue = 0; ++ } else if (err_type == proc_uncorrectable_non_fatal) { ++ rec->status_i.ce = 0; ++ rec->status_i.de = 1; ++ rec->status_i.ue = 0; ++ } else if (err_type == proc_uncorrectable_fatal) { ++ rec->status_i.ce = 0; ++ rec->status_i.de = 0; ++ rec->status_i.ue = 1; ++ } ++ *command_status = COMMAND_STATUS_SUCCESS; ++ return; ++ } ++ } ++ ++ sbi_printf("%s: Could not find the error source for hart-%u\n", __func__, hart_id); ++ } ++} ++ ++static void set_err_with_address(einj_err_type_with_addr_data_t *wdata) ++{ ++ uint64_t *command_status = NULL; ++ uint32_t hart_id; ++ ++ command_status = (uint64_t *)einj_get_gas_region(einj_action_get_command_status, ++ einj_inst_read_register); ++ ++ if ((wdata->err_type & (wdata->err_type - 1)) != 0) { ++ sbi_printf("More than 1 error type set in error type field\n"); ++ *command_status = COMMAND_STATUS_INVAL_ACCESS; ++ return; ++ } ++ ++ switch(wdata->err_type) { ++ case proc_correctable: ++ case proc_uncorrectable_non_fatal: ++ case proc_uncorrectable_fatal: ++ if (wdata->flags & 0x1UL) { ++ hart_id = wdata->proc_id; ++ set_proc_error_with_type(hart_id, wdata->err_type); ++ } else { ++ sbi_printf("Processor correctable error requested but processor flag is not valid\n"); ++ *command_status = COMMAND_STATUS_INVAL_ACCESS; ++ return; ++ } ++ } ++} ++ ++static void set_err(void) ++{ ++ uint64_t *set_type = (uint64_t *)einj_get_gas_region(einj_action_set_error_type, ++ einj_inst_write_register); ++ ++ switch(*set_type) { ++ case proc_correctable: ++ case proc_uncorrectable_non_fatal: ++ case proc_uncorrectable_fatal: ++ set_proc_error_with_type(0, *set_type); ++ break; ++ default: ++ sbi_printf("%s: Set error type 0x%lx not 
supported\n", __func__, (ulong)*set_type); ++ break; ++ } ++} ++ ++einj_inst_entry_t *einj_get_instruction(int index) ++{ ++ struct sbi_dlist *pos; ++ einj_inst_cont_t *eicont; ++ int rind = 0; ++ ++ sbi_list_for_each(pos, &einj_inst_list) { ++ if (rind == index) { ++ eicont = to_einj_inst_cont(pos); ++ return &eicont->inst; ++ } ++ rind++; ++ } ++ ++ return NULL; ++} ++ ++static inline void set_status_busy(void) ++{ ++ einj_inst_entry_t *inst = einj_get_inst_for_act(einj_action_check_busy_status, ++ einj_inst_read_register); ++ volatile uint64_t *status = NULL; ++ ++ if (inst && inst->register_region.address) { ++ status = (uint64_t *)(ulong)inst->register_region.address; ++ /* Mark busy in executing operation */ ++ *status |= (0x1UL << EINJ_BUSY_BIT); ++ } ++} ++ ++static inline void set_status_free(void) ++{ ++ einj_inst_entry_t *inst = einj_get_inst_for_act(einj_action_check_busy_status, ++ einj_inst_read_register); ++ volatile uint64_t *status = NULL; ++ ++ if (inst && inst->register_region.address) { ++ status = (uint64_t *)(ulong)inst->register_region.address; ++ /* operation done */ ++ *status &= ~(0x1UL << EINJ_BUSY_BIT); ++ } ++} ++ ++void einj_execute_operation(void) ++{ ++ einj_err_type_with_addr_data_t *wa_data = ++ (einj_err_type_with_addr_data_t *)einj_get_gas_region(einj_action_set_error_type_with_address, ++ einj_inst_write_register); ++ ++ set_status_busy(); ++ ++ if (wa_data->err_type) { ++ set_err_with_address(wa_data); ++ } else { ++ set_err(); ++ } ++ ++ set_status_free(); ++} ++ ++void einj_trigger_operation(void) ++{ ++ if (g_current_err_rec == NULL) { ++ sbi_printf("%s: Trigger address is NULL!\n", __func__); ++ return; ++ } ++ ++ g_current_err_rec->control_i.eid = 10; ++ ++ g_current_err_rec = NULL; ++} +diff --git a/lib/utils/ras/ras_agent_mpxy.c b/lib/utils/ras/ras_agent_mpxy.c +index ad8eaac1..f78bdebf 100644 +--- a/lib/utils/ras/ras_agent_mpxy.c ++++ b/lib/utils/ras/ras_agent_mpxy.c +@@ -20,6 +20,7 @@ + #include + #include + #include 
++#include + #include + #include + #include +@@ -113,10 +114,12 @@ static int ras_handle_message(struct sbi_mpxy_channel *channel, u32 msg_id, + int rc = SBI_SUCCESS; + int nr, nes; + u32 *src_list; +- u32 src_id; ++ u32 src_id, inst_id; + uint8_t *src_desc; + struct ras_rpmi_resp_hdr *rhdr = (struct ras_rpmi_resp_hdr *)respbuf; + u32 *nsrcs; ++ volatile u32 *index; ++ einj_inst_entry_t *inst = NULL; + #define MAX_ID_BUF_SZ (sizeof(u32) * MAX_ERR_SRCS) + + switch(msg_id) { +@@ -165,6 +168,58 @@ static int ras_handle_message(struct sbi_mpxy_channel *channel, u32 msg_id, + *resp_len = sizeof(*rhdr) + sizeof(acpi_ghesv2); + break; + ++ case RAS_EINJ_GET_NUM_INSTRUCTIONS: ++ if (!respbuf) { ++ sbi_printf("%s: Invalid response buffer\n", __func__); ++ return -SBI_EINVAL; ++ } ++ ++ memset(respbuf, 0, resp_max_len); ++ nes = einj_get_total_injection_entries(); ++ rhdr->flags = 0; ++ rhdr->status = RPMI_SUCCESS; ++ rhdr->remaining = 0; ++ rhdr->returned = cpu_to_le32(nes); ++ ++ nsrcs = (u32 *)BUF_TO_DATA(respbuf); ++ *nsrcs = cpu_to_le32(nes); ++ *resp_len = sizeof(*rhdr) + (sizeof(u32)); ++ rc = SBI_SUCCESS; ++ break; ++ ++ case RAS_EINJ_GET_INSTRUCTION: ++ if (!respbuf) ++ return -SBI_EINVAL; ++ index = (volatile u32 *)BUF_TO_DATA(respbuf); ++ inst_id = *index; ++ inst = einj_get_instruction(inst_id); ++ if (inst == NULL) ++ return -SBI_ENOENT; ++ ++ if (resp_max_len <= sizeof(einj_inst_entry_t)) ++ return -SBI_ENOSPC; ++ ++ ++ memset(respbuf, 0, resp_max_len); ++ memcpy((void *)BUF_TO_DATA(respbuf), (void *)inst, sizeof(einj_inst_entry_t)); ++ *resp_len = sizeof(*rhdr) + sizeof(einj_inst_entry_t); ++ rhdr->flags = 0; ++ rhdr->status = RPMI_SUCCESS; ++ rhdr->remaining = 0; ++ rhdr->returned = sizeof(einj_inst_entry_t); ++ rc = SBI_SUCCESS; ++ break; ++ ++ case RAS_EINJ_EXECUTE_OPERATION: ++ einj_execute_operation(); ++ rc = SBI_SUCCESS; ++ break; ++ ++ case RAS_EINJ_TRIGGER_ERROR: ++ einj_trigger_operation(); ++ rc = SBI_SUCCESS; ++ break; ++ + default: + 
sbi_printf("RAS Agent: Unknown service %u\n", msg_id); + rc = SBI_ENOENT; +diff --git a/lib/utils/ras/reri_drv.c b/lib/utils/ras/reri_drv.c +index ae011e29..80c843ef 100644 +--- a/lib/utils/ras/reri_drv.c ++++ b/lib/utils/ras/reri_drv.c +@@ -22,6 +22,7 @@ + #include + #include + #include ++#include + + struct reri_generic_dev { + uint64_t addr; +@@ -234,6 +235,13 @@ static int fdt_parse_reri_device(const void *fdt, int nodeoff) + if ((ret = acpi_ghes_new_error_source(src_id, sse_vec)) < 0) + continue; + ++ if ((ret = einj_register_error_source(src_id, addr + (i * RERI_ERR_BANK_SIZE))) != 0) { ++ sbi_printf("Failed register error source %u with EINJ framework." ++ "Error injection will fail.\n", ++ src_id); ++ return ret; ++ } ++ + ret = sbi_sse_add_event(sse_vec, NULL); + ret = (ret != SBI_EALREADY) ? ret : 0; + if (ret) { +@@ -284,6 +292,15 @@ int reri_drv_init(const void *fdt, int nodeoff, const struct fdt_match *match) + return ret; + + acpi_ghes_init(addr, size); ++ ++ /* ++ * NOTE: Initialize error injection framework. EINJ uses the GHES reserved memory ++ * for the entries and the GAS regions. acpi_ghes_init must always be called ++ * before einj_init. 
++ */ ++ ret = einj_init(fdt, nodeoff); ++ if (ret) ++ return ret; + } + + fdt_for_each_subnode(doffset, fdt, nodeoff) { +-- +2.27.0 + diff --git a/opensbi.spec b/opensbi.spec index 5546cc8c218bee146f4c65afdf2b656e3fd6a842..8fa8c1c612a18b2703763083501cfed9f741c2fd 100644 --- a/opensbi.spec +++ b/opensbi.spec @@ -5,7 +5,7 @@ Name: opensbi Version: 1.2 -Release: 2 +Release: 3 Summary: RISC-V Open Source Supervisor Binary Interface URL: https://github.com/riscv-software-src/opensbi License: BSD @@ -13,6 +13,21 @@ License: BSD Source0: https://github.com/riscv-software-src/opensbi/archive/refs/tags/%{name}-1.2.tar.gz Patch0: 0001-Penglai-supports-2403.patch +Patch1: 0002-Provide-pre-support-for-RAS.patch +Patch2: 0003-lib-sbi-Introduce-high-priority-interrupt-for-RAS.patch +Patch3: 0004-lib-sbi-Introduce-RAS-common-interface-driver.patch +Patch4: 0005-lib-utils-Introduce-RAS-RPMI-based-driver.patch +Patch5: 0006-lib-sbi-Add-RAS-high-priority-interrupt-handler.patch +Patch6: 0007-include-sbi_utils-Add-reri-register-definitions.patch +Patch7: 0008-lib-utils-Add-support-for-error-source-discovery-and.patch +Patch8: 0009-lib-utils-Add-support-for-RAS-agent-in-OpenSBI.patch +Patch9: 0010-lib-utils-Add-RAS-agent-service-group-on-MPXY.patch +Patch10: 0011-lib-utils-default-FDT_RAS_RPMI-to-n.patch +Patch11: 0012-lib-utils-Enable-SBI-RAS-Agent-by-default.patch +Patch12: 0013-include-sbi_utils-add-GAS-access-sizes-and-error-def.patch +Patch13: 0014-lib-utils-export-memory-allocation-function-for-ghes.patch +Patch14: 0015-lib-utils-add-function-to-send-MPXY-message-without-.patch +Patch15: 0016-lib-utils-add-support-for-hardware-error-injection.patch BuildRequires: kernel BuildRequires: gcc, binutils, findutils, coreutils, gzip, file @@ -70,6 +85,9 @@ cp %{buildroot}/share/opensbi/lp64/generic/firmware/fw_payload.elf \ /share/opensbi/lp64/generic/* %changelog +* Wed Dec 03 2025 liuqingtao - 1.2-3-riscv64 +- Add patches for RAS-SSE-MPXY + * Tue Mar 19 2024 ZhaoXi - 1.2-2-riscv64 - 
Add patch for Penglai-Enclave-sPMP - 2403